Mirror of https://github.com/ansible/awx.git (synced 2026-01-09 23:12:08 -03:30)

commit 56f8d6748b (parent: 9e898953dd)

Updated inventory plugins from ansible, updated all third-party packages in
awx/lib/site-packages to their latest versions, and made minor serializer
fixes needed after the package upgrades.
@@ -302,6 +302,24 @@ class BaseSerializer(serializers.ModelSerializer):
        else:
            return obj.active

    def get_validation_exclusions(self, instance=None):
        # Override base class method to continue to use model validation for
        # fields (including optional ones), appears this was broken by DRF
        # 2.3.13 update.
        cls = self.opts.model
        opts = get_concrete_model(cls)._meta
        exclusions = [field.name for field in opts.fields + opts.many_to_many]
        for field_name, field in self.fields.items():
            field_name = field.source or field_name
            if field_name not in exclusions:
                continue
            if field.read_only:
                continue
            if isinstance(field, serializers.Serializer):
                continue
            exclusions.remove(field_name)
        return exclusions


class UnifiedJobTemplateSerializer(BaseSerializer):
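To see what the override does in isolation, here is a minimal sketch of the same filtering rule with stand-in field objects (the nested-serializer check is omitted, and these are not the real DRF classes):

class Field(object):
    """Stand-in with only the attributes the loop inspects."""
    def __init__(self, source=None, read_only=False):
        self.source = source
        self.read_only = read_only

def validation_exclusions(model_field_names, serializer_fields):
    # Start by excluding every model field, then re-include any field
    # the serializer itself exposes as writable.
    exclusions = list(model_field_names)
    for name, field in serializer_fields.items():
        name = field.source or name
        if name in exclusions and not field.read_only:
            exclusions.remove(name)
    return exclusions

# 'name' is writable, so the model validates it; 'created' stays excluded.
print(validation_exclusions(
    ['name', 'created'],
    {'name': Field(), 'created': Field(read_only=True)}))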
@@ -1,54 +1,54 @@
Local versions of third-party packages required by AWX. Package names and
Local versions of third-party packages required by Tower. Package names and
versions are listed below, along with notes on which files are included.

amqp==1.3.3 (amqp/*)
amqp==1.4.4 (amqp/*)
anyjson==0.3.3 (anyjson/*)
argparse==1.2.1 (argparse.py, needed for Python 2.6 support)
Babel==1.3 (babel/*, excluded bin/pybabel)
billiard==3.3.0.13 (billiard/*, funtests/*, excluded _billiard.so)
boto==2.21.2 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
billiard==3.3.0.16 (billiard/*, funtests/*, excluded _billiard.so)
boto==2.27.0 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
    bin/cq, bin/cwutil, bin/dynamodb_dump, bin/dynamodb_load, bin/elbadmin,
    bin/fetch_file, bin/glacier, bin/instance_events, bin/kill_instance,
    bin/launch_instance, bin/list_instances, bin/lss3, bin/mturk,
    bin/pyami_sendmail, bin/route53, bin/s3put, bin/sdbadmin, bin/taskadmin)
celery==3.1.7 (celery/*, excluded bin/celery*)
celery==3.1.10 (celery/*, excluded bin/celery*)
d2to1==0.2.11 (d2to1/*)
distribute==0.7.3 (no files)
django-auth-ldap==1.1.7 (django_auth_ldap/*)
django-celery==3.1.1 (djcelery/*)
django-extensions==1.2.5 (django_extensions/*)
django-auth-ldap==1.1.8 (django_auth_ldap/*)
django-celery==3.1.10 (djcelery/*)
django-extensions==1.3.3 (django_extensions/*)
django-jsonfield==0.9.12 (jsonfield/*, minor fix in jsonfield/fields.py)
django-polymorphic==0.5.3 (polymorphic/*)
django-split-settings==0.1.1 (split_settings/*)
django-taggit==0.11.2 (taggit/*)
djangorestframework==2.3.10 (rest_framework/*)
djangorestframework==2.3.13 (rest_framework/*)
httplib2==0.8 (httplib2/*)
importlib==1.0.2 (importlib/*, needed for Python 2.6 support)
iso8601==0.1.8 (iso8601/*)
keyring==3.3 (keyring/*, excluded bin/keyring)
kombu==3.0.8 (kombu/*)
Markdown==2.3.1 (markdown/*, excluded bin/markdown_py)
importlib==1.0.3 (importlib/*, needed for Python 2.6 support)
iso8601==0.1.10 (iso8601/*)
keyring==3.7 (keyring/*, excluded bin/keyring)
kombu==3.0.14 (kombu/*)
Markdown==2.4 (markdown/*, excluded bin/markdown_py)
mock==1.0.1 (mock.py)
ordereddict==1.1 (ordereddict.py, needed for Python 2.6 support)
os-diskconfig-python-novaclient-ext==0.1.1 (os_diskconfig_python_novaclient_ext/*)
os-diskconfig-python-novaclient-ext==0.1.2 (os_diskconfig_python_novaclient_ext/*)
os-networksv2-python-novaclient-ext==0.21 (os_networksv2_python_novaclient_ext.py)
os-virtual-interfacesv2-python-novaclient-ext==0.14 (os_virtual_interfacesv2_python_novaclient_ext.py)
pbr==0.5.23 (pbr/*)
pexpect==3.0 (pexpect/*, excluded pxssh.py, fdpexpect.py, FSM.py, screen.py,
os-virtual-interfacesv2-python-novaclient-ext==0.15 (os_virtual_interfacesv2_python_novaclient_ext.py)
pbr==0.8.0 (pbr/*)
pexpect==3.1 (pexpect/*, excluded pxssh.py, fdpexpect.py, FSM.py, screen.py,
    ANSI.py)
pip==1.5 (pip/*, excluded bin/pip*)
pip==1.5.4 (pip/*, excluded bin/pip*)
prettytable==0.7.2 (prettytable.py)
pyrax==1.6.2 (pyrax/*)
pyrax==1.7.2 (pyrax/*)
python-dateutil==2.2 (dateutil/*)
python-novaclient==2.15.0 (novaclient/*, excluded bin/nova)
python-swiftclient==1.8.0 (swiftclient/*, excluded bin/swift)
pytz==2013.8 (pytz/*)
rackspace-auth-openstack==1.2 (rackspace_auth_openstack/*)
python-novaclient==2.17.0 (novaclient/*, excluded bin/nova)
python-swiftclient==2.0.3 (swiftclient/*, excluded bin/swift)
pytz==2014.2 (pytz/*)
rackspace-auth-openstack==1.3 (rackspace_auth_openstack/*)
rackspace-novaclient==1.4 (no files)
rax-default-network-flags-python-novaclient-ext==0.1.3 (rax_default_network_flags_python_novaclient_ext/*)
rax-default-network-flags-python-novaclient-ext==0.2.3 (rax_default_network_flags_python_novaclient_ext/*)
rax-scheduled-images-python-novaclient-ext==0.2.1 (rax_scheduled_images_python_novaclient_ext/*)
requests==2.1.0 (requests/*)
setuptools==2.0.2 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*)
simplejson==3.3.1 (simplejson/*, excluded simplejson/_speedups.so)
six==1.4.1 (six.py)
requests==2.2.1 (requests/*)
setuptools==2.2 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*)
simplejson==3.3.3 (simplejson/*, excluded simplejson/_speedups.so)
six==1.6.1 (six.py)
South==0.8.4 (south/*)
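A quick way to spot-check that a vendored copy actually matches its listed version (a hedged sketch; not every package exposes __version__, and the names below are just examples):

import importlib

for name, expected in [('amqp', '1.4.4'), ('kombu', '3.0.14'),
                       ('celery', '3.1.10'), ('six', '1.6.1')]:
    mod = importlib.import_module(name)
    found = getattr(mod, '__version__', 'unknown')
    flag = 'ok' if found == expected else 'MISMATCH'
    print('%-8s expected %-8s found %-8s %s' % (name, expected, found, flag))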
@@ -16,7 +16,7 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from __future__ import absolute_import

VERSION = (1, 3, 3)
VERSION = (1, 4, 4)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Barry Pederson'
__maintainer__ = 'Ask Solem'
@@ -19,12 +19,6 @@ from __future__ import absolute_import
from .exceptions import AMQPNotImplementedError, RecoverableConnectionError
from .serialization import AMQPWriter

try:
    bytes
except NameError:
    # Python 2.5 and lower
    bytes = str

__all__ = ['AbstractChannel']
@@ -31,6 +31,15 @@ __all__ = ['Channel']

AMQP_LOGGER = logging.getLogger('amqp')

EXCHANGE_AUTODELETE_DEPRECATED = """\
The auto_delete flag for exchanges has been deprecated and will be removed
from py-amqp v1.5.0.\
"""


class VDeprecationWarning(DeprecationWarning):
    pass


class Channel(AbstractChannel):
    """Work with channels

@@ -604,8 +613,7 @@ class Channel(AbstractChannel):
        self._send_method((40, 10), args)

        if auto_delete:
            warn(DeprecationWarning(
                'auto_delete exchanges has been deprecated'))
            warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED))

        if not nowait:
            return self.wait(allowed_methods=[
@@ -34,7 +34,7 @@ from .exceptions import (
    ConnectionForced, ConnectionError, error_for_code,
    RecoverableConnectionError, RecoverableChannelError,
)
from .five import items, range, values
from .five import items, range, values, monotonic
from .method_framing import MethodReader, MethodWriter
from .serialization import AMQPWriter
from .transport import create_transport

@@ -80,9 +80,26 @@ class Connection(AbstractChannel):
    """
    Channel = Channel

    #: Final heartbeat interval value (in float seconds) after negotiation
    heartbeat = None

    #: Original heartbeat interval value proposed by client.
    client_heartbeat = None

    #: Original heartbeat interval proposed by server.
    server_heartbeat = None

    #: Time of last heartbeat sent (in monotonic time, if available).
    last_heartbeat_sent = 0

    #: Time of last heartbeat received (in monotonic time, if available).
    last_heartbeat_received = 0

    #: Number of bytes sent to socket at the last heartbeat check.
    prev_sent = None

    #: Number of bytes received from socket at the last heartbeat check.
    prev_recv = None
    missed_heartbeats = 0

    def __init__(self, host='localhost', userid='guest', password='guest',
                 login_method='AMQPLAIN', login_response=None,

@@ -125,7 +142,7 @@ class Connection(AbstractChannel):
        # Properties set in the Tune method
        self.channel_max = channel_max
        self.frame_max = frame_max
        self.heartbeat = heartbeat
        self.client_heartbeat = heartbeat

        self.confirm_publish = confirm_publish
@@ -840,10 +857,22 @@ class Connection(AbstractChannel):
        want a heartbeat.

        """
        client_heartbeat = self.client_heartbeat or 0
        self.channel_max = args.read_short() or self.channel_max
        self.frame_max = args.read_long() or self.frame_max
        self.method_writer.frame_max = self.frame_max
        heartbeat = args.read_short()  # noqa
        self.server_heartbeat = args.read_short() or 0

        # negotiate the heartbeat interval to the smaller of the
        # specified values
        if self.server_heartbeat == 0 or client_heartbeat == 0:
            self.heartbeat = max(self.server_heartbeat, client_heartbeat)
        else:
            self.heartbeat = min(self.server_heartbeat, client_heartbeat)

        # Ignore server heartbeat if client_heartbeat is disabled
        if not self.client_heartbeat:
            self.heartbeat = 0

        self._x_tune_ok(self.channel_max, self.frame_max, self.heartbeat)
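The negotiation the hunk above implements reads more clearly as a pure function; a sketch of just the arithmetic (not the library's API):

def negotiate_heartbeat(client, server):
    """Effective heartbeat interval from the two proposals."""
    client, server = client or 0, server or 0
    if client == 0:
        return 0                   # client disabled heartbeats entirely
    if server == 0:
        return client              # only one side proposed a value
    return min(client, server)     # both proposed: use the smaller

assert negotiate_heartbeat(0, 30) == 0
assert negotiate_heartbeat(60, 0) == 60
assert negotiate_heartbeat(60, 30) == 30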
@@ -851,28 +880,34 @@ class Connection(AbstractChannel):
        self.transport.write_frame(8, 0, bytes())

    def heartbeat_tick(self, rate=2):
        """Verify that hartbeats are sent and received.

        :keyword rate: Rate is how often the tick is called
            compared to the actual heartbeat value.  E.g. if
            the heartbeat is set to 3 seconds, and the tick
            is called every 3 / 2 seconds, then the rate is 2.
        """Send heartbeat packets, if necessary, and fail if none have been
        received recently.  This should be called frequently, on the order of
        once per second.

        :keyword rate: Ignored
        """
        if not self.heartbeat:
            return

        # treat actual data exchange in either direction as a heartbeat
        sent_now = self.method_writer.bytes_sent
        recv_now = self.method_reader.bytes_recv

        if self.prev_sent is not None and self.prev_sent == sent_now:
            self.send_heartbeat()

        if self.prev_recv is not None and self.prev_recv == recv_now:
            self.missed_heartbeats += 1
        else:
            self.missed_heartbeats = 0

        if self.prev_sent is None or self.prev_sent != sent_now:
            self.last_heartbeat_sent = monotonic()
        if self.prev_recv is None or self.prev_recv != recv_now:
            self.last_heartbeat_received = monotonic()
        self.prev_sent, self.prev_recv = sent_now, recv_now

        if self.missed_heartbeats >= rate:
        # send a heartbeat if it's time to do so
        if monotonic() > self.last_heartbeat_sent + self.heartbeat:
            self.send_heartbeat()
            self.last_heartbeat_sent = monotonic()

        # if we've missed two intervals' heartbeats, fail; this gives the
        # server enough time to send heartbeats a little late
        if (self.last_heartbeat_received and
                self.last_heartbeat_received + 2 *
                self.heartbeat < monotonic()):
            raise ConnectionForced('Too many heartbeats missed')

    def _x_tune_ok(self, channel_max, frame_max, heartbeat):
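Per the new docstring, heartbeat_tick() is meant to be polled roughly once per second while the connection is otherwise idle. A hedged usage sketch (the broker address and heartbeat value are assumptions, not part of the diff):

import socket
import amqp  # the vendored py-amqp patched above

conn = amqp.Connection('localhost', heartbeat=10)  # assumes a local broker
try:
    while True:
        conn.heartbeat_tick()              # send/verify heartbeats
        try:
            conn.drain_events(timeout=1)   # handle traffic for ~1 second
        except socket.timeout:
            pass                           # idle interval; tick again
finally:
    conn.close()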
@@ -131,3 +131,58 @@ def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
        return Type(Class.__name__, Class.__bases__, attrs)

    return _clone_with_metaclass

############## time.monotonic ################################################

if sys.version_info < (3, 3):

    import platform
    SYSTEM = platform.system()

    if SYSTEM == 'Darwin':
        import ctypes
        from ctypes.util import find_library
        libSystem = ctypes.CDLL('libSystem.dylib')
        CoreServices = ctypes.CDLL(find_library('CoreServices'),
                                   use_errno=True)
        mach_absolute_time = libSystem.mach_absolute_time
        mach_absolute_time.restype = ctypes.c_uint64
        absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
        absolute_to_nanoseconds.restype = ctypes.c_uint64
        absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]

        def _monotonic():
            return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9

    elif SYSTEM == 'Linux':
        # from stackoverflow:
        # questions/1205722/how-do-i-get-monotonic-time-durations-in-python
        import ctypes
        import os

        CLOCK_MONOTONIC = 1  # see <linux/time.h>

        class timespec(ctypes.Structure):
            _fields_ = [
                ('tv_sec', ctypes.c_long),
                ('tv_nsec', ctypes.c_long),
            ]

        librt = ctypes.CDLL('librt.so.1', use_errno=True)
        clock_gettime = librt.clock_gettime
        clock_gettime.argtypes = [
            ctypes.c_int, ctypes.POINTER(timespec),
        ]

        def _monotonic():  # noqa
            t = timespec()
            if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
                errno_ = ctypes.get_errno()
                raise OSError(errno_, os.strerror(errno_))
            return t.tv_sec + t.tv_nsec * 1e-9
    else:
        from time import time as _monotonic
try:
    from time import monotonic
except ImportError:
    monotonic = _monotonic  # noqa
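Callers never need to know which branch was selected; the module ends by exposing a single monotonic name, so interval measurements are immune to wall-clock adjustments. A small usage sketch:

from amqp.five import monotonic  # resolved by the fallback chain above

start = monotonic()
total = sum(range(10 ** 6))          # some workload
print('%.6fs' % (monotonic() - start))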
@@ -19,12 +19,6 @@ from __future__ import absolute_import
from collections import defaultdict, deque
from struct import pack, unpack

try:
    bytes
except NameError:
    # Python 2.5 and lower
    bytes = str

from .basic_message import Message
from .exceptions import AMQPError, UnexpectedFrame
from .five import range, string
@@ -25,6 +25,7 @@ import sys

from datetime import datetime
from decimal import Decimal
from io import BytesIO
from struct import pack, unpack
from time import mktime

@@ -39,19 +40,6 @@ if IS_PY3K:
else:
    byte = chr

try:
    from io import BytesIO
except ImportError:  # Py2.5
    try:
        from cStringIO import StringIO as BytesIO  # noqa
    except ImportError:
        from StringIO import StringIO as BytesIO  # noqa

try:
    bytes
except NameError:
    # Python 2.5 and lower
    bytes = str

ILLEGAL_TABLE_TYPE_WITH_KEY = """\
Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}]

@@ -174,6 +162,8 @@ class AMQPReader(object):
            val = self.read_bit()
        elif ftype == 100:
            val = self.read_float()
        elif ftype == 86:  # 'V'
            val = None
        else:
            raise FrameSyntaxError(
                'Unknown value in table: {0!r} ({1!r})'.format(

@@ -357,6 +347,8 @@ class AMQPWriter(object):
        elif isinstance(v, (list, tuple)):
            self.write(b'A')
            self.write_array(v)
        elif v is None:
            self.write(b'V')
        else:
            err = (ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v) if k
                   else ILLEGAL_TABLE_TYPE.format(type(v), v))
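Together the reader and writer hunks add the AMQP "void" table field (type byte 86, ASCII 'V'), so None survives a table round-trip. A hedged sketch against the module's own classes (constructor and method details per py-amqp 1.4; other versions may differ):

from amqp.serialization import AMQPReader, AMQPWriter

w = AMQPWriter()
w.write_table({'trace-id': None})    # None is now encoded as type 'V'
raw = w.getvalue()

r = AMQPReader(raw)
assert r.read_table() == {'trace-id': None}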
@@ -1,9 +1,3 @@
"""
Read/Write AMQP frames over network transports.

2009-01-14 Barry Pederson <bp@barryp.org>

"""
# Copyright (C) 2009 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or

@@ -24,6 +18,7 @@ from __future__ import absolute_import
import errno
import re
import socket
import ssl

# Jython does not have this attribute
try:

@@ -31,27 +26,18 @@ try:
except ImportError:  # pragma: no cover
    from socket import IPPROTO_TCP as SOL_TCP  # noqa

#
# See if Python 2.6+ SSL support is available
#
try:
    import ssl
    HAVE_PY26_SSL = True
except:
    HAVE_PY26_SSL = False

try:
    bytes
except:
    # Python 2.5 and lower
    bytes = str
    from ssl import SSLError
except ImportError:
    class SSLError(Exception):  # noqa
        pass

from struct import pack, unpack

from .exceptions import UnexpectedFrame
from .utils import get_errno, set_cloexec

_UNAVAIL = errno.EAGAIN, errno.EINTR
_UNAVAIL = errno.EAGAIN, errno.EINTR, errno.ENOENT

AMQP_PORT = 5672

@@ -166,6 +152,10 @@ class _AbstractTransport(object):
        except socket.timeout:
            raise
        except (OSError, IOError, socket.error) as exc:
            # Don't disconnect for ssl read time outs
            # http://bugs.python.org/issue10272
            if isinstance(exc, SSLError) and 'timed out' in str(exc):
                raise socket.timeout()
            if get_errno(exc) not in _UNAVAIL:
                self.connected = False
            raise

@@ -200,22 +190,17 @@ class SSLTransport(_AbstractTransport):
        super(SSLTransport, self).__init__(host, connect_timeout)

    def _setup_transport(self):
        """Wrap the socket in an SSL object, either the
        new Python 2.6 version, or the older Python 2.5 and
        lower version."""
        if HAVE_PY26_SSL:
            if hasattr(self, 'sslopts'):
                self.sock = ssl.wrap_socket(self.sock, **self.sslopts)
            else:
                self.sock = ssl.wrap_socket(self.sock)
            self.sock.do_handshake()
        """Wrap the socket in an SSL object."""
        if hasattr(self, 'sslopts'):
            self.sock = ssl.wrap_socket(self.sock, **self.sslopts)
        else:
            self.sock = socket.ssl(self.sock)
            self.sock = ssl.wrap_socket(self.sock)
        self.sock.do_handshake()
        self._quick_recv = self.sock.read

    def _shutdown_transport(self):
        """Unwrap a Python 2.6 SSL socket, so we can call shutdown()"""
        if HAVE_PY26_SSL and self.sock is not None:
        if self.sock is not None:
            try:
                unwrap = self.sock.unwrap
            except AttributeError:

@@ -232,7 +217,7 @@ class SSLTransport(_AbstractTransport):
        try:
            while len(rbuf) < n:
                try:
                    s = recv(131072)  # see note above
                    s = recv(n - len(rbuf))  # see note above
                except socket.error as exc:
                    # ssl.sock.read may cause ENOENT if the
                    # operation couldn't be performed (Issue celery#1414).

@@ -275,7 +260,7 @@ class TCPTransport(_AbstractTransport):
        try:
            while len(rbuf) < n:
                try:
                    s = recv(131072)
                    s = recv(n - len(rbuf))
                except socket.error as exc:
                    if not initial and exc.errno in _errnos:
                        continue
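Both recv hunks fix the same over-read: asking for a fixed 131072 bytes can pull data beyond the n bytes the caller actually wants out of the socket. The corrected pattern as a standalone sketch:

def read_exact(sock, n):
    """Read exactly n bytes, never requesting more than remain."""
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))   # never ask past the frame end
        if not chunk:
            raise EOFError('%d byte(s) still pending' % (n - len(buf)))
        buf += chunk
    return buf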
@@ -11,7 +11,8 @@ except ImportError:
class promise(object):
    if not hasattr(sys, 'pypy_version_info'):
        __slots__ = tuple(
            'fun args kwargs value ready failed on_success on_error'.split()
            'fun args kwargs value ready failed '
            ' on_success on_error calls'.split()
        )

    def __init__(self, fun, args=(), kwargs=(),

@@ -24,6 +25,7 @@ class promise(object):
        self.on_success = on_success
        self.on_error = on_error
        self.value = None
        self.calls = 0

    def __repr__(self):
        return '<$: {0.fun.__name__}(*{0.args!r}, **{0.kwargs!r})'.format(

@@ -43,6 +45,7 @@ class promise(object):
            self.on_success(self.value)
        finally:
            self.ready = True
            self.calls += 1

    def then(self, callback=None, on_error=None):
        self.on_success = callback
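The new calls attribute makes a promise's invocation count observable even when the wrapped function raises. A trimmed sketch of the idea (not the vendored class itself):

class Promise(object):
    def __init__(self, fun):
        self.fun = fun
        self.ready = False
        self.calls = 0          # new in this version: invocation counter

    def __call__(self, *args, **kwargs):
        try:
            return self.fun(*args, **kwargs)
        finally:
            self.ready = True
            self.calls += 1     # incremented even if fun raised

p = Promise(len)
p('abc')
assert p.ready and p.calls == 1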
@@ -19,7 +19,7 @@

from __future__ import absolute_import

VERSION = (3, 3, 0, 13)
VERSION = (3, 3, 0, 16)
__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
__author__ = 'R Oudkerk / Python Software Foundation'
__author_email__ = 'python-dev@python.org'
@@ -1,964 +0,0 @@
#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

__all__ = ['Client', 'Listener', 'Pipe', 'wait']

import io
import os
import sys
import select
import socket
import struct
import errno
import tempfile
import itertools

import _multiprocessing
from .compat import setblocking
from .exceptions import AuthenticationError, BufferTooShort
from .five import monotonic
from .util import get_temp_dir, Finalize, sub_debug
from .reduction import ForkingPickler
try:
    import _winapi
    from _winapi import (
        WAIT_OBJECT_0,
        WAIT_TIMEOUT,
        INFINITE,
    )
    # if we got here, we seem to be running on Windows. Handle probably
    # missing WAIT_ABANDONED_0 constant:
    try:
        from _winapi import WAIT_ABANDONED_0
    except ImportError:
        WAIT_ABANDONED_0 = 128  # _winapi seems to be not exporting
                                # this constant, fallback solution until
                                # exported in _winapi
except ImportError:
    if sys.platform == 'win32':
        raise
    _winapi = None

#
#
#

BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.

_mmap_counter = itertools.count()

default_family = 'AF_INET'
families = ['AF_INET']

if hasattr(socket, 'AF_UNIX'):
    default_family = 'AF_UNIX'
    families += ['AF_UNIX']

if sys.platform == 'win32':
    default_family = 'AF_PIPE'
    families += ['AF_PIPE']


def _init_timeout(timeout=CONNECTION_TIMEOUT):
    return monotonic() + timeout


def _check_timeout(t):
    return monotonic() > t


def arbitrary_address(family):
    '''
    Return an arbitrary free address for the given family
    '''
    if family == 'AF_INET':
        return ('localhost', 0)
    elif family == 'AF_UNIX':
        return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
    elif family == 'AF_PIPE':
        return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
                               (os.getpid(), next(_mmap_counter)))
    else:
        raise ValueError('unrecognized family')


def _validate_family(family):
    '''
    Checks if the family is valid for the current environment.
    '''
    if sys.platform != 'win32' and family == 'AF_PIPE':
        raise ValueError('Family %s is not recognized.' % family)

    if sys.platform == 'win32' and family == 'AF_UNIX':
        # double check
        if not hasattr(socket, family):
            raise ValueError('Family %s is not recognized.' % family)


def address_type(address):
    '''
    Return the types of the address

    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
    '''
    if type(address) == tuple:
        return 'AF_INET'
    elif type(address) is str and address.startswith('\\\\'):
        return 'AF_PIPE'
    elif type(address) is str:
        return 'AF_UNIX'
    else:
        raise ValueError('address type of %r unrecognized' % address)

#
# Connection classes
#


class _ConnectionBase:
    _handle = None

    def __init__(self, handle, readable=True, writable=True):
        handle = handle.__index__()
        if handle < 0:
            raise ValueError("invalid handle")
        if not readable and not writable:
            raise ValueError(
                "at least one of `readable` and `writable` must be True")
        self._handle = handle
        self._readable = readable
        self._writable = writable

    # XXX should we use util.Finalize instead of a __del__?

    def __del__(self):
        if self._handle is not None:
            self._close()

    def _check_closed(self):
        if self._handle is None:
            raise OSError("handle is closed")

    def _check_readable(self):
        if not self._readable:
            raise OSError("connection is write-only")

    def _check_writable(self):
        if not self._writable:
            raise OSError("connection is read-only")

    def _bad_message_length(self):
        if self._writable:
            self._readable = False
        else:
            self.close()
        raise OSError("bad message length")

    @property
    def closed(self):
        """True if the connection is closed"""
        return self._handle is None

    @property
    def readable(self):
        """True if the connection is readable"""
        return self._readable

    @property
    def writable(self):
        """True if the connection is writable"""
        return self._writable

    def fileno(self):
        """File descriptor or handle of the connection"""
        self._check_closed()
        return self._handle

    def close(self):
        """Close the connection"""
        if self._handle is not None:
            try:
                self._close()
            finally:
                self._handle = None

    def send_bytes(self, buf, offset=0, size=None):
        """Send the bytes data from a bytes-like object"""
        self._check_closed()
        self._check_writable()
        m = memoryview(buf)
        # HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
        if m.itemsize > 1:
            m = memoryview(bytes(m))
        n = len(m)
        if offset < 0:
            raise ValueError("offset is negative")
        if n < offset:
            raise ValueError("buffer length < offset")
        if size is None:
            size = n - offset
        elif size < 0:
            raise ValueError("size is negative")
        elif offset + size > n:
            raise ValueError("buffer length < offset + size")
        self._send_bytes(m[offset:offset + size])

    def send(self, obj):
        """Send a (picklable) object"""
        self._check_closed()
        self._check_writable()
        self._send_bytes(ForkingPickler.dumps(obj))

    def recv_bytes(self, maxlength=None):
        """
        Receive bytes data as a bytes object.
        """
        self._check_closed()
        self._check_readable()
        if maxlength is not None and maxlength < 0:
            raise ValueError("negative maxlength")
        buf = self._recv_bytes(maxlength)
        if buf is None:
            self._bad_message_length()
        return buf.getvalue()

    def recv_bytes_into(self, buf, offset=0):
        """
        Receive bytes data into a writeable buffer-like object.
        Return the number of bytes read.
        """
        self._check_closed()
        self._check_readable()
        with memoryview(buf) as m:
            # Get bytesize of arbitrary buffer
            itemsize = m.itemsize
            bytesize = itemsize * len(m)
            if offset < 0:
                raise ValueError("negative offset")
            elif offset > bytesize:
                raise ValueError("offset too large")
            result = self._recv_bytes()
            size = result.tell()
            if bytesize < offset + size:
                raise BufferTooShort(result.getvalue())
            # Message can fit in dest
            result.seek(0)
            result.readinto(
                m[offset // itemsize:(offset + size) // itemsize]
            )
            return size

    def recv_payload(self):
        return self._recv_bytes().getbuffer()

    def recv(self):
        """Receive a (picklable) object"""
        self._check_closed()
        self._check_readable()
        buf = self._recv_bytes()
        return ForkingPickler.loads(buf.getbuffer())

    def poll(self, timeout=0.0):
        """Whether there is any input available to be read"""
        self._check_closed()
        self._check_readable()
        return self._poll(timeout)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
if _winapi:

    class PipeConnection(_ConnectionBase):
        """
        Connection class based on a Windows named pipe.
        Overlapped I/O is used, so the handles must have been created
        with FILE_FLAG_OVERLAPPED.
        """
        _got_empty_message = False

        def _close(self, _CloseHandle=_winapi.CloseHandle):
            _CloseHandle(self._handle)

        def _send_bytes(self, buf):
            ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
            try:
                if err == _winapi.ERROR_IO_PENDING:
                    waitres = _winapi.WaitForMultipleObjects(
                        [ov.event], False, INFINITE)
                    assert waitres == WAIT_OBJECT_0
            except:
                ov.cancel()
                raise
            finally:
                nwritten, err = ov.GetOverlappedResult(True)
            assert err == 0
            assert nwritten == len(buf)

        def _recv_bytes(self, maxsize=None):
            if self._got_empty_message:
                self._got_empty_message = False
                return io.BytesIO()
            else:
                bsize = 128 if maxsize is None else min(maxsize, 128)
                try:
                    ov, err = _winapi.ReadFile(self._handle, bsize,
                                               overlapped=True)
                    try:
                        if err == _winapi.ERROR_IO_PENDING:
                            waitres = _winapi.WaitForMultipleObjects(
                                [ov.event], False, INFINITE)
                            assert waitres == WAIT_OBJECT_0
                    except:
                        ov.cancel()
                        raise
                    finally:
                        nread, err = ov.GetOverlappedResult(True)
                        if err == 0:
                            f = io.BytesIO()
                            f.write(ov.getbuffer())
                            return f
                        elif err == _winapi.ERROR_MORE_DATA:
                            return self._get_more_data(ov, maxsize)
                except OSError as e:
                    if e.winerror == _winapi.ERROR_BROKEN_PIPE:
                        raise EOFError
                    else:
                        raise
            raise RuntimeError(
                "shouldn't get here; expected KeyboardInterrupt"
            )

        def _poll(self, timeout):
            if (self._got_empty_message or
                    _winapi.PeekNamedPipe(self._handle)[0] != 0):
                return True
            return bool(wait([self], timeout))

        def _get_more_data(self, ov, maxsize):
            buf = ov.getbuffer()
            f = io.BytesIO()
            f.write(buf)
            left = _winapi.PeekNamedPipe(self._handle)[1]
            assert left > 0
            if maxsize is not None and len(buf) + left > maxsize:
                self._bad_message_length()
            ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
            rbytes, err = ov.GetOverlappedResult(True)
            assert err == 0
            assert rbytes == left
            f.write(ov.getbuffer())
            return f
class Connection(_ConnectionBase):
    """
    Connection class based on an arbitrary file descriptor (Unix only), or
    a socket handle (Windows).
    """

    if _winapi:
        def _close(self, _close=_multiprocessing.closesocket):
            _close(self._handle)
        _write = _multiprocessing.send
        _read = _multiprocessing.recv
    else:
        def _close(self, _close=os.close):  # noqa
            _close(self._handle)
        _write = os.write
        _read = os.read

    def send_offset(self, buf, offset, write=_write):
        return write(self._handle, buf[offset:])

    def _send(self, buf, write=_write):
        remaining = len(buf)
        while True:
            try:
                n = write(self._handle, buf)
            except OSError as exc:
                if exc.errno == errno.EINTR:
                    continue
                raise
            remaining -= n
            if remaining == 0:
                break
            buf = buf[n:]

    def setblocking(self, blocking):
        setblocking(self._handle, blocking)

    def _recv(self, size, read=_read):
        buf = io.BytesIO()
        handle = self._handle
        remaining = size
        while remaining > 0:
            try:
                chunk = read(handle, remaining)
            except OSError as exc:
                if exc.errno == errno.EINTR:
                    continue
                raise
            n = len(chunk)
            if n == 0:
                if remaining == size:
                    raise EOFError
                else:
                    raise OSError("got end of file during message")
            buf.write(chunk)
            remaining -= n
        return buf

    def _send_bytes(self, buf):
        # For wire compatibility with 3.2 and lower
        n = len(buf)
        self._send(struct.pack("!i", n))
        # The condition is necessary to avoid "broken pipe" errors
        # when sending a 0-length buffer if the other end closed the pipe.
        if n > 0:
            self._send(buf)

    def _recv_bytes(self, maxsize=None):
        buf = self._recv(4)
        size, = struct.unpack("!i", buf.getvalue())
        if maxsize is not None and size > maxsize:
            return None
        return self._recv(size)

    def _poll(self, timeout):
        r = wait([self], timeout)
        return bool(r)
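The wire format implemented by _send_bytes/_recv_bytes above is a 4-byte big-endian signed length prefix followed by the payload. A standalone sketch of the same framing over a Unix socketpair (stdlib only, not the vendored class itself):

import socket
import struct

def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise EOFError('peer closed mid-message')
        buf += chunk
    return buf

def send_msg(sock, payload):
    # "!i" = network byte order, 4-byte signed int, as in _send_bytes()
    sock.sendall(struct.pack('!i', len(payload)) + payload)

def recv_msg(sock):
    size, = struct.unpack('!i', recv_exact(sock, 4))
    return recv_exact(sock, size)

a, b = socket.socketpair()
send_msg(a, b'ping')
assert recv_msg(b) == b'ping'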
#
# Public functions
#

class Listener(object):
    '''
    Returns a listener object.

    This is a wrapper for a bound socket which is 'listening' for
    connections, or for a Windows named pipe.
    '''
    def __init__(self, address=None, family=None, backlog=1, authkey=None):
        family = (family or (address and address_type(address))
                  or default_family)
        address = address or arbitrary_address(family)

        _validate_family(family)
        if family == 'AF_PIPE':
            self._listener = PipeListener(address, backlog)
        else:
            self._listener = SocketListener(address, family, backlog)

        if authkey is not None and not isinstance(authkey, bytes):
            raise TypeError('authkey should be a byte string')

        self._authkey = authkey

    def accept(self):
        '''
        Accept a connection on the bound socket or named pipe of `self`.

        Returns a `Connection` object.
        '''
        if self._listener is None:
            raise OSError('listener is closed')
        c = self._listener.accept()
        if self._authkey:
            deliver_challenge(c, self._authkey)
            answer_challenge(c, self._authkey)
        return c

    def close(self):
        '''
        Close the bound socket or named pipe of `self`.
        '''
        if self._listener is not None:
            self._listener.close()
            self._listener = None

    address = property(lambda self: self._listener._address)
    last_accepted = property(lambda self: self._listener._last_accepted)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()


def Client(address, family=None, authkey=None):
    '''
    Returns a connection to the address of a `Listener`
    '''
    family = family or address_type(address)
    _validate_family(family)
    if family == 'AF_PIPE':
        c = PipeClient(address)
    else:
        c = SocketClient(address)

    if authkey is not None and not isinstance(authkey, bytes):
        raise TypeError('authkey should be a byte string')

    if authkey is not None:
        answer_challenge(c, authkey)
        deliver_challenge(c, authkey)

    return c
if sys.platform != 'win32':

    def Pipe(duplex=True, rnonblock=False, wnonblock=False):
        '''
        Returns pair of connection objects at either end of a pipe
        '''
        if duplex:
            s1, s2 = socket.socketpair()
            s1.setblocking(not rnonblock)
            s2.setblocking(not wnonblock)
            c1 = Connection(s1.detach())
            c2 = Connection(s2.detach())
        else:
            fd1, fd2 = os.pipe()
            if rnonblock:
                setblocking(fd1, 0)
            if wnonblock:
                setblocking(fd2, 0)
            c1 = Connection(fd1, writable=False)
            c2 = Connection(fd2, readable=False)

        return c1, c2

else:
    from billiard.forking import duplicate

    def Pipe(duplex=True, rnonblock=False, wnonblock=False):  # noqa
        '''
        Returns pair of connection objects at either end of a pipe
        '''
        address = arbitrary_address('AF_PIPE')
        if duplex:
            openmode = _winapi.PIPE_ACCESS_DUPLEX
            access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
            obsize, ibsize = BUFSIZE, BUFSIZE
        else:
            openmode = _winapi.PIPE_ACCESS_INBOUND
            access = _winapi.GENERIC_WRITE
            obsize, ibsize = 0, BUFSIZE

        h1 = _winapi.CreateNamedPipe(
            address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
            _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
            _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
        )
        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
        )
        _winapi.SetNamedPipeHandleState(
            h2, _winapi.PIPE_READMODE_MESSAGE, None, None
        )

        overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
        _, err = overlapped.GetOverlappedResult(True)
        assert err == 0

        c1 = PipeConnection(duplicate(h1, inheritable=True), writable=duplex)
        c2 = PipeConnection(duplicate(h2, inheritable=True), readable=duplex)
        _winapi.CloseHandle(h1)
        _winapi.CloseHandle(h2)
        return c1, c2
#
# Definitions for connections based on sockets
#


class SocketListener(object):
    '''
    Representation of a socket which is bound to an address and listening
    '''
    def __init__(self, address, family, backlog=1):
        self._socket = socket.socket(getattr(socket, family))
        try:
            # SO_REUSEADDR has different semantics on Windows (issue #2550).
            if os.name == 'posix':
                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_REUSEADDR, 1)
            self._socket.setblocking(True)
            self._socket.bind(address)
            self._socket.listen(backlog)
            self._address = self._socket.getsockname()
        except OSError:
            self._socket.close()
            raise
        self._family = family
        self._last_accepted = None

        if family == 'AF_UNIX':
            self._unlink = Finalize(
                self, os.unlink, args=(address, ), exitpriority=0
            )
        else:
            self._unlink = None

    def accept(self):
        while True:
            try:
                s, self._last_accepted = self._socket.accept()
            except OSError as exc:
                if exc.errno == errno.EINTR:
                    continue
                raise
            else:
                break
        s.setblocking(True)
        return Connection(s.detach())

    def close(self):
        self._socket.close()
        if self._unlink is not None:
            self._unlink()


def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    with socket.socket(getattr(socket, family)) as s:
        s.setblocking(True)
        s.connect(address)
        return Connection(s.detach())
#
# Definitions for connections based on named pipes
#

if sys.platform == 'win32':

    class PipeListener(object):
        '''
        Representation of a named pipe
        '''
        def __init__(self, address, backlog=None):
            self._address = address
            self._handle_queue = [self._new_handle(first=True)]

            self._last_accepted = None
            sub_debug('listener created with address=%r', self._address)
            self.close = Finalize(
                self, PipeListener._finalize_pipe_listener,
                args=(self._handle_queue, self._address), exitpriority=0
            )

        def _new_handle(self, first=False):
            flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
            if first:
                flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
            return _winapi.CreateNamedPipe(
                self._address, flags,
                _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
                _winapi.PIPE_WAIT,
                _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
                _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
            )

        def accept(self):
            self._handle_queue.append(self._new_handle())
            handle = self._handle_queue.pop(0)
            try:
                ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
            except OSError as e:
                if e.winerror != _winapi.ERROR_NO_DATA:
                    raise
                # ERROR_NO_DATA can occur if a client has already connected,
                # written data and then disconnected -- see Issue 14725.
            else:
                try:
                    _winapi.WaitForMultipleObjects([ov.event], False, INFINITE)
                except:
                    ov.cancel()
                    _winapi.CloseHandle(handle)
                    raise
                finally:
                    _, err = ov.GetOverlappedResult(True)
                    assert err == 0
            return PipeConnection(handle)

        @staticmethod
        def _finalize_pipe_listener(queue, address):
            sub_debug('closing listener with address=%r', address)
            for handle in queue:
                _winapi.CloseHandle(handle)

    def PipeClient(address,
                   errors=(_winapi.ERROR_SEM_TIMEOUT,
                           _winapi.ERROR_PIPE_BUSY)):
        '''
        Return a connection object connected to the pipe given by `address`
        '''
        t = _init_timeout()
        while 1:
            try:
                _winapi.WaitNamedPipe(address, 1000)
                h = _winapi.CreateFile(
                    address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
                    0, _winapi.NULL, _winapi.OPEN_EXISTING,
                    _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
                )
            except OSError as e:
                if e.winerror not in errors or _check_timeout(t):
                    raise
            else:
                break
        else:
            raise

        _winapi.SetNamedPipeHandleState(
            h, _winapi.PIPE_READMODE_MESSAGE, None, None
        )
        return PipeConnection(h)
#
# Authentication stuff
#

MESSAGE_LENGTH = 20

CHALLENGE = b'#CHALLENGE#'
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'


def deliver_challenge(connection, authkey):
    import hmac
    assert isinstance(authkey, bytes)
    message = os.urandom(MESSAGE_LENGTH)
    connection.send_bytes(CHALLENGE + message)
    digest = hmac.new(authkey, message).digest()
    response = connection.recv_bytes(256)        # reject large message
    if response == digest:
        connection.send_bytes(WELCOME)
    else:
        connection.send_bytes(FAILURE)
        raise AuthenticationError('digest received was wrong')


def answer_challenge(connection, authkey):
    import hmac
    assert isinstance(authkey, bytes)
    message = connection.recv_bytes(256)         # reject large message
    assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
    message = message[len(CHALLENGE):]
    digest = hmac.new(authkey, message).digest()
    connection.send_bytes(digest)
    response = connection.recv_bytes(256)        # reject large message
    if response != WELCOME:
        raise AuthenticationError('digest sent was rejected')
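The challenge/response pair above is what Listener.accept() and Client() run when an authkey is given. An end-to-end sketch using the stdlib multiprocessing.connection, which billiard's copy mirrors (threaded here only so one script can play both sides; the port is arbitrary):

import threading
from multiprocessing.connection import Listener, Client

def serve(listener):
    with listener.accept() as conn:     # HMAC challenge happens in accept()
        conn.send(conn.recv().upper())

listener = Listener(('localhost', 6001), authkey=b'secret')
threading.Thread(target=serve, args=(listener,)).start()

with Client(('localhost', 6001), authkey=b'secret') as conn:
    conn.send('ping')
    print(conn.recv())                  # -> 'PING'
listener.close()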
#
# Support for using xmlrpclib for serialization
#


class ConnectionWrapper(object):

    def __init__(self, conn, dumps, loads):
        self._conn = conn
        self._dumps = dumps
        self._loads = loads
        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
            obj = getattr(conn, attr)
            setattr(self, attr, obj)

    def send(self, obj):
        s = self._dumps(obj)
        self._conn.send_bytes(s)

    def recv(self):
        s = self._conn.recv_bytes()
        return self._loads(s)


def _xml_dumps(obj):
    return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')  # noqa


def _xml_loads(s):
    (obj,), method = xmlrpclib.loads(s.decode('utf-8'))  # noqa
    return obj


class XmlListener(Listener):
    def accept(self):
        global xmlrpclib
        import xmlrpc.client as xmlrpclib  # noqa
        obj = Listener.accept(self)
        return ConnectionWrapper(obj, _xml_dumps, _xml_loads)


def XmlClient(*args, **kwds):
    global xmlrpclib
    import xmlrpc.client as xmlrpclib  # noqa
    return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
#
# Wait
#

if sys.platform == 'win32':

    def _exhaustive_wait(handles, timeout):
        # Return ALL handles which are currently signalled.  (Only
        # returning the first signalled might create starvation issues.)
        L = list(handles)
        ready = []
        while L:
            res = _winapi.WaitForMultipleObjects(L, False, timeout)
            if res == WAIT_TIMEOUT:
                break
            elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
                res -= WAIT_OBJECT_0
            elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
                res -= WAIT_ABANDONED_0
            else:
                raise RuntimeError('Should not get here')
            ready.append(L[res])
            L = L[res+1:]
            timeout = 0
        return ready

    _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}

    def wait(object_list, timeout=None):
        '''
        Wait till an object in object_list is ready/readable.

        Returns list of those objects in object_list which are ready/readable.
        '''
        if timeout is None:
            timeout = INFINITE
        elif timeout < 0:
            timeout = 0
        else:
            timeout = int(timeout * 1000 + 0.5)

        object_list = list(object_list)
        waithandle_to_obj = {}
        ov_list = []
        ready_objects = set()
        ready_handles = set()

        try:
            for o in object_list:
                try:
                    fileno = getattr(o, 'fileno')
                except AttributeError:
                    waithandle_to_obj[o.__index__()] = o
                else:
                    # start an overlapped read of length zero
                    try:
                        ov, err = _winapi.ReadFile(fileno(), 0, True)
                    except OSError as e:
                        err = e.winerror
                        if err not in _ready_errors:
                            raise
                    if err == _winapi.ERROR_IO_PENDING:
                        ov_list.append(ov)
                        waithandle_to_obj[ov.event] = o
                    else:
                        # If o.fileno() is an overlapped pipe handle and
                        # err == 0 then there is a zero length message
                        # in the pipe, but it HAS NOT been consumed.
                        ready_objects.add(o)
                        timeout = 0

            ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
        finally:
            # request that overlapped reads stop
            for ov in ov_list:
                ov.cancel()

            # wait for all overlapped reads to stop
            for ov in ov_list:
                try:
                    _, err = ov.GetOverlappedResult(True)
                except OSError as e:
                    err = e.winerror
                    if err not in _ready_errors:
                        raise
                if err != _winapi.ERROR_OPERATION_ABORTED:
                    o = waithandle_to_obj[ov.event]
                    ready_objects.add(o)
                    if err == 0:
                        # If o.fileno() is an overlapped pipe handle then
                        # a zero length message HAS been consumed.
                        if hasattr(o, '_got_empty_message'):
                            o._got_empty_message = True

        ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
        return [o for o in object_list if o in ready_objects]

else:

    if hasattr(select, 'poll'):
        def _poll(fds, timeout):
            if timeout is not None:
                timeout = int(timeout * 1000)  # timeout is in milliseconds
            fd_map = {}
            pollster = select.poll()
            for fd in fds:
                pollster.register(fd, select.POLLIN)
                if hasattr(fd, 'fileno'):
                    fd_map[fd.fileno()] = fd
                else:
                    fd_map[fd] = fd
            ls = []
            for fd, event in pollster.poll(timeout):
                if event & select.POLLNVAL:
                    raise ValueError('invalid file descriptor %i' % fd)
                ls.append(fd_map[fd])
            return ls
    else:
        def _poll(fds, timeout):  # noqa
            return select.select(fds, [], [], timeout)[0]

    def wait(object_list, timeout=None):  # noqa
        '''
        Wait till an object in object_list is ready/readable.

        Returns list of those objects in object_list which are ready/readable.
        '''
        if timeout is not None:
            if timeout <= 0:
                return _poll(object_list, 0)
            else:
                deadline = monotonic() + timeout
        while True:
            try:
                return _poll(object_list, timeout)
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise
            if timeout is not None:
                timeout = deadline - monotonic()
@@ -1,249 +0,0 @@
#
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import copyreg
import functools
import io
import os
import pickle
import socket
import sys

__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']


HAVE_SEND_HANDLE = (sys.platform == 'win32' or
                    (hasattr(socket, 'CMSG_LEN') and
                     hasattr(socket, 'SCM_RIGHTS') and
                     hasattr(socket.socket, 'sendmsg')))

#
# Pickler subclass
#


class ForkingPickler(pickle.Pickler):
    '''Pickler subclass used by multiprocessing.'''
    _extra_reducers = {}
    _copyreg_dispatch_table = copyreg.dispatch_table

    def __init__(self, *args):
        super().__init__(*args)
        self.dispatch_table = self._copyreg_dispatch_table.copy()
        self.dispatch_table.update(self._extra_reducers)

    @classmethod
    def register(cls, type, reduce):
        '''Register a reduce function for a type.'''
        cls._extra_reducers[type] = reduce

    @classmethod
    def dumps(cls, obj, protocol=None):
        buf = io.BytesIO()
        cls(buf, protocol).dump(obj)
        return buf.getbuffer()

    loads = pickle.loads

register = ForkingPickler.register


def dump(obj, file, protocol=None):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    ForkingPickler(file, protocol).dump(obj)

#
# Platform specific definitions
#

if sys.platform == 'win32':
    # Windows
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    import _winapi

    def duplicate(handle, target_process=None, inheritable=False):
        '''Duplicate a handle.  (target_process is a handle not a pid!)'''
        if target_process is None:
            target_process = _winapi.GetCurrentProcess()
        return _winapi.DuplicateHandle(
            _winapi.GetCurrentProcess(), handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()

    class DupHandle(object):
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            '''Get the handle.  This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)

else:
    # Unix
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array

    # On MacOSX we should acknowledge receipt of fds -- see Issue14669
    ACKNOWLEDGE = sys.platform == 'darwin'

    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fds = array.array('i', fds)
        msg = bytes([len(fds) % 256])
        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')

    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        msg, ancdata, flags, addr = sock.recvmsg(
            1, socket.CMSG_LEN(bytes_size),
        )
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError(
                    'received %d items of ancdata' % len(ancdata),
                )
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                assert len(a) % 256 == msg[0]
                return list(a)
        except (ValueError, IndexError):
            pass
        raise RuntimeError('Invalid data received')

    def send_handle(conn, handle, destination_pid):  # noqa
        '''Send a handle over a local connection.'''
        fd = conn.fileno()
        with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])

    def recv_handle(conn):  # noqa
        '''Receive a handle over a local connection.'''
        fd = conn.fileno()
        with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]

    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        from .forking import Popen
        return Popen.duplicate_for_child(fd)
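The SCM_RIGHTS machinery in sendfds/recvfds can be exercised directly with a socketpair. A hedged sketch using the stdlib equivalents, which expose the same helpers on Python 3 (Unix only):

import os
import socket
from multiprocessing.reduction import sendfds, recvfds

a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
r, w = os.pipe()
sendfds(a, [r])              # ship the pipe's read end across the socket
r2 = recvfds(b, 1)[0]        # a new fd referring to the same pipe
os.write(w, b'hi')
print(os.read(r2, 2))        # b'hi', read through the received fd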
#
|
||||
# Try making some callable types picklable
|
||||
#
|
||||
|
||||
|
||||
def _reduce_method(m):
|
||||
if m.__self__ is None:
|
||||
return getattr, (m.__class__, m.__func__.__name__)
|
||||
else:
|
||||
return getattr, (m.__self__, m.__func__.__name__)
|
||||
|
||||
|
||||
class _C:
|
||||
def f(self):
|
||||
pass
|
||||
register(type(_C().f), _reduce_method)
|
||||
|
||||
|
||||
def _reduce_method_descriptor(m):
|
||||
return getattr, (m.__objclass__, m.__name__)
|
||||
register(type(list.append), _reduce_method_descriptor)
|
||||
register(type(int.__add__), _reduce_method_descriptor)
|
||||
|
||||
|
||||
def _reduce_partial(p):
|
||||
return _rebuild_partial, (p.func, p.args, p.keywords or {})
|
||||
|
||||
|
||||
def _rebuild_partial(func, args, keywords):
|
||||
return functools.partial(func, *args, **keywords)
|
||||
register(functools.partial, _reduce_partial)
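
The reduce-to-getattr trick above can be exercised with the standard pickle
machinery directly; a minimal sketch (the Point class is hypothetical, and
copyreg stands in for this module's register()):

    import copyreg
    import pickle

    class Point(object):
        def __init__(self, x):
            self.x = x
        def get(self):
            return self.x

    def _reduce_bound_method(m):
        # Rebuild a bound method as getattr(instance, name), exactly as
        # _reduce_method does above.
        return getattr, (m.__self__, m.__func__.__name__)

    copyreg.pickle(type(Point(0).get), _reduce_bound_method)
    p = Point(42)
    m = pickle.loads(pickle.dumps(p.get))
    assert m() == 42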

#
# Make sockets picklable
#

if sys.platform == 'win32':

    def _reduce_socket(s):
        from .resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)

    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)

else:

    def _reduce_socket(s):  # noqa
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)

    def _rebuild_socket(df, family, type, proto):  # noqa
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)
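
A self-contained approximation of what the POSIX branch does, using os.dup
in place of DupFd (which in billiard routes through
Popen.duplicate_for_child):

    import os
    import socket

    def _reduce_socket_sketch(s):
        # Stand-in for DupFd: duplicate the fd so the rebuilt socket does
        # not share lifetime with the original object.
        return _rebuild_socket_sketch, (os.dup(s.fileno()),
                                        s.family, s.type, s.proto)

    def _rebuild_socket_sketch(fd, family, type, proto):
        return socket.socket(family, type, proto, fileno=fd)

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    rebuild, args = _reduce_socket_sketch(s)
    clone = rebuild(*args)
    assert clone.getsockname() == s.getsockname()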

@ -4,7 +4,7 @@ import errno
import os
import sys

from .five import builtins, range
from .five import range

if sys.platform == 'win32':
    try:
@ -45,10 +45,9 @@ else: # non-posix platform
if sys.version_info[0] == 3:
    bytes = bytes
else:
    try:
        _bytes = builtins.bytes
    except AttributeError:
        _bytes = str
    _bytes = bytes

    # the 'bytes' alias in Python2 does not support an encoding argument.

    class bytes(_bytes): # noqa


@ -5,9 +5,9 @@ import sys
is_pypy = hasattr(sys, 'pypy_version_info')

if sys.version_info[0] == 3:
    from . import _connection3 as connection
    from .py3 import connection
else:
    from . import _connection as connection # noqa
    from .py2 import connection # noqa


if is_pypy:

@ -433,6 +433,8 @@ def main():
        os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
    except KeyError:
        pass
    except AttributeError:
        pass
    loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
    logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
    format = os.environ.get("_MP_FORK_LOGFORMAT_")

@ -1096,7 +1096,8 @@ class Pool(object):
                if popen is None or exitcode is not None:
                    # worker exited
                    debug('Supervisor: cleaning up worker %d', i)
                    worker.join()
                    if popen is not None:
                        worker.join()
                    debug('Supervisor: worked %d joined', i)
                    cleaned[worker.pid] = worker
                    exitcodes[worker.pid] = exitcode
@ -1591,7 +1592,8 @@ class Pool(object):
            if p.is_alive():
                # worker has not yet exited
                debug('cleaning up worker %d', p.pid)
                p.join()
                if p._popen is not None:
                    p.join()
        debug('pool workers joined')

    @property

0
awx/lib/site-packages/billiard/py2/__init__.py
Normal file
@ -19,14 +19,14 @@ import time
import tempfile
import itertools

from . import AuthenticationError
from . import reduction
from ._ext import _billiard, win32
from .compat import get_errno, bytes, setblocking
from .five import monotonic
from .forking import duplicate, close
from .reduction import ForkingPickler
from .util import get_temp_dir, Finalize, sub_debug, debug
from .. import AuthenticationError
from .. import reduction
from .._ext import _billiard, win32
from ..compat import get_errno, setblocking, bytes as cbytes
from ..five import monotonic
from ..forking import duplicate, close
from ..reduction import ForkingPickler
from ..util import get_temp_dir, Finalize, sub_debug, debug

try:
    WindowsError = WindowsError # noqa
@ -406,9 +406,9 @@ if sys.platform == 'win32':

MESSAGE_LENGTH = 20

CHALLENGE = bytes('#CHALLENGE#', 'ascii')
WELCOME = bytes('#WELCOME#', 'ascii')
FAILURE = bytes('#FAILURE#', 'ascii')
CHALLENGE = cbytes('#CHALLENGE#', 'ascii')
WELCOME = cbytes('#WELCOME#', 'ascii')
FAILURE = cbytes('#FAILURE#', 'ascii')


def deliver_challenge(connection, authkey):
@ -19,9 +19,9 @@ import threading

from pickle import Pickler

from . import current_process
from ._ext import _billiard, win32
from .util import register_after_fork, debug, sub_debug
from .. import current_process
from .._ext import _billiard, win32
from ..util import register_after_fork, debug, sub_debug

is_win32 = sys.platform == 'win32'
is_pypy = hasattr(sys, 'pypy_version_info')
@ -92,7 +92,7 @@ if sys.platform == 'win32':
    import _subprocess # noqa

    def send_handle(conn, handle, destination_pid):
        from .forking import duplicate
        from ..forking import duplicate
        process_handle = win32.OpenProcess(
            win32.PROCESS_ALL_ACCESS, False, destination_pid
        )
@ -136,7 +136,7 @@ def _get_listener():
    _lock.acquire()
    try:
        if _listener is None:
            from .connection import Listener
            from ..connection import Listener
            debug('starting listener and thread for sending handles')
            _listener = Listener(authkey=current_process().authkey)
            t = threading.Thread(target=_serve)
@ -149,7 +149,7 @@ def _get_listener():


def _serve():
    from .util import is_exiting, sub_warning
    from ..util import is_exiting, sub_warning

    while 1:
        try:
@ -170,7 +170,7 @@ def _serve():


def reduce_handle(handle):
    from .forking import Popen, duplicate
    from ..forking import Popen, duplicate
    if Popen.thread_is_spawning():
        return (None, Popen.duplicate_for_child(handle), True)
    dup_handle = duplicate(handle)
@ -180,7 +180,7 @@ def reduce_handle(handle):


def rebuild_handle(pickled_data):
    from .connection import Client
    from ..connection import Client
    address, handle, inherited = pickled_data
    if inherited:
        return handle
@ -3,8 +3,8 @@ from __future__ import absolute_import
import sys

if sys.version_info[0] == 3:
    from . import _reduction3 as reduction
    from .py3 import reduction
else:
    from . import _reduction as reduction # noqa
    from .py2 import reduction # noqa

sys.modules[__name__] = reduction

@ -37,7 +37,7 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError

__version__ = '2.21.2'
__version__ = '2.27.0'
Version = __version__ # for backware compatibility

# http://bugs.python.org/issue7980
@ -58,6 +58,7 @@ TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
                           r'#(?P<generation>[0-9]+)$')
VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
ENDPOINTS_PATH = os.path.join(os.path.dirname(__file__), 'endpoints.json')


def init_logging():
@ -195,6 +196,11 @@ def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None,

    :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
    :return: A connection to Amazon's Auto Scaling Service

    :type use_block_device_types bool
    :param use_block_device_types: Specifies whether to return described Launch Configs with block device mappings containing
        block device types, or a list of old style block device mappings (deprecated). This defaults to false for compatability
        with the old incorrect style.
    """
    from boto.ec2.autoscale import AutoScaleConnection
    return AutoScaleConnection(aws_access_key_id, aws_secret_access_key,
@ -311,6 +317,25 @@ def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)


def connect_rds2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string
    :param aws_access_key_id: Your AWS Access Key ID

    :type aws_secret_access_key: string
    :param aws_secret_access_key: Your AWS Secret Access Key

    :rtype: :class:`boto.rds2.layer1.RDSConnection`
    :return: A connection to RDS
    """
    from boto.rds2.layer1 import RDSConnection
    return RDSConnection(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        **kwargs
    )
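
Usage mirrors the other boto.connect_* helpers; a sketch with placeholder
credentials (describe_db_instances is one of the rds2 layer1 calls):

    import boto

    conn = boto.connect_rds2(
        aws_access_key_id='<access-key>',
        aws_secret_access_key='<secret-key>',
    )
    instances = conn.describe_db_instances()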


def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
    """
    :type aws_access_key_id: string

@ -36,6 +36,7 @@ import copy
import datetime
from email.utils import formatdate
import hmac
import os
import sys
import time
import urllib
@ -220,7 +221,6 @@ class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
        Select the headers from the request that need to be included
        in the StringToSign.
        """
        headers_to_sign = {}
        headers_to_sign = {'Host': self.host}
        for name, value in http_request.headers.items():
            lname = name.lower()
@ -329,7 +329,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
        parameter_names = sorted(http_request.params.keys())
        pairs = []
        for pname in parameter_names:
            pval = str(http_request.params[pname]).encode('utf-8')
            pval = boto.utils.get_utf8_value(http_request.params[pname])
            pairs.append(urllib.quote(pname, safe='') + '=' +
                         urllib.quote(pval, safe='-_~'))
        return '&'.join(pairs)
@ -341,7 +341,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
            return ""
        l = []
        for param in sorted(http_request.params):
            value = str(http_request.params[param])
            value = boto.utils.get_utf8_value(http_request.params[param])
            l.append('%s=%s' % (urllib.quote(param, safe='-_.~'),
                                urllib.quote(value, safe='-_.~')))
        return '&'.join(l)
@ -358,9 +358,11 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
        for header in headers_to_sign:
            c_name = header.lower().strip()
            raw_value = headers_to_sign[header]
            c_value = ' '.join(raw_value.strip().split())
            if '"' in raw_value:
                c_value = raw_value.strip()
            else:
                c_value = ' '.join(raw_value.strip().split())
            canonical.append('%s:%s' % (c_name, c_value))

        return '\n'.join(sorted(canonical))

    def signed_headers(self, headers_to_sign):
@ -498,7 +500,10 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
            # Safe to modify req.path here since
            # the signature will use req.auth_path.
            req.path = req.path.split('?')[0]
            req.path = req.path + '?' + qs

            if qs:
                # Don't insert the '?' unless there's actually a query string
                req.path = req.path + '?' + qs
        canonical_request = self.canonical_request(req)
        boto.log.debug('CanonicalRequest:\n%s' % canonical_request)
        string_to_sign = self.string_to_sign(req, canonical_request)
@ -534,7 +539,10 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
        # S3 does **NOT** do path normalization that SigV4 typically does.
        # Urlencode the path, **NOT** ``auth_path`` (because vhosting).
        path = urlparse.urlparse(http_request.path)
        encoded = urllib.quote(path.path)
        # Because some quoting may have already been applied, let's back it out.
        unquoted = urllib.unquote(path.path)
        # Requote, this time addressing all characters.
        encoded = urllib.quote(unquoted)
        return encoded

    def host_header(self, host, http_request):
@ -889,6 +897,12 @@ def get_auth_handler(host, config, provider, requested_capability=None):

def detect_potential_sigv4(func):
    def _wrapper(self):
        if os.environ.get('EC2_USE_SIGV4', False):
            return ['hmac-v4']

        if boto.config.get('ec2', 'use-sigv4', False):
            return ['hmac-v4']

        if hasattr(self, 'region'):
            if getattr(self.region, 'endpoint', ''):
                if '.cn-' in self.region.endpoint:
@ -900,6 +914,12 @@ def detect_potential_sigv4(func):

def detect_potential_s3sigv4(func):
    def _wrapper(self):
        if os.environ.get('S3_USE_SIGV4', False):
            return ['hmac-v4-s3']

        if boto.config.get('s3', 'use-sigv4', False):
            return ['hmac-v4-s3']

        if hasattr(self, 'host'):
            if '.cn-' in self.host:
                return ['hmac-v4-s3']
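
These wrappers make the SigV4 opt-in externally controllable; either toggle
below (illustrative) makes the wrapped _required_auth_capability report the
SigV4 handler:

    # Via the environment, before boto constructs the connection:
    #     S3_USE_SIGV4=1 python app.py
    import os
    os.environ['S3_USE_SIGV4'] = '1'

    # Or via ~/.boto configuration:
    #     [s3]
    #     use-sigv4 = True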

@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@ -31,31 +31,10 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    import boto.beanstalk.layer1
    return [RegionInfo(name='us-east-1',
                       endpoint='elasticbeanstalk.us-east-1.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            RegionInfo(name='us-west-1',
                       endpoint='elasticbeanstalk.us-west-1.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            RegionInfo(name='us-west-2',
                       endpoint='elasticbeanstalk.us-west-2.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            RegionInfo(name='ap-northeast-1',
                       endpoint='elasticbeanstalk.ap-northeast-1.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            RegionInfo(name='ap-southeast-1',
                       endpoint='elasticbeanstalk.ap-southeast-1.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            RegionInfo(name='ap-southeast-2',
                       endpoint='elasticbeanstalk.ap-southeast-2.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            RegionInfo(name='eu-west-1',
                       endpoint='elasticbeanstalk.eu-west-1.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            RegionInfo(name='sa-east-1',
                       endpoint='elasticbeanstalk.sa-east-1.amazonaws.com',
                       connection_cls=boto.beanstalk.layer1.Layer1),
            ]
    return get_regions(
        'elasticbeanstalk',
        connection_cls=boto.beanstalk.layer1.Layer1
    )


def connect_to_region(region_name, **kw_params):

@ -40,7 +40,7 @@ class Layer1(AWSQueryConnection):
                 proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 api_version=None, security_token=None):
                 api_version=None, security_token=None, profile_name=None):
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
@ -51,7 +51,7 @@ class Layer1(AWSQueryConnection):
                             proxy_user, proxy_pass,
                             self.region.endpoint, debug,
                             https_connection_factory, path,
                             security_token)
                             security_token, profile_name=profile_name)

    def _required_auth_capability(self):
        return ['hmac-v4']

@ -21,19 +21,9 @@
# IN THE SOFTWARE.

from connection import CloudFormationConnection
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions, load_regions

RegionData = {
    'us-east-1': 'cloudformation.us-east-1.amazonaws.com',
    'us-west-1': 'cloudformation.us-west-1.amazonaws.com',
    'us-west-2': 'cloudformation.us-west-2.amazonaws.com',
    'sa-east-1': 'cloudformation.sa-east-1.amazonaws.com',
    'eu-west-1': 'cloudformation.eu-west-1.amazonaws.com',
    'ap-northeast-1': 'cloudformation.ap-northeast-1.amazonaws.com',
    'ap-southeast-1': 'cloudformation.ap-southeast-1.amazonaws.com',
    'ap-southeast-2': 'cloudformation.ap-southeast-2.amazonaws.com',
    'cn-north-1': 'cloudformation.cn-north-1.amazonaws.com.cn',
}
RegionData = load_regions().get('cloudformation')


def regions():
@ -43,13 +33,10 @@ def regions():
    :rtype: list
    :return: A list of :class:`boto.RegionInfo` instances
    """
    regions = []
    for region_name in RegionData:
        region = RegionInfo(name=region_name,
                            endpoint=RegionData[region_name],
                            connection_cls=CloudFormationConnection)
        regions.append(region)
    return regions
    return get_regions(
        'cloudformation',
        connection_cls=CloudFormationConnection
    )
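
Caller-visible behavior is unchanged by the move to endpoints.json; for
example:

    import boto.cloudformation

    # Same public result as the old hard-coded RegionData dict.
    print([r.name for r in boto.cloudformation.regions()])
    conn = boto.cloudformation.connect_to_region('us-east-1')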


def connect_to_region(region_name, **kw_params):

@ -1,4 +1,5 @@
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@ -29,9 +30,28 @@ from boto.compat import json


class CloudFormationConnection(AWSQueryConnection):

    """
    A Connection to the CloudFormation Service.
    AWS CloudFormation
    AWS CloudFormation enables you to create and manage AWS
    infrastructure deployments predictably and repeatedly. AWS
    CloudFormation helps you leverage AWS products such as Amazon EC2,
    EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable,
    highly scalable, cost effective applications without worrying
    about creating and configuring the underlying AWS infrastructure.

    With AWS CloudFormation, you declare all of your resources and
    dependencies in a template file. The template defines a collection
    of resources as a single unit called a stack. AWS CloudFormation
    creates and deletes all member resources of the stack together and
    manages all dependencies between the resources for you.

    For more information about this product, go to the `CloudFormation
    Product Page`_.

    Amazon CloudFormation makes use of other AWS products. If you need
    additional technical information about a specific AWS product, you
    can find the product's technical documentation at
    `http://aws.amazon.com/documentation/`_.
    """
    APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15')
    DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1')
@ -52,7 +72,8 @@ class CloudFormationConnection(AWSQueryConnection):
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 converter=None, security_token=None, validate_certs=True):
                 converter=None, security_token=None, validate_certs=True,
                 profile_name=None):
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                self.DefaultRegionEndpoint, CloudFormationConnection)
@ -64,7 +85,8 @@ class CloudFormationConnection(AWSQueryConnection):
                                    self.region.endpoint, debug,
                                    https_connection_factory, path,
                                    security_token,
                                    validate_certs=validate_certs)
                                    validate_certs=validate_certs,
                                    profile_name=profile_name)

    def _required_auth_capability(self):
        return ['hmac-v4']
@ -74,50 +96,117 @@ class CloudFormationConnection(AWSQueryConnection):
        return {True: "true", False: "false"}[v]

    def _build_create_or_update_params(self, stack_name, template_body,
            template_url, parameters,
            notification_arns, disable_rollback,
            timeout_in_minutes, capabilities, tags):
            template_url, parameters, disable_rollback, timeout_in_minutes,
            notification_arns, capabilities, on_failure, stack_policy_body,
            stack_policy_url, tags, stack_policy_during_update_body=None,
            stack_policy_during_update_url=None):
        """
        Helper that creates JSON parameters needed by a Stack Create or
        Stack Update call.

        :type stack_name: string
        :param stack_name: The name of the Stack, must be unique amoung running
            Stacks
        :param stack_name:
        The name associated with the stack. The name must be unique within your
            AWS account.

        Must contain only alphanumeric characters (case sensitive) and start
            with an alpha character. Maximum length of the name is 255
            characters.

        :type template_body: string
        :param template_body: The template body (JSON string)
        :param template_body: Structure containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: An S3 URL of a stored template JSON document. If
            both the template_body and template_url are
            specified, the template_body takes precedence
        :param template_url: Location of file containing the template body. The
            URL must point to a template (max size: 307,200 bytes) located in
            an S3 bucket in the same region as the stack. For more information,
            go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type parameters: list of tuples
        :param parameters: A list of (key, value) pairs for template input
            parameters.
        :type parameters: list
        :param parameters: A list of key/value tuples that specify input
            parameters for the stack.

        :type notification_arns: list of strings
        :param notification_arns: A list of SNS topics to send Stack event
            notifications to.
        :type disable_rollback: boolean
        :param disable_rollback: Set to `True` to disable rollback of the stack
            if stack creation failed. You can specify either `DisableRollback`
            or `OnFailure`, but not both.
        Default: `False`

        :type disable_rollback: bool
        :param disable_rollback: Indicates whether or not to rollback on
            failure.
        :type timeout_in_minutes: integer
        :param timeout_in_minutes: The amount of time that can pass before the
            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
            or is set to `False`, the stack will be rolled back.

        :type timeout_in_minutes: int
        :param timeout_in_minutes: Maximum amount of time to let the Stack
            spend creating itself. If this timeout is exceeded,
            the Stack will enter the CREATE_FAILED state.
        :type notification_arns: list
        :param notification_arns: The Simple Notification Service (SNS) topic
            ARNs to publish stack related events. You can find your SNS topic
            ARNs using the `SNS console`_ or your Command Line Interface (CLI).

        :type capabilities: list
        :param capabilities: The list of capabilities you want to allow in
            the stack. Currently, the only valid capability is
            'CAPABILITY_IAM'.
        :param capabilities: The list of capabilities that you want to allow in
            the stack. If your template contains certain resources, you must
            specify the CAPABILITY_IAM value for this parameter; otherwise,
            this action returns an InsufficientCapabilities error. The
            following resources require you to specify the capabilities
            parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
            `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
            `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
            `AWS::IAM::UserToGroupAddition`_.

        :type tags: dict
        :param tags: A dictionary of (key, value) pairs of tags to
            associate with this stack.
        :type on_failure: string
        :param on_failure: Determines what action will be taken if stack
            creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
            DELETE. You can specify either `OnFailure` or `DisableRollback`,
            but not both.
        Default: `ROLLBACK`

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the stack policy body.
            (For more information, go to ` Prevent Updates to Stack Resources`_
            in the AWS CloudFormation User Guide.)
        If you pass `StackPolicyBody` and `StackPolicyURL`, only
            `StackPolicyBody` is used.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the stack
            policy. The URL must point to a policy (max size: 16KB) located in
            an S3 bucket in the same region as the stack. If you pass
            `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
            used.

        :type tags: list
        :param tags: A set of user-defined `Tags` to associate with this stack,
            represented by key/value pairs. Tags defined for the stack are
            propagated to EC2 resources that are created as part of the stack.
            A maximum number of 10 tags can be specified.

        :type stack_policy_during_update_body: string
        :param stack_policy_during_update_body: Structure containing the
            temporary overriding stack policy body. If you pass
            `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
            only `StackPolicyDuringUpdateBody` is used.
        If you want to update protected resources, specify a temporary
            overriding stack policy during this update. If you do not specify a
            stack policy, the current policy that associated with the stack
            will be used.

        :type stack_policy_during_update_url: string
        :param stack_policy_during_update_url: Location of a file containing
            the temporary overriding stack policy. The URL must point to a
            policy (max size: 16KB) located in an S3 bucket in the same region
            as the stack. If you pass `StackPolicyDuringUpdateBody` and
            `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
            used.
        If you want to update protected resources, specify a temporary
            overriding stack policy during this update. If you do not specify a
            stack policy, the current policy that is associated with the stack
            will be used.

        :rtype: dict
        :return: JSON parameters represented as a Python dict.
@ -131,7 +220,7 @@ class CloudFormationConnection(AWSQueryConnection):
        if template_body and template_url:
            boto.log.warning("If both TemplateBody and TemplateURL are"
                " specified, only TemplateBody will be honored by the API")
        if len(parameters) > 0:
        if parameters and len(parameters) > 0:
            for i, (key, value) in enumerate(parameters):
                params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
                params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
@ -142,107 +231,224 @@ class CloudFormationConnection(AWSQueryConnection):
            for i, (key, value) in enumerate(tags.items()):
                params['Tags.member.%d.Key' % (i + 1)] = key
                params['Tags.member.%d.Value' % (i + 1)] = value
        if len(notification_arns) > 0:
        if notification_arns and len(notification_arns) > 0:
            self.build_list_params(params, notification_arns,
                                   "NotificationARNs.member")
        if timeout_in_minutes:
            params['TimeoutInMinutes'] = int(timeout_in_minutes)
        if disable_rollback is not None:
            params['DisableRollback'] = str(
                disable_rollback).lower()
        if on_failure is not None:
            params['OnFailure'] = on_failure
        if stack_policy_body is not None:
            params['StackPolicyBody'] = stack_policy_body
        if stack_policy_url is not None:
            params['StackPolicyURL'] = stack_policy_url
        if stack_policy_during_update_body is not None:
            params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body
        if stack_policy_during_update_url is not None:
            params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url
        return params

    def create_stack(self, stack_name, template_body=None, template_url=None,
            parameters=[], notification_arns=[], disable_rollback=False,
            timeout_in_minutes=None, capabilities=None, tags=None):
    def _do_request(self, call, params, path, method):
        """
        Creates a CloudFormation Stack as specified by the template.
        Do a request via ``self.make_request`` and parse the JSON response.

        :type stack_name: string
        :param stack_name: The name of the Stack, must be unique amoung running
            Stacks
        :type call: string
        :param call: Call name, e.g. ``CreateStack``

        :type template_body: string
        :param template_body: The template body (JSON string)
        :type params: dict
        :param params: Dictionary of call parameters

        :type template_url: string
        :param template_url: An S3 URL of a stored template JSON document. If
            both the template_body and template_url are
            specified, the template_body takes precedence
        :type path: string
        :param path: Server path

        :type parameters: list of tuples
        :param parameters: A list of (key, value) pairs for template input
            parameters.
        :type method: string
        :param method: HTTP method to use

        :type notification_arns: list of strings
        :param notification_arns: A list of SNS topics to send Stack event
            notifications to.

        :type disable_rollback: bool
        :param disable_rollback: Indicates whether or not to rollback on
            failure.

        :type timeout_in_minutes: int
        :param timeout_in_minutes: Maximum amount of time to let the Stack
            spend creating itself. If this timeout is exceeded,
            the Stack will enter the CREATE_FAILED state.

        :type capabilities: list
        :param capabilities: The list of capabilities you want to allow in
            the stack. Currently, the only valid capability is
            'CAPABILITY_IAM'.

        :type tags: dict
        :param tags: A dictionary of (key, value) pairs of tags to
            associate with this stack.

        :rtype: string
        :return: The unique Stack ID.
        :rtype: dict
        :return: Parsed JSON response data
        """
        params = self._build_create_or_update_params(stack_name,
            template_body, template_url, parameters, notification_arns,
            disable_rollback, timeout_in_minutes, capabilities, tags)
        response = self.make_request('CreateStack', params, '/', 'POST')
        response = self.make_request(call, params, path, method)
        body = response.read()
        if response.status == 200:
            body = json.loads(body)
            return body['CreateStackResponse']['CreateStackResult']['StackId']
            return body
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
            raise self.ResponseError(response.status, response.reason, body=body)

    def create_stack(self, stack_name, template_body=None, template_url=None,
            parameters=None, notification_arns=None, disable_rollback=None,
            timeout_in_minutes=None, capabilities=None, tags=None,
            on_failure=None, stack_policy_body=None, stack_policy_url=None):
        """
        Creates a stack as specified in the template. After the call
        completes successfully, the stack creation starts. You can
        check the status of the stack via the DescribeStacks API.
        Currently, the limit for stacks is 20 stacks per account per
        region.

        :type stack_name: string
        :param stack_name:
        The name associated with the stack. The name must be unique within your
            AWS account.

        Must contain only alphanumeric characters (case sensitive) and start
            with an alpha character. Maximum length of the name is 255
            characters.

        :type template_body: string
        :param template_body: Structure containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template (max size: 307,200 bytes) located in
            an S3 bucket in the same region as the stack. For more information,
            go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type parameters: list
        :param parameters: A list of key/value tuples that specify input
            parameters for the stack.

        :type disable_rollback: boolean
        :param disable_rollback: Set to `True` to disable rollback of the stack
            if stack creation failed. You can specify either `DisableRollback`
            or `OnFailure`, but not both.
        Default: `False`

        :type timeout_in_minutes: integer
        :param timeout_in_minutes: The amount of time that can pass before the
            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
            or is set to `False`, the stack will be rolled back.

        :type notification_arns: list
        :param notification_arns: The Simple Notification Service (SNS) topic
            ARNs to publish stack related events. You can find your SNS topic
            ARNs using the `SNS console`_ or your Command Line Interface (CLI).

        :type capabilities: list
        :param capabilities: The list of capabilities that you want to allow in
            the stack. If your template contains certain resources, you must
            specify the CAPABILITY_IAM value for this parameter; otherwise,
            this action returns an InsufficientCapabilities error. The
            following resources require you to specify the capabilities
            parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
            `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
            `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
            `AWS::IAM::UserToGroupAddition`_.

        :type on_failure: string
        :param on_failure: Determines what action will be taken if stack
            creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
            DELETE. You can specify either `OnFailure` or `DisableRollback`,
            but not both.
        Default: `ROLLBACK`

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the stack policy body.
            (For more information, go to ` Prevent Updates to Stack Resources`_
            in the AWS CloudFormation User Guide.)
        If you pass `StackPolicyBody` and `StackPolicyURL`, only
            `StackPolicyBody` is used.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the stack
            policy. The URL must point to a policy (max size: 16KB) located in
            an S3 bucket in the same region as the stack. If you pass
            `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
            used.

        :type tags: dict
        :param tags: A set of user-defined `Tags` to associate with this stack,
            represented by key/value pairs. Tags defined for the stack are
            propagated to EC2 resources that are created as part of the stack.
            A maximum number of 10 tags can be specified.
        """
        params = self._build_create_or_update_params(stack_name, template_body,
            template_url, parameters, disable_rollback, timeout_in_minutes,
            notification_arns, capabilities, on_failure, stack_policy_body,
            stack_policy_url, tags)
        body = self._do_request('CreateStack', params, '/', 'POST')
        return body['CreateStackResponse']['CreateStackResult']['StackId']
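
A sketch of calling the reworked create_stack (template path, key names, and
tags are placeholders; on_failure and disable_rollback are mutually exclusive
per the docstring):

    import boto.cloudformation

    conn = boto.cloudformation.connect_to_region('us-east-1')
    stack_id = conn.create_stack(
        'my-stack',
        template_body=open('template.json').read(),
        parameters=[('KeyName', 'my-key')],
        tags={'env': 'test'},
        on_failure='ROLLBACK',
    )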

    def update_stack(self, stack_name, template_body=None, template_url=None,
            parameters=[], notification_arns=[], disable_rollback=False,
            timeout_in_minutes=None, capabilities=None, tags=None):
            parameters=None, notification_arns=None, disable_rollback=False,
            timeout_in_minutes=None, capabilities=None, tags=None,
            stack_policy_during_update_body=None,
            stack_policy_during_update_url=None,
            stack_policy_body=None, stack_policy_url=None):
        """
        Updates a CloudFormation Stack as specified by the template.
        Updates a stack as specified in the template. After the call
        completes successfully, the stack update starts. You can check
        the status of the stack via the DescribeStacks action.



        **Note: **You cannot update `AWS::S3::Bucket`_ resources, for
        example, to add or modify tags.




        To get a copy of the template for an existing stack, you can
        use the GetTemplate action.

        Tags that were associated with this stack during creation time
        will still be associated with the stack after an `UpdateStack`
        operation.

        For more information about creating an update template,
        updating a stack, and monitoring the progress of the update,
        see `Updating a Stack`_.

        :type stack_name: string
        :param stack_name: The name of the Stack, must be unique amoung running
            Stacks.
        :param stack_name:
        The name or stack ID of the stack to update.

        Must contain only alphanumeric characters (case sensitive) and start
            with an alpha character. Maximum length of the name is 255
            characters.

        :type template_body: string
        :param template_body: The template body (JSON string)
        :param template_body: Structure containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: An S3 URL of a stored template JSON document. If
            both the template_body and template_url are
            specified, the template_body takes precedence.
        :param template_url: Location of file containing the template body. The
            URL must point to a template located in an S3 bucket in the same
            region as the stack. For more information, go to `Template
            Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type parameters: list of tuples
        :param parameters: A list of (key, value) pairs for template input
            parameters.
        :type parameters: list
        :param parameters: A list of key/value tuples that specify input
            parameters for the stack.

        :type notification_arns: list of strings
        :param notification_arns: A list of SNS topics to send Stack event
            notifications to.
        :type notification_arns: list
        :param notification_arns: The Simple Notification Service (SNS) topic
            ARNs to publish stack related events. You can find your SNS topic
            ARNs using the `SNS console`_ or your Command Line Interface (CLI).

        :type disable_rollback: bool
        :param disable_rollback: Indicates whether or not to rollback on
            failure.

        :type timeout_in_minutes: int
        :param timeout_in_minutes: Maximum amount of time to let the Stack
            spend creating itself. If this timeout is exceeded,
            the Stack will enter the CREATE_FAILED state
        :type timeout_in_minutes: integer
        :param timeout_in_minutes: The amount of time that can pass before the
            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
            or is set to `False`, the stack will be rolled back.

        :type capabilities: list
        :param capabilities: The list of capabilities you want to allow in
@ -250,38 +456,86 @@ class CloudFormationConnection(AWSQueryConnection):
            'CAPABILITY_IAM'.

        :type tags: dict
        :param tags: A dictionary of (key, value) pairs of tags to
            associate with this stack.
        :param tags: A set of user-defined `Tags` to associate with this stack,
            represented by key/value pairs. Tags defined for the stack are
            propagated to EC2 resources that are created as part of the stack.
            A maximum number of 10 tags can be specified.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template located in an S3 bucket in the same
            region as the stack. For more information, go to `Template
            Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type stack_policy_during_update_body: string
        :param stack_policy_during_update_body: Structure containing the
            temporary overriding stack policy body. If you pass
            `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
            only `StackPolicyDuringUpdateBody` is used.
        If you want to update protected resources, specify a temporary
            overriding stack policy during this update. If you do not specify a
            stack policy, the current policy that associated with the stack
            will be used.

        :type stack_policy_during_update_url: string
        :param stack_policy_during_update_url: Location of a file containing
            the temporary overriding stack policy. The URL must point to a
            policy (max size: 16KB) located in an S3 bucket in the same region
            as the stack. If you pass `StackPolicyDuringUpdateBody` and
            `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
            used.
        If you want to update protected resources, specify a temporary
            overriding stack policy during this update. If you do not specify a
            stack policy, the current policy that is associated with the stack
            will be used.

        :rtype: string
        :return: The unique Stack ID.
        """
        params = self._build_create_or_update_params(stack_name,
            template_body, template_url, parameters, notification_arns,
            disable_rollback, timeout_in_minutes, capabilities, tags)
        response = self.make_request('UpdateStack', params, '/', 'POST')
        body = response.read()
        if response.status == 200:
            body = json.loads(body)
            return body['UpdateStackResponse']['UpdateStackResult']['StackId']
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
        params = self._build_create_or_update_params(stack_name, template_body,
            template_url, parameters, disable_rollback, timeout_in_minutes,
            notification_arns, capabilities, None, stack_policy_body,
            stack_policy_url, tags, stack_policy_during_update_body,
            stack_policy_during_update_url)
        body = self._do_request('UpdateStack', params, '/', 'POST')
        return body['UpdateStackResponse']['UpdateStackResult']['StackId']

    def delete_stack(self, stack_name_or_id):
        """
        Deletes a specified stack. Once the call completes
        successfully, stack deletion starts. Deleted stacks do not
        show up in the DescribeStacks API if the deletion has been
        completed successfully.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated
            with the stack.

        """
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
        # TODO: change this to get_status ?
        response = self.make_request('DeleteStack', params, '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
        return self._do_request('DeleteStack', params, '/', 'GET')

    def describe_stack_events(self, stack_name_or_id=None, next_token=None):
        """
        Returns all stack related events for a specified stack. For
        more information about a stack's event history, go to
        `Stacks`_ in the AWS CloudFormation User Guide.
        Events are returned, even if the stack never existed or has
        been successfully deleted.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated
            with the stack.
        Default: There is no default value.

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            events, if there is one.
        Default: There is no default value.

        """
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
@ -291,21 +545,82 @@ class CloudFormationConnection(AWSQueryConnection):
                                            StackEvent)])

    def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
        """
        Returns a description of the specified resource in the
        specified stack.

        For deleted stacks, DescribeStackResource returns resource
        information for up to 90 days after the stack has been
        deleted.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated
            with the stack.
        Default: There is no default value.

        :type logical_resource_id: string
        :param logical_resource_id: The logical name of the resource as
            specified in the template.
        Default: There is no default value.

        """
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id,
                  'LogicalResourceId': logical_resource_id}
        response = self.make_request('DescribeStackResource', params,
                                     '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
        return self._do_request('DescribeStackResource', params, '/', 'GET')

    def describe_stack_resources(self, stack_name_or_id=None,
                                 logical_resource_id=None,
                                 physical_resource_id=None):
        """
        Returns AWS resource descriptions for running and deleted
        stacks. If `StackName` is specified, all the associated
        resources that are part of the stack are returned. If
        `PhysicalResourceId` is specified, the associated resources of
        the stack that the resource belongs to are returned.
        Only the first 100 resources will be returned. If your stack
        has more resources than this, you should use
        `ListStackResources` instead.
        For deleted stacks, `DescribeStackResources` returns resource
        information for up to 90 days after the stack has been
        deleted.

        You must specify either `StackName` or `PhysicalResourceId`,
        but not both. In addition, you can specify `LogicalResourceId`
        to filter the returned result. For more information about
        resources, the `LogicalResourceId` and `PhysicalResourceId`,
        go to the `AWS CloudFormation User Guide`_.
        A `ValidationError` is returned if you specify both
        `StackName` and `PhysicalResourceId` in the same request.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated
            with the stack.
        Required: Conditional. If you do not specify `StackName`, you must
            specify `PhysicalResourceId`.

        Default: There is no default value.

        :type logical_resource_id: string
        :param logical_resource_id: The logical name of the resource as
            specified in the template.
        Default: There is no default value.

        :type physical_resource_id: string
        :param physical_resource_id: The name or unique identifier that
            corresponds to a physical instance ID of a resource supported by
            AWS CloudFormation.
        For example, for an Amazon Elastic Compute Cloud (EC2) instance,
            `PhysicalResourceId` corresponds to the `InstanceId`. You can pass
            the EC2 `InstanceId` to `DescribeStackResources` to find which
            stack the instance belongs to and what other resources are part of
            the stack.

        Required: Conditional. If you do not specify `PhysicalResourceId`, you
            must specify `StackName`.

        Default: There is no default value.

        """
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
@ -316,35 +631,110 @@ class CloudFormationConnection(AWSQueryConnection):
        return self.get_list('DescribeStackResources', params,
                             [('member', StackResource)])

    def describe_stacks(self, stack_name_or_id=None):
    def describe_stacks(self, stack_name_or_id=None, next_token=None):
        """
        Returns the description for the specified stack; if no stack
        name was specified, then it returns the description for all
        the stacks created.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated
            with the stack.
        Default: There is no default value.

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            stacks, if there is one.

        """
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
        if next_token is not None:
            params['NextToken'] = next_token
        return self.get_list('DescribeStacks', params, [('member', Stack)])
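
With the new next_token parameter, results can be paged; a sketch (assuming
boto's ResultSet exposes the returned token as next_token, and conn is a
CloudFormationConnection as above):

    stacks = conn.describe_stacks()
    while True:
        for stack in stacks:
            print(stack.stack_name, stack.stack_status)
        token = getattr(stacks, 'next_token', None)
        if not token:
            break
        stacks = conn.describe_stacks(next_token=token)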

    def get_template(self, stack_name_or_id):
        """
        Returns the template body for a specified stack. You can get
        the template for running or deleted stacks.

        For deleted stacks, GetTemplate returns the template for up to
        90 days after the stack has been deleted.
        If the template does not exist, a `ValidationError` is
        returned.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated
            with the stack, which are not always interchangeable:

        + Running stacks: You can specify either the stack's name or its unique
            stack ID.
        + Deleted stacks: You must specify the unique stack ID.


        Default: There is no default value.

        """
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
        response = self.make_request('GetTemplate', params, '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
        return self._do_request('GetTemplate', params, '/', 'GET')

    def list_stack_resources(self, stack_name_or_id, next_token=None):
        """
        Returns descriptions of all resources of the specified stack.

        For deleted stacks, ListStackResources returns resource
        information for up to 90 days after the stack has been
        deleted.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated
            with the stack, which are not always interchangeable:

        + Running stacks: You can specify either the stack's name or its unique
            stack ID.
        + Deleted stacks: You must specify the unique stack ID.


        Default: There is no default value.

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            stack resource summaries, if there is one.
        Default: There is no default value.

        """
        params = {'StackName': stack_name_or_id}
        if next_token:
            params['NextToken'] = next_token
        return self.get_list('ListStackResources', params,
                             [('member', StackResourceSummary)])

    def list_stacks(self, stack_status_filters=[], next_token=None):
    def list_stacks(self, stack_status_filters=None, next_token=None):
        """
        Returns the summary information for stacks whose status
        matches the specified StackStatusFilter. Summary information
        for stacks that have been deleted is kept for 90 days after
        the stack is deleted. If no StackStatusFilter is specified,
        summary information for all stacks is returned (including
        existing stacks and stacks that have been deleted).

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            stacks, if there is one.
        Default: There is no default value.

        :type stack_status_filter: list
        :param stack_status_filter: Stack status to use as a filter. Specify
            one or more stack status codes to list only stacks with the
            specified status codes. For a complete list of stack status codes,
            see the `StackStatus` parameter of the Stack data type.

        """
        params = {}
        if next_token:
            params['NextToken'] = next_token
        if len(stack_status_filters) > 0:
        if stack_status_filters and len(stack_status_filters) > 0:
            self.build_list_params(params, stack_status_filters,
                                   "StackStatusFilter.member")

@ -352,6 +742,25 @@ class CloudFormationConnection(AWSQueryConnection):
                             [('member', StackSummary)])

    def validate_template(self, template_body=None, template_url=None):
        """
        Validates a specified template.

        :type template_body: string
        :param template_body: String containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template (max size: 307,200 bytes) located in
            an S3 bucket in the same region as the stack. For more information,
            go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        """
        params = {}
        if template_body:
            params['TemplateBody'] = template_body
@ -364,7 +773,116 @@ class CloudFormationConnection(AWSQueryConnection):
                                 verb="POST")

    def cancel_update_stack(self, stack_name_or_id=None):
        """
        Cancels an update on the specified stack. If the call
        completes successfully, the stack will roll back the update
        and revert to the previous stack configuration.
        Only stacks that are in the UPDATE_IN_PROGRESS state can be
        canceled.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or the unique identifier associated with
            the stack.

        """
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
        return self.get_status('CancelUpdateStack', params)

    def estimate_template_cost(self, template_body=None, template_url=None,
                               parameters=None):
        """
        Returns the estimated monthly cost of a template. The return
        value is an AWS Simple Monthly Calculator URL with a query
        string that describes the resources required to run the
        template.

        :type template_body: string
        :param template_body: Structure containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template located in an S3 bucket in the same
            region as the stack. For more information, go to `Template
            Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type parameters: list
        :param parameters: A list of key/value tuples that specify input
            parameters for the template.

        :rtype: string
        :returns: URL to pre-filled cost calculator
        """
        params = {'ContentType': "JSON"}
        if template_body is not None:
            params['TemplateBody'] = template_body
        if template_url is not None:
            params['TemplateURL'] = template_url
        if parameters and len(parameters) > 0:
            for i, (key, value) in enumerate(parameters):
                params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
                params['Parameters.member.%d.ParameterValue' % (i + 1)] = value

        response = self._do_request('EstimateTemplateCost', params, '/', 'POST')
        return response['EstimateTemplateCostResponse']\
                       ['EstimateTemplateCostResult']\
                       ['Url']
|
||||
|
||||
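A quick usage sketch for the `estimate_template_cost` method above; the region, template file name, and parameter values are illustrative assumptions, not part of the diff:

    import boto.cloudformation

    # Credentials come from the usual boto config/environment lookup.
    conn = boto.cloudformation.connect_to_region('us-east-1')
    url = conn.estimate_template_cost(
        template_body=open('stack.template').read(),
        parameters=[('InstanceType', 't1.micro')])
    print url   # pre-filled AWS Simple Monthly Calculator link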
    def get_stack_policy(self, stack_name_or_id):
        """
        Returns the stack policy for a specified stack. If a stack
        doesn't have a policy, a null value is returned.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or stack ID that is associated with
            the stack whose policy you want to get.

        :rtype: string
        :return: The policy JSON document
        """
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
        response = self._do_request('GetStackPolicy', params, '/', 'POST')
        return response['GetStackPolicyResponse']\
                       ['GetStackPolicyResult']\
                       ['StackPolicyBody']

    def set_stack_policy(self, stack_name_or_id, stack_policy_body=None,
                         stack_policy_url=None):
        """
        Sets a stack policy for a specified stack.

        :type stack_name_or_id: string
        :param stack_name_or_id: The name or stack ID that you want to
            associate a policy with.

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the stack policy body.
            (For more information, go to ` Prevent Updates to Stack Resources`_
            in the AWS CloudFormation User Guide.)
        You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
            passed, only `StackPolicyBody` is used.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the stack
            policy. The URL must point to a policy (max size: 16KB) located in
            an S3 bucket in the same region as the stack. You must pass
            `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
            `StackPolicyBody` is used.

        """
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id, }
        if stack_policy_body is not None:
            params['StackPolicyBody'] = stack_policy_body
        if stack_policy_url is not None:
            params['StackPolicyURL'] = stack_policy_url

        response = self._do_request('SetStackPolicy', params, '/', 'POST')
        return response['SetStackPolicyResponse']\
                       ['SetStackPolicyResult']

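To show the two policy calls together, a minimal set/get roundtrip reusing `conn` from the previous sketch (the policy document itself is a made-up example):

    import json

    policy = {'Statement': [{
        'Effect': 'Deny',
        'Action': 'Update:Replace',
        'Principal': '*',
        'Resource': 'LogicalResourceId/ProductionDatabase',
    }]}
    conn.set_stack_policy('my-stack', stack_policy_body=json.dumps(policy))
    print conn.get_stack_policy('my-stack')   # echoes the policy JSON back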
awx/lib/site-packages/boto/cloudformation/layer1.py (new file, 773 lines)
@@ -0,0 +1,773 @@
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

try:
    import json
except ImportError:
    import simplejson as json

import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudformation import exceptions


class CloudFormationConnection(AWSQueryConnection):
    """
    AWS CloudFormation
    AWS CloudFormation enables you to create and manage AWS
    infrastructure deployments predictably and repeatedly. AWS
    CloudFormation helps you leverage AWS products such as Amazon EC2,
    EBS, Amazon SNS, ELB, and Auto Scaling to build highly-reliable,
    highly scalable, cost effective applications without worrying
    about creating and configuring the underlying AWS infrastructure.

    With AWS CloudFormation, you declare all of your resources and
    dependencies in a template file. The template defines a collection
    of resources as a single unit called a stack. AWS CloudFormation
    creates and deletes all member resources of the stack together and
    manages all dependencies between the resources for you.

    For more information about this product, go to the `CloudFormation
    Product Page`_.

    Amazon CloudFormation makes use of other AWS products. If you need
    additional technical information about a specific AWS product, you
    can find the product's technical documentation at
    `http://aws.amazon.com/documentation/`_.
    """
    APIVersion = "2010-05-15"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "cloudformation.us-east-1.amazonaws.com"
    ResponseError = JSONResponseError

    _faults = {
        "AlreadyExistsException": exceptions.AlreadyExistsException,
        "InsufficientCapabilitiesException": exceptions.InsufficientCapabilitiesException,
        "LimitExceededException": exceptions.LimitExceededException,
    }


    def __init__(self, **kwargs):
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)

        if 'host' not in kwargs:
            kwargs['host'] = region.endpoint

        super(CloudFormationConnection, self).__init__(**kwargs)
        self.region = region

    def _required_auth_capability(self):
        return ['hmac-v4']

    def cancel_update_stack(self, stack_name):
        """
        Cancels an update on the specified stack. If the call
        completes successfully, the stack will roll back the update
        and revert to the previous stack configuration.
        Only stacks that are in the UPDATE_IN_PROGRESS state can be
        canceled.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack.

        """
        params = {'StackName': stack_name, }
        return self._make_request(
            action='CancelUpdateStack',
            verb='POST',
            path='/', params=params)

    def create_stack(self, stack_name, template_body=None, template_url=None,
                     parameters=None, disable_rollback=None,
                     timeout_in_minutes=None, notification_arns=None,
                     capabilities=None, on_failure=None,
                     stack_policy_body=None, stack_policy_url=None,
                     tags=None):
        """
        Creates a stack as specified in the template. After the call
        completes successfully, the stack creation starts. You can
        check the status of the stack via the DescribeStacks API.
        Currently, the limit for stacks is 20 stacks per account per
        region.

        :type stack_name: string
        :param stack_name:
        The name associated with the stack. The name must be unique within your
            AWS account.

        Must contain only alphanumeric characters (case sensitive) and start
            with an alpha character. Maximum length of the name is 255
            characters.

        :type template_body: string
        :param template_body: Structure containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template (max size: 307,200 bytes) located in
            an S3 bucket in the same region as the stack. For more information,
            go to the `Template Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type parameters: list
        :param parameters: A list of `Parameter` structures that specify input
            parameters for the stack.

        :type disable_rollback: boolean
        :param disable_rollback: Set to `True` to disable rollback of the stack
            if stack creation failed. You can specify either `DisableRollback`
            or `OnFailure`, but not both.
        Default: `False`

        :type timeout_in_minutes: integer
        :param timeout_in_minutes: The amount of time that can pass before the
            stack status becomes CREATE_FAILED; if `DisableRollback` is not set
            or is set to `False`, the stack will be rolled back.

        :type notification_arns: list
        :param notification_arns: The Simple Notification Service (SNS) topic
            ARNs to publish stack related events. You can find your SNS topic
            ARNs using the `SNS console`_ or your Command Line Interface (CLI).

        :type capabilities: list
        :param capabilities: The list of capabilities that you want to allow in
            the stack. If your template contains certain resources, you must
            specify the CAPABILITY_IAM value for this parameter; otherwise,
            this action returns an InsufficientCapabilities error. The
            following resources require you to specify the capabilities
            parameter: `AWS::CloudFormation::Stack`_, `AWS::IAM::AccessKey`_,
            `AWS::IAM::Group`_, `AWS::IAM::InstanceProfile`_,
            `AWS::IAM::Policy`_, `AWS::IAM::Role`_, `AWS::IAM::User`_, and
            `AWS::IAM::UserToGroupAddition`_.

        :type on_failure: string
        :param on_failure: Determines what action will be taken if stack
            creation fails. This must be one of: DO_NOTHING, ROLLBACK, or
            DELETE. You can specify either `OnFailure` or `DisableRollback`,
            but not both.
        Default: `ROLLBACK`

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the stack policy body.
            (For more information, go to ` Prevent Updates to Stack Resources`_
            in the AWS CloudFormation User Guide.)
        If you pass `StackPolicyBody` and `StackPolicyURL`, only
            `StackPolicyBody` is used.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the stack
            policy. The URL must point to a policy (max size: 16KB) located in
            an S3 bucket in the same region as the stack. If you pass
            `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody` is
            used.

        :type tags: list
        :param tags: A set of user-defined `Tags` to associate with this stack,
            represented by key/value pairs. Tags defined for the stack are
            propagated to EC2 resources that are created as part of the stack.
            A maximum number of 10 tags can be specified.

        """
        params = {'StackName': stack_name, }
        if template_body is not None:
            params['TemplateBody'] = template_body
        if template_url is not None:
            params['TemplateURL'] = template_url
        if parameters is not None:
            self.build_complex_list_params(
                params, parameters,
                'Parameters.member',
                ('ParameterKey', 'ParameterValue'))
        if disable_rollback is not None:
            params['DisableRollback'] = str(
                disable_rollback).lower()
        if timeout_in_minutes is not None:
            params['TimeoutInMinutes'] = timeout_in_minutes
        if notification_arns is not None:
            self.build_list_params(params,
                                   notification_arns,
                                   'NotificationARNs.member')
        if capabilities is not None:
            self.build_list_params(params,
                                   capabilities,
                                   'Capabilities.member')
        if on_failure is not None:
            params['OnFailure'] = on_failure
        if stack_policy_body is not None:
            params['StackPolicyBody'] = stack_policy_body
        if stack_policy_url is not None:
            params['StackPolicyURL'] = stack_policy_url
        if tags is not None:
            self.build_complex_list_params(
                params, tags,
                'Tags.member',
                ('Key', 'Value'))
        return self._make_request(
            action='CreateStack',
            verb='POST',
            path='/', params=params)

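A hedged usage sketch for the `create_stack` defined above, exercising the tuple-based encoding that `build_complex_list_params` performs for `parameters` and `tags` (all names, URLs, and values are illustrative):

    conn.create_stack(
        'web-tier',
        template_url='https://s3.amazonaws.com/my-bucket/web.template',
        parameters=[('KeyName', 'deploy-key'), ('InstanceType', 'm1.small')],
        capabilities=['CAPABILITY_IAM'],
        tags=[('env', 'staging')],
        timeout_in_minutes=15)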
    def delete_stack(self, stack_name):
        """
        Deletes a specified stack. Once the call completes
        successfully, stack deletion starts. Deleted stacks do not
        show up in the DescribeStacks API if the deletion has been
        completed successfully.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack.

        """
        params = {'StackName': stack_name, }
        return self._make_request(
            action='DeleteStack',
            verb='POST',
            path='/', params=params)

    def describe_stack_events(self, stack_name=None, next_token=None):
        """
        Returns all stack related events for a specified stack. For
        more information about a stack's event history, go to
        `Stacks`_ in the AWS CloudFormation User Guide.
        Events are returned, even if the stack never existed or has
        been successfully deleted.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack.
        Default: There is no default value.

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            events, if there is one.
        Default: There is no default value.

        """
        params = {}
        if stack_name is not None:
            params['StackName'] = stack_name
        if next_token is not None:
            params['NextToken'] = next_token
        return self._make_request(
            action='DescribeStackEvents',
            verb='POST',
            path='/', params=params)

    def describe_stack_resource(self, stack_name, logical_resource_id):
        """
        Returns a description of the specified resource in the
        specified stack.

        For deleted stacks, DescribeStackResource returns resource
        information for up to 90 days after the stack has been
        deleted.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack.
        Default: There is no default value.

        :type logical_resource_id: string
        :param logical_resource_id: The logical name of the resource as
            specified in the template.
        Default: There is no default value.

        """
        params = {
            'StackName': stack_name,
            'LogicalResourceId': logical_resource_id,
        }
        return self._make_request(
            action='DescribeStackResource',
            verb='POST',
            path='/', params=params)

    def describe_stack_resources(self, stack_name=None,
                                 logical_resource_id=None,
                                 physical_resource_id=None):
        """
        Returns AWS resource descriptions for running and deleted
        stacks. If `StackName` is specified, all the associated
        resources that are part of the stack are returned. If
        `PhysicalResourceId` is specified, the associated resources of
        the stack that the resource belongs to are returned.
        Only the first 100 resources will be returned. If your stack
        has more resources than this, you should use
        `ListStackResources` instead.
        For deleted stacks, `DescribeStackResources` returns resource
        information for up to 90 days after the stack has been
        deleted.

        You must specify either `StackName` or `PhysicalResourceId`,
        but not both. In addition, you can specify `LogicalResourceId`
        to filter the returned result. For more information about
        resources, the `LogicalResourceId` and `PhysicalResourceId`,
        go to the `AWS CloudFormation User Guide`_.
        A `ValidationError` is returned if you specify both
        `StackName` and `PhysicalResourceId` in the same request.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack.
        Required: Conditional. If you do not specify `StackName`, you must
            specify `PhysicalResourceId`.

        Default: There is no default value.

        :type logical_resource_id: string
        :param logical_resource_id: The logical name of the resource as
            specified in the template.
        Default: There is no default value.

        :type physical_resource_id: string
        :param physical_resource_id: The name or unique identifier that
            corresponds to a physical instance ID of a resource supported by
            AWS CloudFormation.
        For example, for an Amazon Elastic Compute Cloud (EC2) instance,
            `PhysicalResourceId` corresponds to the `InstanceId`. You can pass
            the EC2 `InstanceId` to `DescribeStackResources` to find which
            stack the instance belongs to and what other resources are part of
            the stack.

        Required: Conditional. If you do not specify `PhysicalResourceId`, you
            must specify `StackName`.

        Default: There is no default value.

        """
        params = {}
        if stack_name is not None:
            params['StackName'] = stack_name
        if logical_resource_id is not None:
            params['LogicalResourceId'] = logical_resource_id
        if physical_resource_id is not None:
            params['PhysicalResourceId'] = physical_resource_id
        return self._make_request(
            action='DescribeStackResources',
            verb='POST',
            path='/', params=params)

    def describe_stacks(self, stack_name=None, next_token=None):
        """
        Returns the description for the specified stack; if no stack
        name was specified, then it returns the description for all
        the stacks created.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack.
        Default: There is no default value.

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            stacks, if there is one.

        """
        params = {}
        if stack_name is not None:
            params['StackName'] = stack_name
        if next_token is not None:
            params['NextToken'] = next_token
        return self._make_request(
            action='DescribeStacks',
            verb='POST',
            path='/', params=params)

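All of the Describe*/List* calls in this file page through results with `NextToken`. A sketch of draining `describe_stack_events`; the `<Action>Response`/`<Action>Result` envelope keys are an assumption based on the JSON decoding that `_make_request` does at the bottom of this file:

    events, token = [], None
    while True:
        page = conn.describe_stack_events(stack_name='my-stack',
                                          next_token=token)
        result = page['DescribeStackEventsResponse']['DescribeStackEventsResult']
        events.extend(result.get('StackEvents', []))
        token = result.get('NextToken')
        if not token:
            break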
    def estimate_template_cost(self, template_body=None, template_url=None,
                               parameters=None):
        """
        Returns the estimated monthly cost of a template. The return
        value is an AWS Simple Monthly Calculator URL with a query
        string that describes the resources required to run the
        template.

        :type template_body: string
        :param template_body: Structure containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template located in an S3 bucket in the same
            region as the stack. For more information, go to `Template
            Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type parameters: list
        :param parameters: A list of `Parameter` structures that specify input
            parameters.

        """
        params = {}
        if template_body is not None:
            params['TemplateBody'] = template_body
        if template_url is not None:
            params['TemplateURL'] = template_url
        if parameters is not None:
            self.build_complex_list_params(
                params, parameters,
                'Parameters.member',
                ('ParameterKey', 'ParameterValue'))
        return self._make_request(
            action='EstimateTemplateCost',
            verb='POST',
            path='/', params=params)

    def get_stack_policy(self, stack_name):
        """
        Returns the stack policy for a specified stack. If a stack
        doesn't have a policy, a null value is returned.

        :type stack_name: string
        :param stack_name: The name or stack ID that is associated with the
            stack whose policy you want to get.

        """
        params = {'StackName': stack_name, }
        return self._make_request(
            action='GetStackPolicy',
            verb='POST',
            path='/', params=params)

    def get_template(self, stack_name):
        """
        Returns the template body for a specified stack. You can get
        the template for running or deleted stacks.

        For deleted stacks, GetTemplate returns the template for up to
        90 days after the stack has been deleted.
        If the template does not exist, a `ValidationError` is
        returned.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack, which are not always interchangeable:

        + Running stacks: You can specify either the stack's name or its unique
              stack ID.
        + Deleted stacks: You must specify the unique stack ID.


        Default: There is no default value.

        """
        params = {'StackName': stack_name, }
        return self._make_request(
            action='GetTemplate',
            verb='POST',
            path='/', params=params)

    def list_stack_resources(self, stack_name, next_token=None):
        """
        Returns descriptions of all resources of the specified stack.

        For deleted stacks, ListStackResources returns resource
        information for up to 90 days after the stack has been
        deleted.

        :type stack_name: string
        :param stack_name: The name or the unique identifier associated with
            the stack, which are not always interchangeable:

        + Running stacks: You can specify either the stack's name or its unique
              stack ID.
        + Deleted stacks: You must specify the unique stack ID.


        Default: There is no default value.

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            stack resource summaries, if there is one.
        Default: There is no default value.

        """
        params = {'StackName': stack_name, }
        if next_token is not None:
            params['NextToken'] = next_token
        return self._make_request(
            action='ListStackResources',
            verb='POST',
            path='/', params=params)

    def list_stacks(self, next_token=None, stack_status_filter=None):
        """
        Returns the summary information for stacks whose status
        matches the specified StackStatusFilter. Summary information
        for stacks that have been deleted is kept for 90 days after
        the stack is deleted. If no StackStatusFilter is specified,
        summary information for all stacks is returned (including
        existing stacks and stacks that have been deleted).

        :type next_token: string
        :param next_token: String that identifies the start of the next list of
            stacks, if there is one.
        Default: There is no default value.

        :type stack_status_filter: list
        :param stack_status_filter: Stack status to use as a filter. Specify
            one or more stack status codes to list only stacks with the
            specified status codes. For a complete list of stack status codes,
            see the `StackStatus` parameter of the Stack data type.

        """
        params = {}
        if next_token is not None:
            params['NextToken'] = next_token
        if stack_status_filter is not None:
            self.build_list_params(params,
                                   stack_status_filter,
                                   'StackStatusFilter.member')
        return self._make_request(
            action='ListStacks',
            verb='POST',
            path='/', params=params)

    def set_stack_policy(self, stack_name, stack_policy_body=None,
                         stack_policy_url=None):
        """
        Sets a stack policy for a specified stack.

        :type stack_name: string
        :param stack_name: The name or stack ID that you want to associate a
            policy with.

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the stack policy body.
            (For more information, go to ` Prevent Updates to Stack Resources`_
            in the AWS CloudFormation User Guide.)
        You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
            passed, only `StackPolicyBody` is used.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the stack
            policy. The URL must point to a policy (max size: 16KB) located in
            an S3 bucket in the same region as the stack. You must pass
            `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
            `StackPolicyBody` is used.

        """
        params = {'StackName': stack_name, }
        if stack_policy_body is not None:
            params['StackPolicyBody'] = stack_policy_body
        if stack_policy_url is not None:
            params['StackPolicyURL'] = stack_policy_url
        return self._make_request(
            action='SetStackPolicy',
            verb='POST',
            path='/', params=params)

    def update_stack(self, stack_name, template_body=None, template_url=None,
                     stack_policy_during_update_body=None,
                     stack_policy_during_update_url=None, parameters=None,
                     capabilities=None, stack_policy_body=None,
                     stack_policy_url=None):
        """
        Updates a stack as specified in the template. After the call
        completes successfully, the stack update starts. You can check
        the status of the stack via the DescribeStacks action.



        **Note: **You cannot update `AWS::S3::Bucket`_ resources, for
        example, to add or modify tags.



        To get a copy of the template for an existing stack, you can
        use the GetTemplate action.

        Tags that were associated with this stack during creation time
        will still be associated with the stack after an `UpdateStack`
        operation.

        For more information about creating an update template,
        updating a stack, and monitoring the progress of the update,
        see `Updating a Stack`_.

        :type stack_name: string
        :param stack_name:
        The name or stack ID of the stack to update.

        Must contain only alphanumeric characters (case sensitive) and start
            with an alpha character. Maximum length of the name is 255
            characters.

        :type template_body: string
        :param template_body: Structure containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateBody` or `TemplateURL`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template located in an S3 bucket in the same
            region as the stack. For more information, go to `Template
            Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type stack_policy_during_update_body: string
        :param stack_policy_during_update_body: Structure containing the
            temporary overriding stack policy body. If you pass
            `StackPolicyDuringUpdateBody` and `StackPolicyDuringUpdateURL`,
            only `StackPolicyDuringUpdateBody` is used.
        If you want to update protected resources, specify a temporary
            overriding stack policy during this update. If you do not specify a
            stack policy, the current policy that is associated with the stack
            will be used.

        :type stack_policy_during_update_url: string
        :param stack_policy_during_update_url: Location of a file containing
            the temporary overriding stack policy. The URL must point to a
            policy (max size: 16KB) located in an S3 bucket in the same region
            as the stack. If you pass `StackPolicyDuringUpdateBody` and
            `StackPolicyDuringUpdateURL`, only `StackPolicyDuringUpdateBody` is
            used.
        If you want to update protected resources, specify a temporary
            overriding stack policy during this update. If you do not specify a
            stack policy, the current policy that is associated with the stack
            will be used.

        :type parameters: list
        :param parameters: A list of `Parameter` structures that specify input
            parameters for the stack.

        :type capabilities: list
        :param capabilities: The list of capabilities that you want to allow in
            the stack. If your stack contains IAM resources, you must specify
            the CAPABILITY_IAM value for this parameter; otherwise, this action
            returns an InsufficientCapabilities error. IAM resources are the
            following: `AWS::IAM::AccessKey`_, `AWS::IAM::Group`_,
            `AWS::IAM::Policy`_, `AWS::IAM::User`_, and
            `AWS::IAM::UserToGroupAddition`_.

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the updated stack policy
            body. If you pass `StackPolicyBody` and `StackPolicyURL`, only
            `StackPolicyBody` is used.
        If you want to update a stack policy during a stack update, specify an
            updated stack policy. For example, you can include an updated stack
            policy to protect a new resource created in the stack update. If
            you do not specify a stack policy, the current policy that is
            associated with the stack is unchanged.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the updated
            stack policy. The URL must point to a policy (max size: 16KB)
            located in an S3 bucket in the same region as the stack. If you
            pass `StackPolicyBody` and `StackPolicyURL`, only `StackPolicyBody`
            is used.
        If you want to update a stack policy during a stack update, specify an
            updated stack policy. For example, you can include an updated stack
            policy to protect a new resource created in the stack update. If
            you do not specify a stack policy, the current policy that is
            associated with the stack is unchanged.

        """
        params = {'StackName': stack_name, }
        if template_body is not None:
            params['TemplateBody'] = template_body
        if template_url is not None:
            params['TemplateURL'] = template_url
        if stack_policy_during_update_body is not None:
            params['StackPolicyDuringUpdateBody'] = stack_policy_during_update_body
        if stack_policy_during_update_url is not None:
            params['StackPolicyDuringUpdateURL'] = stack_policy_during_update_url
        if parameters is not None:
            self.build_complex_list_params(
                params, parameters,
                'Parameters.member',
                ('ParameterKey', 'ParameterValue'))
        if capabilities is not None:
            self.build_list_params(params,
                                   capabilities,
                                   'Capabilities.member')
        if stack_policy_body is not None:
            params['StackPolicyBody'] = stack_policy_body
        if stack_policy_url is not None:
            params['StackPolicyURL'] = stack_policy_url
        return self._make_request(
            action='UpdateStack',
            verb='POST',
            path='/', params=params)

    def validate_template(self, template_body=None, template_url=None):
        """
        Validates a specified template.

        :type template_body: string
        :param template_body: String containing the template body. (For more
            information, go to `Template Anatomy`_ in the AWS CloudFormation
            User Guide.)
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        :type template_url: string
        :param template_url: Location of file containing the template body. The
            URL must point to a template (max size: 307,200 bytes) located in
            an S3 bucket in the same region as the stack. For more information,
            go to `Template Anatomy`_ in the AWS CloudFormation User Guide.
        Conditional: You must pass `TemplateURL` or `TemplateBody`. If both are
            passed, only `TemplateBody` is used.

        """
        params = {}
        if template_body is not None:
            params['TemplateBody'] = template_body
        if template_url is not None:
            params['TemplateURL'] = template_url
        return self._make_request(
            action='ValidateTemplate',
            verb='POST',
            path='/', params=params)

    def _make_request(self, action, verb, path, params):
        params['ContentType'] = 'JSON'
        response = self.make_request(action=action, verb='POST',
                                     path='/', params=params)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            return json.loads(body)
        else:
            json_body = json.loads(body)
            fault_name = json_body.get('Error', {}).get('Code', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
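The `_faults` table near the top of the class plugs into `_make_request` above: on a non-200 response, the `Error.Code` field of the JSON body selects a specific exception class, falling back to `JSONResponseError`. Catching one of the mapped faults, with an illustrative stack name and template body:

    from boto.cloudformation import exceptions

    try:
        conn.create_stack('already-taken-name', template_body=body)
    except exceptions.AlreadyExistsException as e:
        # e.status / e.reason / e.body carry the parsed JSON error envelope
        print 'stack already exists:', e.reason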
@@ -107,6 +107,35 @@ class Stack(object):
    def get_template(self):
        return self.connection.get_template(stack_name_or_id=self.stack_id)

    def get_policy(self):
        """
        Returns the stack policy for this stack. If it has no policy,
        a null value is returned.
        """
        return self.connection.get_stack_policy(self.stack_id)

    def set_policy(self, stack_policy_body=None, stack_policy_url=None):
        """
        Sets a stack policy for this stack.

        :type stack_policy_body: string
        :param stack_policy_body: Structure containing the stack policy body.
            (For more information, go to ` Prevent Updates to Stack Resources`_
            in the AWS CloudFormation User Guide.)
        You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
            passed, only `StackPolicyBody` is used.

        :type stack_policy_url: string
        :param stack_policy_url: Location of a file containing the stack
            policy. The URL must point to a policy (max size: 16KB) located in
            an S3 bucket in the same region as the stack. You must pass
            `StackPolicyBody` or `StackPolicyURL`. If both are passed, only
            `StackPolicyBody` is used.
        """
        return self.connection.set_stack_policy(self.stack_id,
                stack_policy_body=stack_policy_body,
                stack_policy_url=stack_policy_url)


class StackSummary(object):
    def __init__(self, connection=None):

@@ -1,21 +1,29 @@
from boto.resultset import ResultSet
from boto.cloudformation.stack import Capability

class Template(object):
    def __init__(self, connection=None):
        self.connection = connection
        self.description = None
        self.template_parameters = None
        self.capabilities_reason = None
        self.capabilities = None

    def startElement(self, name, attrs, connection):
        if name == "Parameters":
            self.template_parameters = ResultSet([('member', TemplateParameter)])
            return self.template_parameters
        elif name == "Capabilities":
            self.capabilities = ResultSet([('member', Capability)])
            return self.capabilities
        else:
            return None

    def endElement(self, name, value, connection):
        if name == "Description":
            self.description = value
        elif name == "CapabilitiesReason":
            self.capabilities_reason = value
        else:
            setattr(self, name, value)

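`startElement`/`endElement` here follow boto's SAX-style unmarshalling protocol: the response parser walks the XML, calls `startElement` to ask whether a nested container object should take over (returning a `ResultSet`, or `None` to stay put), and hands leaf element text to `endElement`. The same pattern in miniature, with a hypothetical `Tag` class that is not part of this diff:

    class Tag(object):
        """Collects <Key>/<Value> text nodes handed over by boto's XML walker."""
        def __init__(self, connection=None):
            self.key = None
            self.value = None

        def startElement(self, name, attrs, connection):
            return None   # no nested containers to delegate to

        def endElement(self, name, value, connection):
            if name == 'Key':
                self.key = value
            elif name == 'Value':
                self.value = value
            else:
                setattr(self, name, value)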
@@ -43,12 +43,14 @@ class CloudFrontConnection(AWSAuthConnection):
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 port=None, proxy=None, proxy_port=None,
                 host=DefaultHost, debug=0, security_token=None,
                 validate_certs=True):
                 validate_certs=True, profile_name=None, https_connection_factory=None):
        super(CloudFrontConnection, self).__init__(host,
                aws_access_key_id, aws_secret_access_key,
                True, port, proxy, proxy_port, debug=debug,
                security_token=security_token,
                validate_certs=validate_certs)
                validate_certs=validate_certs,
                https_connection_factory=https_connection_factory,
                profile_name=profile_name)

    def get_etag(self, response):
        response_headers = response.msg

@@ -103,6 +103,9 @@ class DistributionConfig(object):
        self.logging = logging
        self.default_root_object = default_root_object

    def __repr__(self):
        return "DistributionConfig:%s" % self.origin

    def to_xml(self):
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
@@ -234,6 +237,9 @@ class DistributionSummary(object):
        self.etag = None
        self.streaming = False

    def __repr__(self):
        return "DistributionSummary:%s" % self.domain_name

    def startElement(self, name, attrs, connection):
        if name == 'TrustedSigners':
            self.trusted_signers = TrustedSigners()
@@ -295,6 +301,9 @@ class Distribution(object):
        self._bucket = None
        self._object_class = Object

    def __repr__(self):
        return "Distribution:%s" % self.domain_name

    def startElement(self, name, attrs, connection):
        if name == 'DistributionConfig':
            self.config = DistributionConfig()
@@ -350,11 +359,11 @@ class Distribution(object):
                self.config.cnames, self.config.comment,
                self.config.trusted_signers,
                self.config.default_root_object)
        if enabled != None:
        if enabled is not None:
            new_config.enabled = enabled
        if cnames != None:
        if cnames is not None:
            new_config.cnames = cnames
        if comment != None:
        if comment is not None:
            new_config.comment = comment
        self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
        self.config = new_config
@@ -730,11 +739,11 @@ class StreamingDistribution(Distribution):
                self.config.cnames,
                self.config.comment,
                self.config.trusted_signers)
        if enabled != None:
        if enabled is not None:
            new_config.enabled = enabled
        if cnames != None:
        if cnames is not None:
            new_config.cnames = cnames
        if comment != None:
        if comment is not None:
            new_config.comment = comment
        self.etag = self.connection.set_streaming_distribution_config(self.id,
                                                                      self.etag,

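The recurring `!= None` to `is not None` cleanup in these hunks is more than style: PEP 8 calls for identity comparison against singletons because `!=` dispatches to `__ne__`, which a class may override with surprising results. A tiny demonstration, using a hypothetical class:

    class Weird(object):
        def __ne__(self, other):
            return False          # claims to equal everything, even None

    w = Weird()
    print w != None               # False -- the overloaded operator lies
    print w is not None           # True  -- identity cannot be overridden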
@@ -52,7 +52,7 @@ class OriginAccessIdentity(object):
        new_config = OriginAccessIdentityConfig(self.connection,
                                                self.config.caller_reference,
                                                self.config.comment)
        if comment != None:
        if comment is not None:
            new_config.comment = comment
        self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
        self.config = new_config

@@ -75,7 +75,7 @@ class InvalidationBatch(object):

    def to_xml(self):
        """Get this batch as XML"""
        assert self.connection != None
        assert self.connection is not None
        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
        s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
        for p in self.paths:

@@ -21,7 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@@ -32,23 +32,10 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    import boto.cloudsearch.layer1
    return [RegionInfo(name='us-east-1',
                       endpoint='cloudsearch.us-east-1.amazonaws.com',
                       connection_cls=boto.cloudsearch.layer1.Layer1),
            RegionInfo(name='eu-west-1',
                       endpoint='cloudsearch.eu-west-1.amazonaws.com',
                       connection_cls=boto.cloudsearch.layer1.Layer1),
            RegionInfo(name='us-west-1',
                       endpoint='cloudsearch.us-west-1.amazonaws.com',
                       connection_cls=boto.cloudsearch.layer1.Layer1),
            RegionInfo(name='us-west-2',
                       endpoint='cloudsearch.us-west-2.amazonaws.com',
                       connection_cls=boto.cloudsearch.layer1.Layer1),
            RegionInfo(name='ap-southeast-1',
                       endpoint='cloudsearch.ap-southeast-1.amazonaws.com',
                       connection_cls=boto.cloudsearch.layer1.Layer1),
            ]
    return get_regions(
        'cloudsearch',
        connection_cls=boto.cloudsearch.layer1.Layer1
    )


def connect_to_region(region_name, **kw_params):

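This refactor (repeated below for cloudtrail, directconnect, and dynamodb) swaps per-service hard-coded `RegionInfo` lists for `get_regions`, which in boto 2.27 resolves endpoints from bundled region data, so a new region becomes a data update rather than a code change. Call-site behavior is unchanged; a sketch:

    from boto.regioninfo import get_regions
    import boto.cloudsearch.layer1

    # Same result shape as the old hand-written list.
    for r in get_regions('cloudsearch',
                         connection_cls=boto.cloudsearch.layer1.Layer1):
        print r.name, r.endpoint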
@@ -46,7 +46,7 @@ class Layer1(AWSQueryConnection):
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 api_version=None, security_token=None,
                 validate_certs=True):
                 validate_certs=True, profile_name=None):
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
@@ -66,7 +66,8 @@ class Layer1(AWSQueryConnection):
            https_connection_factory=https_connection_factory,
            path=path,
            security_token=security_token,
            validate_certs=validate_certs)
            validate_certs=validate_certs,
            profile_name=profile_name)

    def _required_auth_capability(self):
        return ['hmac-v4']
@@ -88,7 +89,7 @@ class Layer1(AWSQueryConnection):
        for p in doc_path:
            inner = inner.get(p)
            if not inner:
                return None if list_marker == None else []
                return None if list_marker is None else []
        if isinstance(inner, list):
            return inner
        else:

@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@@ -31,14 +31,7 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from boto.cloudtrail.layer1 import CloudTrailConnection

    return [RegionInfo(name='us-east-1',
                       endpoint='cloudtrail.us-east-1.amazonaws.com',
                       connection_cls=CloudTrailConnection),
            RegionInfo(name='us-west-2',
                       endpoint='cloudtrail.us-west-2.amazonaws.com',
                       connection_cls=CloudTrailConnection),
            ]
    return get_regions('cloudtrail', connection_cls=CloudTrailConnection)


def connect_to_region(region_name, **kw_params):

@@ -45,6 +45,7 @@ Handles basic connections to AWS

from __future__ import with_statement
import base64
from datetime import datetime
import errno
import httplib
import os
@@ -423,7 +424,7 @@ class AWSAuthConnection(object):
                 https_connection_factory=None, path='/',
                 provider='aws', security_token=None,
                 suppress_consec_slashes=True,
                 validate_certs=True):
                 validate_certs=True, profile_name=None):
        """
        :type host: str
        :param host: The host to make the connection to
@@ -434,6 +435,10 @@ class AWSAuthConnection(object):
        :keyword str aws_secret_access_key: Your AWS Secret Access Key
            (provided by Amazon). If none is specified, the value in your
            ``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
        :keyword str security_token: The security token associated with
            temporary credentials issued by STS. Optional unless using
            temporary credentials. If none is specified, the environment
            variable ``AWS_SECURITY_TOKEN`` is used if defined.

        :type is_secure: boolean
        :param is_secure: Whether the connection is over SSL
@@ -464,6 +469,10 @@ class AWSAuthConnection(object):
        :type validate_certs: bool
        :param validate_certs: Controls whether SSL certificates
            will be validated or not. Defaults to True.

        :type profile_name: str
        :param profile_name: Override usual Credentials section in config
            file to use a named set of keys instead.
        """
        self.suppress_consec_slashes = suppress_consec_slashes
        self.num_retries = 6
@@ -485,8 +494,11 @@ class AWSAuthConnection(object):
                    "support this feature are not available. Certificate "
                    "validation is only supported when running under Python "
                    "2.6 or later.")
            self.ca_certificates_file = config.get_value(
            certs_file = config.get_value(
                'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
            if certs_file == 'system':
                certs_file = None
            self.ca_certificates_file = certs_file
        if port:
            self.port = port
        else:
@@ -542,7 +554,8 @@ class AWSAuthConnection(object):
        self.provider = Provider(self._provider_type,
                                 aws_access_key_id,
                                 aws_secret_access_key,
                                 security_token)
                                 security_token,
                                 profile_name)

        # Allow config file to override default host, port, and host header.
        if self.provider.host:
@@ -559,6 +572,7 @@ class AWSAuthConnection(object):
            host, config, self.provider, self._required_auth_capability())
        if getattr(self, 'AuthServiceName', None) is not None:
            self.auth_service_name = self.AuthServiceName
        self.request_hook = None

    def __repr__(self):
        return '%s:%s' % (self.__class__.__name__, self.host)
@@ -599,6 +613,10 @@ class AWSAuthConnection(object):
    gs_secret_access_key = aws_secret_access_key
    secret_key = aws_secret_access_key

    def profile_name(self):
        return self.provider.profile_name
    profile_name = property(profile_name)

    def get_path(self, path='/'):
        # The default behavior is to suppress consecutive slashes for reasons
        # discussed at
@@ -680,7 +698,7 @@ class AWSAuthConnection(object):
            self.proxy_port = self.port

        self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
        self.use_proxy = (self.proxy != None)
        self.use_proxy = (self.proxy is not None)

    def get_http_connection(self, host, port, is_secure):
        conn = self._pool.get_http_connection(host, port, is_secure)
@@ -806,9 +824,12 @@ class AWSAuthConnection(object):
            h = httplib.HTTPConnection(host)

        if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
            boto.log.debug("wrapping ssl socket for proxied connection; "
                           "CA certificate file=%s",
                           self.ca_certificates_file)
            msg = "wrapping ssl socket for proxied connection; "
            if self.ca_certificates_file:
                msg += "CA certificate file=%s" % self.ca_certificates_file
            else:
                msg += "using system provided SSL certs"
            boto.log.debug(msg)
            key_file = self.http_connection_kwargs.get('key_file', None)
            cert_file = self.http_connection_kwargs.get('cert_file', None)
            sslSock = ssl.wrap_socket(sock, keyfile=key_file,
@@ -847,6 +868,9 @@ class AWSAuthConnection(object):
        except AttributeError:
            request.headers['Host'] = self.host.split(':', 1)[0]

    def set_request_hook(self, hook):
        self.request_hook = hook

    def _mexe(self, request, sender=None, override_num_retries=None,
              retry_handler=None):
        """
@@ -887,8 +911,9 @@ class AWSAuthConnection(object):
                # the port info. All others should now be up to date and
                # not include the port.
                if 's3' not in self._required_auth_capability():
                    self.set_host_header(request)

                if not getattr(self, 'anon', False):
                    self.set_host_header(request)
                request.start_time = datetime.now()
                if callable(sender):
                    response = sender(connection, request.method, request.path,
                                      request.body, request.headers)
@@ -929,6 +954,8 @@ class AWSAuthConnection(object):
                    else:
                        self.put_http_connection(request.host, request.port,
                                                 self.is_secure, connection)
                    if self.request_hook is not None:
                        self.request_hook.handle_request_data(request, response)
                    return response
                else:
                    scheme, request.host, request.path, \
@@ -969,6 +996,8 @@ class AWSAuthConnection(object):
        # and still haven't succeeded. So, if we have a response object,
        # use it to raise an exception.
        # Otherwise, raise the exception that must have already happened.
        if self.request_hook is not None:
            self.request_hook.handle_request_data(request, response, error=True)
        if response:
            raise BotoServerError(response.status, response.reason, body)
        elif e:
@@ -982,11 +1011,11 @@ class AWSAuthConnection(object):
        path = self.get_path(path)
        if auth_path is not None:
            auth_path = self.get_path(auth_path)
        if params == None:
        if params is None:
            params = {}
        else:
            params = params.copy()
        if headers == None:
        if headers is None:
            headers = {}
        else:
            headers = headers.copy()
@@ -1033,14 +1062,15 @@ class AWSQueryConnection(AWSAuthConnection):
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host=None, debug=0,
                 https_connection_factory=None, path='/', security_token=None,
                 validate_certs=True):
                 validate_certs=True, profile_name=None):
        super(AWSQueryConnection, self).__init__(host, aws_access_key_id,
                                                 aws_secret_access_key,
                                                 is_secure, port, proxy,
                                                 proxy_port, proxy_user, proxy_pass,
                                                 debug, https_connection_factory, path,
                                                 security_token=security_token,
                                                 validate_certs=validate_certs)
                                                 validate_certs=validate_certs,
                                                 profile_name=profile_name)

    def _required_auth_capability(self):
        return []

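The new `request_hook` plumbing gives callers a tap on every HTTP exchange: `set_request_hook` stores any object exposing `handle_request_data(request, response, error=False)`, and `_mexe` invokes it after each successful response and once more on the final failure. A minimal timing hook as a sketch (the class is ours, not boto's):

    from datetime import datetime

    class TimingHook(object):
        """Reports wall-clock latency per request; request.start_time is
        the timestamp _mexe now records just before sending."""
        def handle_request_data(self, request, response, error=False):
            elapsed = datetime.now() - request.start_time
            status = getattr(response, 'status', 'no-response')
            print request.method, request.path, status, elapsed, \
                '(error)' if error else ''

    conn.set_request_hook(TimingHook())   # conn: any AWSAuthConnection subclass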
@@ -0,0 +1,41 @@
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import get_regions


def regions():
    """
    Get all available regions for the AWS Datapipeline service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from boto.datapipeline.layer1 import DataPipelineConnection
    return get_regions('datapipeline', connection_cls=DataPipelineConnection)


def connect_to_region(region_name, **kw_params):
    for region in regions():
        if region.name == region_name:
            return region.connect(**kw_params)
    return None

@@ -85,7 +85,7 @@ class DataPipelineConnection(AWSQueryConnection):


    def __init__(self, **kwargs):
        region = kwargs.get('region')
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)

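The `kwargs.get('region')` to `kwargs.pop('region', None)` fix matters because `**kwargs` is forwarded to the superclass later in this constructor: `get` leaves the `region` key in the dict, so the parent `__init__` receives a keyword it does not accept, while `pop` consumes it. In isolation, with illustrative names:

    def parent(host=None):
        print 'connected to', host

    def child(**kwargs):
        region = kwargs.pop('region', None)   # with .get() the key would leak
        parent(**kwargs)                      # through and raise a TypeError
        return region

    child(region='us-east-1', host='example.com')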
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@@ -31,32 +31,7 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from boto.directconnect.layer1 import DirectConnectConnection

    return [RegionInfo(name='us-east-1',
                       endpoint='directconnect.us-east-1.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            RegionInfo(name='us-west-1',
                       endpoint='directconnect.us-west-1.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            RegionInfo(name='us-west-2',
                       endpoint='directconnect.us-west-2.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            RegionInfo(name='eu-west-1',
                       endpoint='directconnect.eu-west-1.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            RegionInfo(name='ap-southeast-1',
                       endpoint='directconnect.ap-southeast-1.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            RegionInfo(name='ap-southeast-2',
                       endpoint='directconnect.ap-southeast-2.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            RegionInfo(name='ap-southeast-3',
                       endpoint='directconnect.ap-southeast-3.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            RegionInfo(name='sa-east-1',
                       endpoint='directconnect.sa-east-1.amazonaws.com',
                       connection_cls=DirectConnectConnection),
            ]
    return get_regions('directconnect', connection_cls=DirectConnectConnection)


def connect_to_region(region_name, **kw_params):

@@ -21,7 +21,7 @@
 # IN THE SOFTWARE.
 #

-from boto.regioninfo import RegionInfo
+from boto.regioninfo import RegionInfo, get_regions


 def regions():
@@ -32,37 +32,7 @@ def regions():
     :return: A list of :class:`boto.regioninfo.RegionInfo`
     """
     import boto.dynamodb.layer2
-    return [RegionInfo(name='us-east-1',
-                       endpoint='dynamodb.us-east-1.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='us-gov-west-1',
-                       endpoint='dynamodb.us-gov-west-1.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='us-west-1',
-                       endpoint='dynamodb.us-west-1.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='us-west-2',
-                       endpoint='dynamodb.us-west-2.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='ap-northeast-1',
-                       endpoint='dynamodb.ap-northeast-1.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='ap-southeast-1',
-                       endpoint='dynamodb.ap-southeast-1.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='ap-southeast-2',
-                       endpoint='dynamodb.ap-southeast-2.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='eu-west-1',
-                       endpoint='dynamodb.eu-west-1.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='sa-east-1',
-                       endpoint='dynamodb.sa-east-1.amazonaws.com',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            RegionInfo(name='cn-north-1',
-                       endpoint='dynamodb.cn-north-1.amazonaws.com.cn',
-                       connection_cls=boto.dynamodb.layer2.Layer2),
-            ]
+    return get_regions('dynamodb', connection_cls=boto.dynamodb.layer2.Layer2)


 def connect_to_region(region_name, **kw_params):

@@ -41,13 +41,13 @@ class Item(dict):
         self._updates = None
         self._hash_key_name = self.table.schema.hash_key_name
         self._range_key_name = self.table.schema.range_key_name
-        if attrs == None:
+        if attrs is None:
             attrs = {}
-        if hash_key == None:
+        if hash_key is None:
             hash_key = attrs.get(self._hash_key_name, None)
         self[self._hash_key_name] = hash_key
         if self._range_key_name:
-            if range_key == None:
+            if range_key is None:
                 range_key = attrs.get(self._range_key_name, None)
             self[self._range_key_name] = range_key
         self._updates = {}

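The switch from ``== None`` to ``is None`` here is more than style: ``==`` dispatches to ``__eq__``, which a dict subclass like ``Item`` (or any operator-overloading type) may redefine, while ``is`` tests identity and cannot be fooled. A standalone illustration:

class Weird(object):
    def __eq__(self, other):
        return True   # claims equality with everything, including None

w = Weird()
print(w == None)      # True - misleading
print(w is None)      # False - the correct identity test
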
@@ -74,7 +74,7 @@ class Layer1(AWSAuthConnection):
     def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                  is_secure=True, port=None, proxy=None, proxy_port=None,
                  debug=0, security_token=None, region=None,
-                 validate_certs=True, validate_checksums=True):
+                 validate_certs=True, validate_checksums=True, profile_name=None):
         if not region:
             region_name = boto.config.get('DynamoDB', 'region',
                                           self.DefaultRegionName)
@@ -89,7 +89,8 @@ class Layer1(AWSAuthConnection):
                                      aws_secret_access_key,
                                      is_secure, port, proxy, proxy_port,
                                      debug=debug, security_token=security_token,
-                                     validate_certs=validate_certs)
+                                     validate_certs=validate_certs,
+                                     profile_name=profile_name)
         self.throughput_exceeded_events = 0
         self._validate_checksums = boto.config.getbool(
             'DynamoDB', 'validate_checksums', validate_checksums)

@@ -145,11 +145,13 @@ class Layer2(object):
     def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                  is_secure=True, port=None, proxy=None, proxy_port=None,
                  debug=0, security_token=None, region=None,
-                 validate_certs=True, dynamizer=LossyFloatDynamizer):
+                 validate_certs=True, dynamizer=LossyFloatDynamizer,
+                 profile_name=None):
         self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
                              is_secure, port, proxy, proxy_port,
                              debug, security_token, region,
-                             validate_certs=validate_certs)
+                             validate_certs=validate_certs,
+                             profile_name=profile_name)
         self.dynamizer = dynamizer()

     def use_decimals(self):

@@ -136,6 +136,9 @@ def dynamize_value(val):

 class Binary(object):
     def __init__(self, value):
+        if not isinstance(value, basestring):
+            raise TypeError('Value must be a string of binary data!')
+
         self.value = value

     def encode(self):

@@ -21,7 +21,7 @@
 # IN THE SOFTWARE.
 #

-from boto.regioninfo import RegionInfo
+from boto.regioninfo import RegionInfo, get_regions


 def regions():
@@ -32,37 +32,7 @@ def regions():
     :return: A list of :class:`boto.regioninfo.RegionInfo`
     """
     from boto.dynamodb2.layer1 import DynamoDBConnection
-    return [RegionInfo(name='us-east-1',
-                       endpoint='dynamodb.us-east-1.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='us-gov-west-1',
-                       endpoint='dynamodb.us-gov-west-1.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='us-west-1',
-                       endpoint='dynamodb.us-west-1.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='us-west-2',
-                       endpoint='dynamodb.us-west-2.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='eu-west-1',
-                       endpoint='dynamodb.eu-west-1.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='ap-northeast-1',
-                       endpoint='dynamodb.ap-northeast-1.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='ap-southeast-1',
-                       endpoint='dynamodb.ap-southeast-1.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='ap-southeast-2',
-                       endpoint='dynamodb.ap-southeast-2.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='sa-east-1',
-                       endpoint='dynamodb.sa-east-1.amazonaws.com',
-                       connection_cls=DynamoDBConnection),
-            RegionInfo(name='cn-north-1',
-                       endpoint='dynamodb.cn-north-1.amazonaws.com.cn',
-                       connection_cls=DynamoDBConnection),
-            ]
+    return get_regions('dynamodb', connection_cls=DynamoDBConnection)


 def connect_to_region(region_name, **kw_params):

@@ -72,3 +72,7 @@ class UnknownFilterTypeError(DynamoDBError):

 class QueryError(DynamoDBError):
     pass
+
+
+class ItemNotFound(DynamoDBError):
+    pass

@@ -323,7 +323,10 @@ class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
     projection_type = 'INCLUDE'

     def __init__(self, *args, **kwargs):
+        throughput = kwargs.pop('throughput', None)
         IncludeIndex.__init__(self, *args, **kwargs)
+        if throughput:
+            kwargs['throughput'] = throughput
         GlobalBaseIndexField.__init__(self, *args, **kwargs)

     def schema(self):
@@ -331,4 +334,4 @@ class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
         schema_data = IncludeIndex.schema(self)
         # Also the throughput.
         schema_data.update(GlobalBaseIndexField.schema(self))
-        return schema_data
+        return schema_data

@@ -20,7 +20,7 @@ class ResultSet(object):
         ...     print res['username']

     """
-    def __init__(self):
+    def __init__(self, max_page_size=None):
         super(ResultSet, self).__init__()
         self.the_callable = None
         self.call_args = []
@@ -29,6 +29,9 @@ class ResultSet(object):
         self._offset = -1
         self._results_left = True
         self._last_key_seen = None
+        self._fetches = 0
+        self._max_page_size = max_page_size
+        self._limit = None

     @property
     def first_key(self):
@@ -65,6 +68,12 @@ class ResultSet(object):
             self.fetch_more()

         if self._offset < len(self._results):
+            if self._limit is not None:
+                self._limit -= 1
+
+                if self._limit < 0:
+                    raise StopIteration()
+
             return self._results[self._offset]
         else:
             raise StopIteration()
@@ -92,6 +101,14 @@ class ResultSet(object):
                 'You must supply an object or function to be called.'
             )

+        # We pop the ``limit``, if present, to track how many we should return
+        # to the user. This isn't the same as the ``limit`` that the low-level
+        # DDB api calls use (which limit page size, not the overall result set).
+        self._limit = kwargs.pop('limit', None)
+
+        if self._limit < 0:
+            self._limit = None
+
         self.the_callable = the_callable
         self.call_args = args
         self.call_kwargs = kwargs
@@ -111,19 +128,34 @@ class ResultSet(object):
         if self._last_key_seen is not None:
             kwargs[self.first_key] = self._last_key_seen

+        # If the page size is greater than limit set them
+        # to the same value
+        if self._limit and self._max_page_size > self._limit:
+            self._max_page_size = self._limit
+
+        # Put in the max page size.
+        if self._max_page_size is not None:
+            kwargs['limit'] = self._max_page_size
+        elif self._limit is not None:
+            # If max_page_size is not set and limit is available
+            # use it as the page size
+            kwargs['limit'] = self._limit
+
         results = self.the_callable(*args, **kwargs)
+        self._fetches += 1
         new_results = results.get('results', [])
         self._last_key_seen = results.get('last_key', None)

         if len(new_results):
             self._results.extend(results['results'])

-            # Decrease the limit, if it's present.
-            if self.call_kwargs.get('limit'):
-                self.call_kwargs['limit'] -= len(results['results'])
-                # and if limit hits zero, we don't have any more
-                # results to look for
-                if 0 == self.call_kwargs['limit']:
+            # Check the limit, if it's present.
+            if self._limit is not None and self._limit >= 0:
+                limit = self._limit
+                limit -= len(results['results'])
+                # If we've exceeded the limit, we don't have any more
+                # results to look for.
+                if limit <= 0:
                     self._results_left = False

         if self._last_key_seen is None:

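The distinction this rework draws - ``_max_page_size`` caps a single request while ``_limit`` caps the whole result set - is easy to get wrong. A minimal standalone paginator sketch showing the same arithmetic; ``fetch_page`` is a hypothetical callable returning ``{'results': [...], 'last_key': ...}`` like the low-level DynamoDB calls here:

def paginate(fetch_page, limit=None, max_page_size=None):
    """Yield up to ``limit`` items, asking for at most ``max_page_size`` per call."""
    last_key, returned = None, 0
    while limit is None or returned < limit:
        page_size = max_page_size
        if page_size is None and limit is not None:
            page_size = limit - returned                 # remaining limit as page size
        elif page_size is not None and limit is not None:
            page_size = min(page_size, limit - returned) # never over-fetch
        page = fetch_page(exclusive_start_key=last_key, limit=page_size)
        for item in page['results']:
            yield item
            returned += 1
            if limit is not None and returned >= limit:
                return
        last_key = page.get('last_key')
        if last_key is None:                             # no more pages
            return
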
@@ -8,6 +8,7 @@ from boto.dynamodb2.items import Item
 from boto.dynamodb2.layer1 import DynamoDBConnection
 from boto.dynamodb2.results import ResultSet, BatchGetResultSet
 from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS
+from boto.exception import JSONResponseError


 class Table(object):
@@ -169,7 +170,7 @@ class Table(object):
         ...   ],
         ...   throughput={
         ...     'read':10,
-        ...     'write":10,
+        ...     'write':10,
         ...   }),
         ... ])

@@ -436,7 +437,7 @@ class Table(object):

         return raw_key

-    def get_item(self, consistent=False, **kwargs):
+    def get_item(self, consistent=False, attributes=None, **kwargs):
         """
         Fetches an item (record) from a table in DynamoDB.

@@ -448,6 +449,10 @@ class Table(object):
             a consistent (but more expensive) read from DynamoDB.
             (Default: ``False``)

+        Optionally accepts an ``attributes`` parameter, which should be a
+        list of fieldnames to fetch. (Default: ``None``, which means all fields
+        should be fetched)
+
         Returns an ``Item`` instance containing all the data for that record.

         Example::
@@ -480,12 +485,54 @@ class Table(object):
         item_data = self.connection.get_item(
             self.table_name,
             raw_key,
+            attributes_to_get=attributes,
             consistent_read=consistent
         )
+        if 'Item' not in item_data:
+            raise exceptions.ItemNotFound("Item %s couldn't be found." % kwargs)
         item = Item(self)
         item.load(item_data)
         return item

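Under the new signature a caller can trim the response to named fields; a quick sketch (the table and field names are hypothetical):

users = Table('users')   # hypothetical existing table
user = users.get_item(username='johndoe',
                      attributes=['username', 'first_name'])
# Only the requested fields are populated, and a missing key now raises
# the new exceptions.ItemNotFound instead of returning an empty Item.
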
+    def has_item(self, **kwargs):
+        """
+        Return whether an item (record) exists within a table in DynamoDB.
+
+        To specify the key of the item you'd like to get, you can specify the
+        key attributes as kwargs.
+
+        Optionally accepts a ``consistent`` parameter, which should be a
+        boolean. If you provide ``True``, it will perform
+        a consistent (but more expensive) read from DynamoDB.
+        (Default: ``False``)
+
+        Optionally accepts an ``attributes`` parameter, which should be a
+        list of fieldnames to fetch. (Default: ``None``, which means all fields
+        should be fetched)
+
+        Returns ``True`` if an ``Item`` is present, ``False`` if not.
+
+        Example::
+
+            # Simple, just hash-key schema.
+            >>> users.has_item(username='johndoe')
+            True
+
+            # Complex schema, item not present.
+            >>> users.has_item(
+            ...     username='johndoe',
+            ...     date_joined='2014-01-07'
+            ... )
+            False
+
+        """
+        try:
+            self.get_item(**kwargs)
+        except (JSONResponseError, exceptions.ItemNotFound):
+            return False
+
+        return True

     def lookup(self, *args, **kwargs):
         """
         Look up an entry in DynamoDB. This is mostly backwards compatible
@@ -524,7 +571,6 @@ class Table(object):
             data[self.schema[x].name] = arg
         return Item(self, data=data)

-
     def put_item(self, data, overwrite=False):
         """
         Saves an entire item to DynamoDB.

@@ -755,7 +801,7 @@ class Table(object):
         return filters

     def query(self, limit=None, index=None, reverse=False, consistent=False,
-              attributes=None, **filter_kwargs):
+              attributes=None, max_page_size=None, **filter_kwargs):
         """
         Queries for a set of matching items in a DynamoDB table.

@@ -790,6 +836,12 @@ class Table(object):
             from DynamoDB. This uses the ``AttributesToGet`` API parameter and
             sets ``Select`` to ``SPECIFIC_ATTRIBUTES``.

+        Optionally accepts a ``max_page_size`` parameter, which should be an
+        integer count of the maximum number of items to retrieve
+        **per-request**. This is useful in making faster requests & preventing
+        the scan from drowning out other queries. (Default: ``None`` -
+        fetch as many as DynamoDB will return)
+
         Returns a ``ResultSet``, which transparently handles the pagination of
         results you get back.

@@ -830,17 +882,24 @@ class Table(object):

         """
         if self.schema:
-            if len(self.schema) == 1 and len(filter_kwargs) <= 1:
-                raise exceptions.QueryError(
-                    "You must specify more than one key to filter on."
-                )
+            if len(self.schema) == 1:
+                if len(filter_kwargs) <= 1:
+                    if not self.global_indexes or not len(self.global_indexes):
+                        # If the schema only has one field, there's <= 1 filter
+                        # param & no Global Secondary Indexes, this is user
+                        # error. Bail early.
+                        raise exceptions.QueryError(
+                            "You must specify more than one key to filter on."
+                        )

         if attributes is not None:
             select = 'SPECIFIC_ATTRIBUTES'
         else:
             select = None

-        results = ResultSet()
+        results = ResultSet(
+            max_page_size=max_page_size
+        )
         kwargs = filter_kwargs.copy()
         kwargs.update({
             'limit': limit,
@@ -848,7 +907,7 @@ class Table(object):
             'reverse': reverse,
             'consistent': consistent,
             'select': select,
-            'attributes_to_get': attributes
+            'attributes_to_get': attributes,
         })
         results.to_call(self._query, **kwargs)
         return results

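A usage sketch for the widened ``query`` signature (table and key names hypothetical); note that ``limit`` bounds the total result set while ``max_page_size`` only bounds each underlying request:

results = users.query(
    account_type__eq='standard_user',   # hypothetical indexed key
    limit=100,                          # at most 100 items overall
    max_page_size=25,                   # never more than 25 per DynamoDB call
)
for res in results:
    print(res['username'])
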
@@ -961,7 +1020,7 @@ class Table(object):
         }

     def scan(self, limit=None, segment=None, total_segments=None,
-             **filter_kwargs):
+             max_page_size=None, attributes=None, **filter_kwargs):
         """
         Scans across all items within a DynamoDB table.

@@ -977,6 +1036,26 @@ class Table(object):
             count of the total number of items to return. (Default: ``None`` -
             all results)

+        Optionally accepts a ``segment`` parameter, which should be an integer
+        of the segment to retrieve on. Please see the documentation about
+        Parallel Scans (Default: ``None`` - no segments)
+
+        Optionally accepts a ``total_segments`` parameter, which should be an
+        integer count of number of segments to divide the table into.
+        Please see the documentation about Parallel Scans (Default: ``None`` -
+        no segments)
+
+        Optionally accepts a ``max_page_size`` parameter, which should be an
+        integer count of the maximum number of items to retrieve
+        **per-request**. This is useful in making faster requests & preventing
+        the scan from drowning out other queries. (Default: ``None`` -
+        fetch as many as DynamoDB will return)
+
+        Optionally accepts an ``attributes`` parameter, which should be a
+        tuple. If you provide any attributes only these will be fetched
+        from DynamoDB. This uses the ``AttributesToGet`` API parameter and
+        sets ``Select`` to ``SPECIFIC_ATTRIBUTES``.
+
         Returns a ``ResultSet``, which transparently handles the pagination of
         results you get back.

@@ -1003,18 +1082,21 @@ class Table(object):
         'Alice'

         """
-        results = ResultSet()
+        results = ResultSet(
+            max_page_size=max_page_size
+        )
         kwargs = filter_kwargs.copy()
         kwargs.update({
             'limit': limit,
             'segment': segment,
             'total_segments': total_segments,
+            'attributes': attributes,
         })
         results.to_call(self._scan, **kwargs)
         return results

     def _scan(self, limit=None, exclusive_start_key=None, segment=None,
-              total_segments=None, **filter_kwargs):
+              total_segments=None, attributes=None, **filter_kwargs):
         """
         The internal method that performs the actual scan. Used extensively
         by ``ResultSet`` to perform each (paginated) request.
@@ -1023,6 +1105,7 @@ class Table(object):
             'limit': limit,
             'segment': segment,
             'total_segments': total_segments,
             'attributes_to_get': attributes,
         }

         if exclusive_start_key:

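And a matching sketch for ``scan`` (names hypothetical): roughly four 25-item requests instead of one large one, with only the named attribute traveling over the wire:

for user in users.scan(limit=100, max_page_size=25,
                       attributes=('username',)):
    print(user['username'])
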
@@ -24,21 +24,10 @@ This module provides an interface to the Elastic Compute Cloud (EC2)
 service from AWS.
 """
 from boto.ec2.connection import EC2Connection
-from boto.regioninfo import RegionInfo
+from boto.regioninfo import RegionInfo, get_regions, load_regions


-RegionData = {
-    'us-east-1': 'ec2.us-east-1.amazonaws.com',
-    'us-gov-west-1': 'ec2.us-gov-west-1.amazonaws.com',
-    'us-west-1': 'ec2.us-west-1.amazonaws.com',
-    'us-west-2': 'ec2.us-west-2.amazonaws.com',
-    'sa-east-1': 'ec2.sa-east-1.amazonaws.com',
-    'eu-west-1': 'ec2.eu-west-1.amazonaws.com',
-    'ap-northeast-1': 'ec2.ap-northeast-1.amazonaws.com',
-    'ap-southeast-1': 'ec2.ap-southeast-1.amazonaws.com',
-    'ap-southeast-2': 'ec2.ap-southeast-2.amazonaws.com',
-    'cn-north-1': 'ec2.cn-north-1.amazonaws.com.cn',
-}
+RegionData = load_regions().get('ec2', {})


 def regions(**kw_params):
@@ -51,13 +40,7 @@ def regions(**kw_params):
     :rtype: list
     :return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
     """
-    regions = []
-    for region_name in RegionData:
-        region = RegionInfo(name=region_name,
-                            endpoint=RegionData[region_name],
-                            connection_cls=EC2Connection)
-        regions.append(region)
-    return regions
+    return get_regions('ec2', connection_cls=EC2Connection)


 def connect_to_region(region_name, **kw_params):

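The hard-coded endpoint dicts give way to boto's bundled region data throughout this commit. A sketch of what ``load_regions`` hands back - the exact regions depend on the endpoints file shipped with this boto version:

from boto.regioninfo import load_regions

endpoints = load_regions()               # {service: {region_name: endpoint}}
ec2_endpoints = endpoints.get('ec2', {})
print(sorted(ec2_endpoints))             # region names boto knows for EC2
print(ec2_endpoints.get('us-west-2'))    # e.g. 'ec2.us-west-2.amazonaws.com'
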
@@ -89,14 +89,23 @@ class Address(EC2Object):

     delete = release

-    def associate(self, instance_id, dry_run=False):
+    def associate(self, instance_id, allow_reassociation=False, dry_run=False):
         """
         Associate this Elastic IP address with a currently running instance.
         :see: :meth:`boto.ec2.connection.EC2Connection.associate_address`
         """
+        if self.allocation_id:
+            return self.connection.associate_address(
+                instance_id,
+                self.public_ip,
+                allocation_id=self.allocation_id,
+                allow_reassociation=allow_reassociation,
+                dry_run=dry_run
+            )
         return self.connection.associate_address(
             instance_id,
             self.public_ip,
+            allow_reassociation=allow_reassociation,
             dry_run=dry_run
         )

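With the new flag, re-pointing an already-associated Elastic IP no longer has to fail; a sketch (the address and instance ID are hypothetical):

addresses = conn.get_all_addresses(['54.0.0.1'])        # hypothetical EIP
addresses[0].associate('i-12345678', allow_reassociation=True)
# For a VPC address, allocation_id is picked up automatically from the
# Address object, as the branch above shows.
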
@@ -31,7 +31,7 @@ import base64

 import boto
 from boto.connection import AWSQueryConnection
-from boto.ec2.regioninfo import RegionInfo
+from boto.regioninfo import RegionInfo, get_regions, load_regions
 from boto.ec2.autoscale.request import Request
 from boto.ec2.autoscale.launchconfig import LaunchConfiguration
 from boto.ec2.autoscale.group import AutoScalingGroup
@@ -44,19 +44,9 @@ from boto.ec2.autoscale.policy import TerminationPolicies
 from boto.ec2.autoscale.instance import Instance
 from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
 from boto.ec2.autoscale.tag import Tag
+from boto.ec2.autoscale.limits import AccountLimits

-RegionData = {
-    'us-east-1': 'autoscaling.us-east-1.amazonaws.com',
-    'us-gov-west-1': 'autoscaling.us-gov-west-1.amazonaws.com',
-    'us-west-1': 'autoscaling.us-west-1.amazonaws.com',
-    'us-west-2': 'autoscaling.us-west-2.amazonaws.com',
-    'sa-east-1': 'autoscaling.sa-east-1.amazonaws.com',
-    'eu-west-1': 'autoscaling.eu-west-1.amazonaws.com',
-    'ap-northeast-1': 'autoscaling.ap-northeast-1.amazonaws.com',
-    'ap-southeast-1': 'autoscaling.ap-southeast-1.amazonaws.com',
-    'ap-southeast-2': 'autoscaling.ap-southeast-2.amazonaws.com',
-    'cn-north-1': 'autoscaling.cn-north-1.amazonaws.com.cn',
-}
+RegionData = load_regions().get('autoscaling', {})


 def regions():
@@ -66,13 +56,7 @@ def regions():
     :rtype: list
     :return: A list of :class:`boto.RegionInfo` instances
     """
-    regions = []
-    for region_name in RegionData:
-        region = RegionInfo(name=region_name,
-                            endpoint=RegionData[region_name],
-                            connection_cls=AutoScaleConnection)
-        regions.append(region)
-    return regions
+    return get_regions('autoscaling', connection_cls=AutoScaleConnection)


 def connect_to_region(region_name, **kw_params):

@@ -103,18 +87,22 @@ class AutoScaleConnection(AWSQueryConnection):
                  is_secure=True, port=None, proxy=None, proxy_port=None,
                  proxy_user=None, proxy_pass=None, debug=0,
                  https_connection_factory=None, region=None, path='/',
-                 security_token=None, validate_certs=True):
+                 security_token=None, validate_certs=True, profile_name=None,
+                 use_block_device_types=False):
         """
         Init method to create a new connection to the AutoScaling service.

         B{Note:} The host argument is overridden by the host specified in the
         boto configuration file.

         """
         if not region:
             region = RegionInfo(self, self.DefaultRegionName,
                                 self.DefaultRegionEndpoint,
                                 AutoScaleConnection)
         self.region = region
+        self.use_block_device_types = use_block_device_types
         super(AutoScaleConnection, self).__init__(aws_access_key_id,
                                                   aws_secret_access_key,
                                                   is_secure, port, proxy, proxy_port,
@@ -122,7 +110,8 @@ class AutoScaleConnection(AWSQueryConnection):
                                                   self.region.endpoint, debug,
                                                   https_connection_factory, path=path,
                                                   security_token=security_token,
-                                                  validate_certs=validate_certs)
+                                                  validate_certs=validate_certs,
+                                                  profile_name=profile_name)

     def _required_auth_capability(self):
         return ['hmac-v4']

@@ -176,6 +165,8 @@ class AutoScaleConnection(AWSQueryConnection):
             params['DefaultCooldown'] = as_group.default_cooldown
         if as_group.placement_group:
             params['PlacementGroup'] = as_group.placement_group
+        if as_group.instance_id:
+            params['InstanceId'] = as_group.instance_id
         if as_group.termination_policies:
             self.build_list_params(params, as_group.termination_policies,
                                    'TerminationPolicies')
@@ -190,6 +181,16 @@ class AutoScaleConnection(AWSQueryConnection):
             tag.build_params(params, i + 1)
         return self.get_object(op, params, Request)

+    def attach_instances(self, name, instance_ids):
+        """
+        Attach instances to an autoscaling group.
+        """
+        params = {
+            'AutoScalingGroupName': name,
+        }
+        self.build_list_params(params, instance_ids, 'InstanceIds')
+        return self.get_status('AttachInstances', params)
+
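A minimal sketch of the new call (group and instance IDs hypothetical):

import boto.ec2.autoscale

conn = boto.ec2.autoscale.connect_to_region('us-east-1')
# Attach two already-running instances to an existing group.
conn.attach_instances('my-asg', ['i-12345678', 'i-87654321'])
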
     def create_auto_scaling_group(self, as_group):
         """
         Create auto scaling group.
@@ -246,9 +247,25 @@ class AutoScaleConnection(AWSQueryConnection):
             params['AssociatePublicIpAddress'] = 'true'
         elif launch_config.associate_public_ip_address is False:
             params['AssociatePublicIpAddress'] = 'false'
+        if launch_config.volume_type:
+            params['VolumeType'] = launch_config.volume_type
+        if launch_config.delete_on_termination:
+            params['DeleteOnTermination'] = 'true'
+        else:
+            params['DeleteOnTermination'] = 'false'
+        if launch_config.iops:
+            params['Iops'] = launch_config.iops
         return self.get_object('CreateLaunchConfiguration', params,
                                Request, verb='POST')

+    def get_account_limits(self):
+        """
+        Returns the limits for the Auto Scaling resources currently granted for
+        your AWS account.
+        """
+        params = {}
+        return self.get_object('DescribeAccountLimits', params, AccountLimits)
+
     def create_scaling_policy(self, scaling_policy):
         """
         Creates a new Scaling Policy.

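Usage is a one-liner; the returned object is the new ``AccountLimits`` container defined in limits.py below:

limits = conn.get_account_limits()
print(limits.max_autoscaling_groups, limits.max_launch_configurations)
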
@@ -98,7 +98,8 @@ class AutoScalingGroup(object):
                  health_check_type=None, health_check_period=None,
                  placement_group=None, vpc_zone_identifier=None,
                  desired_capacity=None, min_size=None, max_size=None,
-                 tags=None, termination_policies=None, **kwargs):
+                 tags=None, termination_policies=None, instance_id=None,
+                 **kwargs):
         """
         Creates a new AutoScalingGroup with the specified name.

@@ -145,12 +146,12 @@ class AutoScalingGroup(object):
         :param placement_group: Physical location of your cluster placement
             group created in Amazon EC2.

-        :type vpc_zone_identifier: str
-        :param vpc_zone_identifier: The subnet identifier of the Virtual
-            Private Cloud.
+        :type vpc_zone_identifier: str or list
+        :param vpc_zone_identifier: A comma-separated string or python list of
+            the subnet identifiers of the Virtual Private Cloud.

         :type tags: list
-        :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s
+        :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s

         :type termination_policies: list
         :param termination_policies: A list of termination policies. Valid values
@@ -158,6 +159,10 @@ class AutoScalingGroup(object):
             "ClosestToNextInstanceHour", "Default". If no value is specified,
             the "Default" value is used.

+        :type instance_id: str
+        :param instance_id: The ID of the Amazon EC2 instance you want to use
+            to create the Auto Scaling group.
+
         :rtype: :class:`boto.ec2.autoscale.group.AutoScalingGroup`
         :return: An autoscale group.
         """
@@ -183,11 +188,14 @@ class AutoScalingGroup(object):
         self.health_check_type = health_check_type
         self.placement_group = placement_group
         self.autoscaling_group_arn = None
+        if type(vpc_zone_identifier) is list:
+            vpc_zone_identifier = ','.join(vpc_zone_identifier)
         self.vpc_zone_identifier = vpc_zone_identifier
         self.instances = None
         self.tags = tags or None
         termination_policies = termination_policies or []
         self.termination_policies = ListElement(termination_policies)
+        self.instance_id = instance_id

     # backwards compatible access to 'cooldown' param
     def _get_cooldown(self):
@@ -251,6 +259,8 @@ class AutoScalingGroup(object):
             self.health_check_type = value
         elif name == 'VPCZoneIdentifier':
             self.vpc_zone_identifier = value
+        elif name == 'InstanceId':
+            self.instance_id = value
         else:
             setattr(self, name, value)

@@ -304,7 +314,7 @@ class AutoScalingGroup(object):
             'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
             'autoscaling:EC2_INSTANCE_TERMINATE',
             'autoscaling:EC2_INSTANCE_TERMINATE_ERROR',
-            'autoscaling:TEST_NOTIFICATION'
+            'autoscaling:TEST_NOTIFICATION'
         """
         return self.connection.put_notification_configuration(self,
                                                               topic,

@@ -21,14 +21,16 @@
 # IN THE SOFTWARE.

 from datetime import datetime
-from boto.resultset import ResultSet
 from boto.ec2.elb.listelement import ListElement
+# Namespacing issue with deprecated local class
+from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM
+from boto.resultset import ResultSet
 import boto.utils
 import base64


 # this should use the corresponding object from boto.ec2
+# Currently in use by deprecated local BlockDeviceMapping class
 class Ebs(object):
     def __init__(self, connection=None, snapshot_id=None, volume_size=None):
         self.connection = connection
@@ -65,12 +67,16 @@ class InstanceMonitoring(object):


 # this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping
+# Currently in use by deprecated code for backwards compatibility
+# Removing this class can also remove the Ebs class in this same file
 class BlockDeviceMapping(object):
-    def __init__(self, connection=None, device_name=None, virtual_name=None):
+    def __init__(self, connection=None, device_name=None, virtual_name=None,
+                 ebs=None, no_device=None):
         self.connection = connection
-        self.device_name = None
-        self.virtual_name = None
-        self.ebs = None
+        self.device_name = device_name
+        self.virtual_name = virtual_name
+        self.ebs = ebs
+        self.no_device = no_device

     def __repr__(self):
         return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
@@ -86,6 +92,8 @@ class BlockDeviceMapping(object):
             self.device_name = value
         elif name == 'VirtualName':
             self.virtual_name = value
+        elif name == 'NoDevice':
+            self.no_device = bool(value)


 class LaunchConfiguration(object):
@@ -95,7 +103,8 @@ class LaunchConfiguration(object):
                  ramdisk_id=None, block_device_mappings=None,
                  instance_monitoring=False, spot_price=None,
                  instance_profile_name=None, ebs_optimized=False,
-                 associate_public_ip_address=None):
+                 associate_public_ip_address=None, volume_type=None,
+                 delete_on_termination=True, iops=None, use_block_device_types=False):
         """
         A launch configuration.

@@ -147,8 +156,9 @@ class LaunchConfiguration(object):
         :param ebs_optimized: Specifies whether the instance is optimized
             for EBS I/O (true) or not (false).

         :type associate_public_ip_address: bool
-        :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
+        :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
+            Specifies whether to assign a public IP address to each instance launched in an Amazon VPC.
         """
         self.connection = connection
@@ -170,6 +180,13 @@ class LaunchConfiguration(object):
         self.launch_configuration_arn = None
         self.ebs_optimized = ebs_optimized
         self.associate_public_ip_address = associate_public_ip_address
+        self.volume_type = volume_type
+        self.delete_on_termination = delete_on_termination
+        self.iops = iops
+        self.use_block_device_types = use_block_device_types
+
+        if connection is not None:
+            self.use_block_device_types = connection.use_block_device_types

     def __repr__(self):
         return 'LaunchConfiguration:%s' % self.name
@@ -178,8 +195,10 @@ class LaunchConfiguration(object):
         if name == 'SecurityGroups':
             return self.security_groups
         elif name == 'BlockDeviceMappings':
-            self.block_device_mappings = ResultSet([('member',
-                                                     BlockDeviceMapping)])
+            if self.use_block_device_types:
+                self.block_device_mappings = BDM()
+            else:
+                self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)])
             return self.block_device_mappings
         elif name == 'InstanceMonitoring':
             self.instance_monitoring = InstanceMonitoring(self)
@@ -215,6 +234,17 @@ class LaunchConfiguration(object):
             self.instance_profile_name = value
         elif name == 'EbsOptimized':
             self.ebs_optimized = True if value.lower() == 'true' else False
         elif name == 'AssociatePublicIpAddress':
             self.associate_public_ip_address = True if value.lower() == 'true' else False
+        elif name == 'VolumeType':
+            self.volume_type = value
+        elif name == 'DeleteOnTermination':
+            if value.lower() == 'true':
+                self.delete_on_termination = True
+            else:
+                self.delete_on_termination = False
+        elif name == 'Iops':
+            self.iops = int(value)
         else:
             setattr(self, name, value)

awx/lib/site-packages/boto/ec2/autoscale/limits.py (new file, 44 lines)
@@ -0,0 +1,44 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class AccountLimits(object):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.max_autoscaling_groups = None
+        self.max_launch_configurations = None
+
+    def __repr__(self):
+        return 'AccountLimits: [%s, %s]' % (self.max_autoscaling_groups,
+                                            self.max_launch_configurations)
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'RequestId':
+            self.request_id = value
+        elif name == 'MaxNumberOfAutoScalingGroups':
+            self.max_autoscaling_groups = int(value)
+        elif name == 'MaxNumberOfLaunchConfigurations':
+            self.max_launch_configurations = int(value)
+        else:
+            setattr(self, name, value)

@@ -47,16 +47,17 @@ class Alarm(object):
 class AdjustmentType(object):
     def __init__(self, connection=None):
         self.connection = connection
-        self.adjustment_types = ListElement([])
+        self.adjustment_type = None

     def __repr__(self):
-        return 'AdjustmentType:%s' % self.adjustment_types
+        return 'AdjustmentType:%s' % self.adjustment_type

     def startElement(self, name, attrs, connection):
         if name == 'AdjustmentType':
-            return self.adjustment_types
+            return

     def endElement(self, name, value, connection):
         if name == 'AdjustmentType':
+            self.adjustment_type = value
         return

@@ -55,25 +55,26 @@ class BlockDeviceType(object):
         pass

     def endElement(self, name, value, connection):
+        lname = name.lower()
         if name == 'volumeId':
             self.volume_id = value
-        elif name == 'virtualName':
+        elif lname == 'virtualname':
             self.ephemeral_name = value
-        elif name == 'NoDevice':
+        elif lname == 'nodevice':
             self.no_device = (value == 'true')
-        elif name == 'snapshotId':
+        elif lname == 'snapshotid':
             self.snapshot_id = value
-        elif name == 'volumeSize':
+        elif lname == 'volumesize':
             self.size = int(value)
-        elif name == 'status':
+        elif lname == 'status':
             self.status = value
-        elif name == 'attachTime':
+        elif lname == 'attachtime':
             self.attach_time = value
-        elif name == 'deleteOnTermination':
+        elif lname == 'deleteontermination':
             self.delete_on_termination = (value == 'true')
-        elif name == 'volumeType':
+        elif lname == 'volumetype':
             self.volume_type = value
-        elif name == 'iops':
+        elif lname == 'iops':
             self.iops = int(value)
         else:
             setattr(self, name, value)
@@ -105,14 +106,16 @@ class BlockDeviceMapping(dict):
         self.current_value = None

     def startElement(self, name, attrs, connection):
-        if name == 'ebs' or name == 'virtualName':
+        lname = name.lower()
+        if lname in ['ebs', 'virtualname']:
             self.current_value = BlockDeviceType(self)
             return self.current_value

     def endElement(self, name, value, connection):
-        if name == 'device' or name == 'deviceName':
+        lname = name.lower()
+        if lname in ['device', 'devicename']:
             self.current_name = value
-        elif name == 'item':
+        elif lname in ['item', 'member']:
             self[self.current_name] = self.current_value

     def ec2_build_list_params(self, params, prefix=''):

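The lowercased comparisons paper over a real inconsistency: different EC2 and AutoScaling responses camel-case these element names differently (``virtualName`` vs ``VirtualName``, ``item`` vs ``member``). A tiny standalone illustration of the pattern:

def classify(name):
    lname = name.lower()
    if lname in ['ebs', 'virtualname']:
        return 'block-device detail'
    if lname in ['device', 'devicename']:
        return 'device name'
    return 'other'

print(classify('virtualName'), classify('VirtualName'))  # same answer for both
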
@@ -28,21 +28,10 @@ from boto.connection import AWSQueryConnection
 from boto.ec2.cloudwatch.metric import Metric
 from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem
 from boto.ec2.cloudwatch.datapoint import Datapoint
-from boto.regioninfo import RegionInfo
+from boto.regioninfo import RegionInfo, get_regions, load_regions
 import boto

-RegionData = {
-    'us-east-1': 'monitoring.us-east-1.amazonaws.com',
-    'us-gov-west-1': 'monitoring.us-gov-west-1.amazonaws.com',
-    'us-west-1': 'monitoring.us-west-1.amazonaws.com',
-    'us-west-2': 'monitoring.us-west-2.amazonaws.com',
-    'sa-east-1': 'monitoring.sa-east-1.amazonaws.com',
-    'eu-west-1': 'monitoring.eu-west-1.amazonaws.com',
-    'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com',
-    'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com',
-    'ap-southeast-2': 'monitoring.ap-southeast-2.amazonaws.com',
-    'cn-north-1': 'monitoring.cn-north-1.amazonaws.com.cn',
-}
+RegionData = load_regions().get('cloudwatch', {})


 def regions():
@@ -52,13 +41,7 @@ def regions():
     :rtype: list
     :return: A list of :class:`boto.RegionInfo` instances
     """
-    regions = []
-    for region_name in RegionData:
-        region = RegionInfo(name=region_name,
-                            endpoint=RegionData[region_name],
-                            connection_cls=CloudWatchConnection)
-        regions.append(region)
-    return regions
+    return get_regions('cloudwatch', connection_cls=CloudWatchConnection)


 def connect_to_region(region_name, **kw_params):

@@ -91,7 +74,7 @@ class CloudWatchConnection(AWSQueryConnection):
                  is_secure=True, port=None, proxy=None, proxy_port=None,
                  proxy_user=None, proxy_pass=None, debug=0,
                  https_connection_factory=None, region=None, path='/',
-                 security_token=None, validate_certs=True):
+                 security_token=None, validate_certs=True, profile_name=None):
         """
         Init method to create a new connection to EC2 Monitoring Service.

@@ -115,7 +98,8 @@ class CloudWatchConnection(AWSQueryConnection):
                                             self.region.endpoint, debug,
                                             https_connection_factory, path,
                                             security_token,
-                                            validate_certs=validate_certs)
+                                            validate_certs=validate_certs,
+                                            profile_name=profile_name)

     def _required_auth_capability(self):
         return ['hmac-v4']
@@ -178,11 +162,11 @@ class CloudWatchConnection(AWSQueryConnection):
             metric_data['StatisticValues.Minimum'] = s['minimum']
             metric_data['StatisticValues.SampleCount'] = s['samplecount']
             metric_data['StatisticValues.Sum'] = s['sum']
-            if value != None:
+            if value is not None:
                 msg = 'You supplied a value and statistics for a ' + \
                       'metric.Posting statistics and not value.'
                 boto.log.warn(msg)
-        elif value != None:
+        elif value is not None:
             metric_data['Value'] = v
         else:
             raise Exception('Must specify a value or statistics to put.')
@@ -273,9 +257,13 @@ class CloudWatchConnection(AWSQueryConnection):
             pairs that will be used to filter the results. The key in
             the dictionary is the name of a Dimension. The value in
             the dictionary is either a scalar value of that Dimension
-            name that you want to filter on, a list of values to
-            filter on or None if you want all metrics with that
-            Dimension name.
+            name that you want to filter on or None if you want all
+            metrics with that Dimension name. To be included in the
+            result a metric must contain all specified dimensions,
+            although the metric may contain additional dimensions beyond
+            the requested metrics. The Dimension names, and values must
+            be strings between 1 and 250 characters long. A maximum of
+            10 dimensions are allowed.

         :type metric_name: str
         :param metric_name: The name of the Metric to filter against. If None,

@ -58,7 +58,7 @@ from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription
|
||||
from boto.ec2.bundleinstance import BundleInstanceTask
|
||||
from boto.ec2.placementgroup import PlacementGroup
|
||||
from boto.ec2.tag import Tag
|
||||
from boto.ec2.vmtype import VmType
|
||||
from boto.ec2.instancetype import InstanceType
|
||||
from boto.ec2.instancestatus import InstanceStatusSet
|
||||
from boto.ec2.volumestatus import VolumeStatusSet
|
||||
from boto.ec2.networkinterface import NetworkInterface
|
||||
@ -83,7 +83,7 @@ class EC2Connection(AWSQueryConnection):
|
||||
proxy_user=None, proxy_pass=None, debug=0,
|
||||
https_connection_factory=None, region=None, path='/',
|
||||
api_version=None, security_token=None,
|
||||
validate_certs=True):
|
||||
validate_certs=True, profile_name=None):
|
||||
"""
|
||||
Init method to create a new connection to EC2.
|
||||
"""
|
||||
@ -98,7 +98,8 @@ class EC2Connection(AWSQueryConnection):
|
||||
self.region.endpoint, debug,
|
||||
https_connection_factory, path,
|
||||
security_token,
|
||||
validate_certs=validate_certs)
|
||||
validate_certs=validate_certs,
|
||||
profile_name=profile_name)
|
||||
if api_version:
|
||||
self.APIVersion = api_version
|
||||
|
||||
@ -734,8 +735,8 @@ class EC2Connection(AWSQueryConnection):
|
||||
launch instances.
|
||||
|
||||
:type security_groups: list of strings
|
||||
:param security_groups: The names of the security groups with which to
|
||||
associate instances.
|
||||
:param security_groups: The names of the EC2 classic security groups
|
||||
with which to associate instances
|
||||
|
||||
:type user_data: string
|
||||
:param user_data: The Base64-encoded MIME user data to be made
|
||||
@ -749,6 +750,8 @@ class EC2Connection(AWSQueryConnection):
|
||||
* m1.medium
|
||||
* m1.large
|
||||
* m1.xlarge
|
||||
* m3.medium
|
||||
* m3.large
|
||||
* m3.xlarge
|
||||
* m3.2xlarge
|
||||
* c1.medium
|
||||
@ -1301,7 +1304,8 @@ class EC2Connection(AWSQueryConnection):
|
||||
def get_spot_price_history(self, start_time=None, end_time=None,
|
||||
instance_type=None, product_description=None,
|
||||
availability_zone=None, dry_run=False,
|
||||
max_results=None):
|
||||
max_results=None, next_token=None,
|
||||
filters=None):
|
||||
"""
|
||||
Retrieve the recent history of spot instances pricing.
|
||||
|
||||
@ -1339,6 +1343,19 @@ class EC2Connection(AWSQueryConnection):
|
||||
:param max_results: The maximum number of paginated items
|
||||
per response.
|
||||
|
||||
:type next_token: str
|
||||
:param next_token: The next set of rows to return. This should
|
||||
be the value of the ``next_token`` attribute from a previous
|
||||
call to ``get_spot_price_history``.
|
||||
|
||||
:type filters: dict
|
||||
:param filters: Optional filters that can be used to limit the
|
||||
results returned. Filters are provided in the form of a
|
||||
dictionary consisting of filter names as the key and
|
||||
filter values as the value. The set of allowable filter
|
||||
names/values is dependent on the request being performed.
|
||||
Check the EC2 API guide for details.
|
||||
|
||||
:rtype: list
|
||||
:return: A list tuples containing price and timestamp.
|
||||
"""
|
||||
@ -1357,6 +1374,10 @@ class EC2Connection(AWSQueryConnection):
|
||||
params['DryRun'] = 'true'
|
||||
if max_results is not None:
|
||||
params['MaxResults'] = max_results
|
||||
if next_token:
|
||||
params['NextToken'] = next_token
|
||||
if filters:
|
||||
self.build_filter_params(params, filters)
|
||||
return self.get_list('DescribeSpotPriceHistory', params,
|
||||
[('item', SpotPriceHistory)], verb='POST')
|
||||
|
||||
@ -1424,6 +1445,8 @@ class EC2Connection(AWSQueryConnection):
|
||||
* m1.medium
|
||||
* m1.large
|
||||
* m1.xlarge
|
||||
* m3.medium
|
||||
* m3.large
|
||||
* m3.xlarge
|
||||
* m3.2xlarge
|
||||
* c1.medium
|
||||
@ -1814,6 +1837,37 @@ class EC2Connection(AWSQueryConnection):
|
||||
|
||||
return self.get_status('AssignPrivateIpAddresses', params, verb='POST')
|
||||
|
||||
def _associate_address(self, status, instance_id=None, public_ip=None,
|
||||
allocation_id=None, network_interface_id=None,
|
||||
private_ip_address=None, allow_reassociation=False,
|
||||
dry_run=False):
|
||||
params = {}
|
||||
if instance_id is not None:
|
||||
params['InstanceId'] = instance_id
|
||||
elif network_interface_id is not None:
|
||||
params['NetworkInterfaceId'] = network_interface_id
|
||||
|
||||
# Allocation id trumps public ip in order to associate with VPCs
|
||||
if allocation_id is not None:
|
||||
params['AllocationId'] = allocation_id
|
||||
elif public_ip is not None:
|
||||
params['PublicIp'] = public_ip
|
||||
|
||||
if private_ip_address is not None:
|
||||
params['PrivateIpAddress'] = private_ip_address
|
||||
|
||||
if allow_reassociation:
|
||||
params['AllowReassociation'] = 'true'
|
||||
|
||||
if dry_run:
|
||||
params['DryRun'] = 'true'
|
||||
|
||||
if status:
|
||||
return self.get_status('AssociateAddress', params, verb='POST')
|
||||
else:
|
||||
return self.get_object('AssociateAddress', params, Address,
|
||||
verb='POST')
|
||||
|
||||
def associate_address(self, instance_id=None, public_ip=None,
|
||||
allocation_id=None, network_interface_id=None,
|
||||
private_ip_address=None, allow_reassociation=False,
|
||||
@ -1856,27 +1910,59 @@ class EC2Connection(AWSQueryConnection):
|
||||
:rtype: bool
|
||||
:return: True if successful
|
||||
"""
|
||||
params = {}
|
||||
if instance_id is not None:
|
||||
params['InstanceId'] = instance_id
|
||||
elif network_interface_id is not None:
|
||||
params['NetworkInterfaceId'] = network_interface_id
|
||||
return self._associate_address(True, instance_id=instance_id,
|
||||
public_ip=public_ip, allocation_id=allocation_id,
|
||||
network_interface_id=network_interface_id,
|
||||
private_ip_address=private_ip_address,
|
||||
allow_reassociation=allow_reassociation, dry_run=dry_run)
|
||||
|
||||
if public_ip is not None:
|
||||
params['PublicIp'] = public_ip
|
||||
elif allocation_id is not None:
|
||||
params['AllocationId'] = allocation_id
|
||||
def associate_address_object(self, instance_id=None, public_ip=None,
|
||||
allocation_id=None, network_interface_id=None,
|
||||
private_ip_address=None, allow_reassociation=False,
|
||||
dry_run=False):
|
||||
"""
|
||||
Associate an Elastic IP address with a currently running instance.
|
||||
This requires one of ``public_ip`` or ``allocation_id`` depending
|
||||
on if you're associating a VPC address or a plain EC2 address.
|
||||
|
||||
if private_ip_address is not None:
|
||||
params['PrivateIpAddress'] = private_ip_address
|
||||
When using an Allocation ID, make sure to pass ``None`` for ``public_ip``
|
||||
as EC2 expects a single parameter and if ``public_ip`` is passed boto
|
||||
will preference that instead of ``allocation_id``.
|
||||
|
||||
if allow_reassociation:
|
||||
params['AllowReassociation'] = 'true'
|
||||
:type instance_id: string
|
||||
:param instance_id: The ID of the instance
|
||||
|
||||
if dry_run:
|
||||
params['DryRun'] = 'true'
|
||||
:type public_ip: string
|
||||
:param public_ip: The public IP address for EC2 based allocations.
|
||||
|
||||
return self.get_status('AssociateAddress', params, verb='POST')
|
||||
:type allocation_id: string
|
||||
:param allocation_id: The allocation ID for a VPC-based elastic IP.
|
||||
|
||||
:type network_interface_id: string
|
||||
:param network_interface_id: The network interface ID to which
|
||||
elastic IP is to be assigned to
|
||||
|
||||
:type private_ip_address: string
|
||||
:param private_ip_address: The primary or secondary private IP address
|
||||
to associate with the Elastic IP address.
|
||||
|
||||
:type allow_reassociation: bool
|
||||
:param allow_reassociation: Specify this option to allow an Elastic IP
|
||||
address that is already associated with another network interface
|
||||
or instance to be re-associated with the specified instance or
|
||||
interface.
|
||||
|
||||
:type dry_run: bool
|
||||
:param dry_run: Set to True if the operation should not actually run.
|
||||
|
||||
:rtype: class:`boto.ec2.address.Address`
|
||||
:return: The associated address instance
|
||||
"""
|
||||
return self._associate_address(False, instance_id=instance_id,
|
||||
public_ip=public_ip, allocation_id=allocation_id,
|
||||
network_interface_id=network_interface_id,
|
||||
private_ip_address=private_ip_address,
|
||||
allow_reassociation=allow_reassociation, dry_run=dry_run)
|
||||
|
||||
def disassociate_address(self, public_ip=None, association_id=None,
|
||||
dry_run=False):
|
||||
@ -1897,10 +1983,12 @@ class EC2Connection(AWSQueryConnection):
|
||||
"""
|
||||
params = {}
|
||||
|
||||
if public_ip is not None:
|
||||
params['PublicIp'] = public_ip
|
||||
elif association_id is not None:
|
||||
# If there is an association id it trumps public ip
|
||||
# in order to successfully dissassociate with a VPC elastic ip
|
||||
if association_id is not None:
|
||||
params['AssociationId'] = association_id
|
||||
elif public_ip is not None:
|
||||
params['PublicIp'] = public_ip
|
||||
|
||||
if dry_run:
|
||||
params['DryRun'] = 'true'
|
||||
@ -4236,15 +4324,15 @@ class EC2Connection(AWSQueryConnection):
|
||||
params['DryRun'] = 'true'
|
||||
return self.get_status('DeleteNetworkInterface', params, verb='POST')
|
||||
|
||||
def get_all_vmtypes(self):
|
||||
def get_all_instance_types(self):
|
||||
"""
|
||||
Get all vmtypes available on this cloud (eucalyptus specific)
|
||||
Get all instance_types available on this cloud (eucalyptus specific)
|
||||
|
||||
:rtype: list of :class:`boto.ec2.vmtype.VmType`
|
||||
:return: The requested VmType objects
|
||||
:rtype: list of :class:`boto.ec2.instancetype.InstanceType`
|
||||
:return: The requested InstanceType objects
|
||||
"""
|
||||
params = {}
|
||||
return self.get_list('DescribeVmTypes', params, [('euca:item', VmType)], verb='POST')
|
||||
return self.get_list('DescribeInstanceTypes', params, [('item', InstanceType)], verb='POST')
|
||||
|
||||
def copy_image(self, source_region, source_image_id, name=None,
|
||||
description=None, client_token=None, dry_run=False):
|
||||
|
||||
@ -31,21 +31,10 @@ from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones
|
||||
from boto.ec2.elb.instancestate import InstanceState
|
||||
from boto.ec2.elb.healthcheck import HealthCheck
|
||||
from boto.ec2.elb.listelement import ListElement
|
||||
from boto.regioninfo import RegionInfo
|
||||
from boto.regioninfo import RegionInfo, get_regions, load_regions
|
||||
import boto
|
||||
|
||||
RegionData = {
|
||||
'us-east-1': 'elasticloadbalancing.us-east-1.amazonaws.com',
|
||||
'us-gov-west-1': 'elasticloadbalancing.us-gov-west-1.amazonaws.com',
|
||||
'us-west-1': 'elasticloadbalancing.us-west-1.amazonaws.com',
|
||||
'us-west-2': 'elasticloadbalancing.us-west-2.amazonaws.com',
|
||||
'sa-east-1': 'elasticloadbalancing.sa-east-1.amazonaws.com',
|
||||
'eu-west-1': 'elasticloadbalancing.eu-west-1.amazonaws.com',
|
||||
'ap-northeast-1': 'elasticloadbalancing.ap-northeast-1.amazonaws.com',
|
||||
'ap-southeast-1': 'elasticloadbalancing.ap-southeast-1.amazonaws.com',
|
||||
'ap-southeast-2': 'elasticloadbalancing.ap-southeast-2.amazonaws.com',
|
||||
'cn-north-1': 'elasticloadbalancing.cn-north-1.amazonaws.com.cn',
|
||||
}
|
||||
RegionData = load_regions().get('elasticloadbalancing', {})
|
||||
|
||||
|
||||
def regions():
|
||||
@ -55,13 +44,7 @@ def regions():
|
||||
:rtype: list
|
||||
:return: A list of :class:`boto.RegionInfo` instances
|
||||
"""
|
||||
regions = []
|
||||
for region_name in RegionData:
|
||||
region = RegionInfo(name=region_name,
|
||||
endpoint=RegionData[region_name],
|
||||
connection_cls=ELBConnection)
|
||||
regions.append(region)
|
||||
return regions
|
||||
return get_regions('elasticloadbalancing', connection_cls=ELBConnection)
|
||||
|
||||
|
||||
def connect_to_region(region_name, **kw_params):
@ -92,7 +75,7 @@ class ELBConnection(AWSQueryConnection):
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 security_token=None, validate_certs=True):
                 security_token=None, validate_certs=True, profile_name=None):
        """
        Init method to create a new connection to EC2 Load Balancing Service.

@ -110,13 +93,14 @@ class ELBConnection(AWSQueryConnection):
                                            self.region.endpoint, debug,
                                            https_connection_factory, path,
                                            security_token,
                                            validate_certs=validate_certs)
                                            validate_certs=validate_certs,
                                            profile_name=profile_name)

    def _required_auth_capability(self):
        return ['ec2']

    def build_list_params(self, params, items, label):
        if isinstance(items, str):
        if isinstance(items, basestring):
            items = [items]
        for index, item in enumerate(items):
            params[label % (index + 1)] = item
@ -401,6 +385,7 @@ class ELBConnection(AWSQueryConnection):
        :param attribute: The attribute you wish to change.

        * crossZoneLoadBalancing - Boolean (true)
        * accessLog - :py:class:`AccessLogAttribute` instance

        :type value: string
        :param value: The new value for the attribute
@ -421,6 +406,15 @@ class ELBConnection(AWSQueryConnection):
        if attribute.lower() == 'crosszoneloadbalancing':
            params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled'
                   ] = value
        elif attribute.lower() == 'accesslog':
            params['LoadBalancerAttributes.AccessLog.Enabled'] = \
                value.enabled and 'true' or 'false'
            params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \
                value.s3_bucket_name
            params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \
                value.s3_bucket_prefix
            params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \
                value.emit_interval
        else:
            raise ValueError('InvalidAttribute', attribute)
        return self.get_status('ModifyLoadBalancerAttributes', params,

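The new accessLog branch expects an AccessLogAttribute instance rather than a plain string value; a minimal sketch (the balancer and bucket names are hypothetical):

    import boto.ec2.elb
    from boto.ec2.elb.attributes import AccessLogAttribute

    conn = boto.ec2.elb.connect_to_region('us-east-1')

    access_log = AccessLogAttribute()
    access_log.enabled = True
    access_log.s3_bucket_name = 'my-elb-logs'        # hypothetical bucket
    access_log.s3_bucket_prefix = 'prod/frontend'
    access_log.emit_interval = 5

    conn.modify_lb_attribute('my-balancer', 'accessLog', access_log)
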
@ -40,6 +40,41 @@ class CrossZoneLoadBalancingAttribute(object):
        else:
            self.enabled = False

class AccessLogAttribute(object):
    """
    Represents the AccessLog segment of ELB attributes.
    """
    def __init__(self, connection=None):
        self.enabled = None
        self.s3_bucket_name = None
        self.s3_bucket_prefix = None
        self.emit_interval = None

    def __repr__(self):
        return 'AccessLog(%s, %s, %s, %s)' % (
            self.enabled,
            self.s3_bucket_name,
            self.s3_bucket_prefix,
            self.emit_interval
        )

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        if name == 'Enabled':
            if value.lower() == 'true':
                self.enabled = True
            else:
                self.enabled = False
        elif name == 'S3BucketName':
            self.s3_bucket_name = value
        elif name == 'S3BucketPrefix':
            self.s3_bucket_prefix = value
        elif name == 'EmitInterval':
            self.emit_interval = int(value)


class LbAttributes(object):
    """
    Represents the Attributes of an Elastic Load Balancer.
@ -48,14 +83,18 @@ class LbAttributes(object):
        self.connection = connection
        self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute(
            self.connection)
        self.access_log = AccessLogAttribute(self.connection)

    def __repr__(self):
        return 'LbAttributes(%s)' % (
            repr(self.cross_zone_load_balancing))
        return 'LbAttributes(%s, %s)' % (
            repr(self.cross_zone_load_balancing),
            repr(self.access_log))

    def startElement(self, name, attrs, connection):
        if name == 'CrossZoneLoadBalancing':
            return self.cross_zone_load_balancing

        if name == 'AccessLog':
            return self.access_log

    def endElement(self, name, value, connection):
        pass

@ -186,7 +186,7 @@ class LoadBalancer(object):
        :param zones: The name of the zone(s) to add.

        """
        if isinstance(zones, str) or isinstance(zones, unicode):
        if isinstance(zones, basestring):
            zones = [zones]
        new_zones = self.connection.enable_availability_zones(self.name, zones)
        self.availability_zones = new_zones
@ -199,7 +199,7 @@ class LoadBalancer(object):
        :param zones: The name of the zone(s) to add.

        """
        if isinstance(zones, str) or isinstance(zones, unicode):
        if isinstance(zones, basestring):
            zones = [zones]
        new_zones = self.connection.disable_availability_zones(self.name, zones)
        self.availability_zones = new_zones
@ -266,7 +266,7 @@ class LoadBalancer(object):
            to add to this load balancer.

        """
        if isinstance(instances, str) or isinstance(instances, unicode):
        if isinstance(instances, basestring):
            instances = [instances]
        new_instances = self.connection.register_instances(self.name,
                                                           instances)
@ -281,7 +281,7 @@ class LoadBalancer(object):
            to remove from this load balancer.

        """
        if isinstance(instances, str) or isinstance(instances, unicode):
        if isinstance(instances, basestring):
            instances = [instances]
        new_instances = self.connection.deregister_instances(self.name,
                                                             instances)
@ -324,7 +324,7 @@ class LoadBalancer(object):
                                                 listeners)

    def create_listener(self, inPort, outPort=None, proto="tcp"):
        if outPort == None:
        if outPort is None:
            outPort = inPort
        return self.create_listeners([(inPort, outPort, proto)])

@ -380,7 +380,7 @@ class LoadBalancer(object):
        :param subnets: The name of the subnet(s) to add.

        """
        if isinstance(subnets, str) or isinstance(subnets, unicode):
        if isinstance(subnets, basestring):
            subnets = [subnets]
        new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets)
        self.subnets = new_subnets
@ -393,7 +393,7 @@ class LoadBalancer(object):
        :param subnets: The name of the subnet(s) to detach.

        """
        if isinstance(subnets, str) or isinstance(subnets, unicode):
        if isinstance(subnets, basestring):
            subnets = [subnets]
        new_subnets = self.connection.detach_lb_from_subnets(self.name, subnets)
        self.subnets = new_subnets
@ -408,8 +408,7 @@ class LoadBalancer(object):
        :param security_groups: The name of the security group(s) to add.

        """
        if isinstance(security_groups, str) or \
                isinstance(security_groups, unicode):
        if isinstance(security_groups, basestring):
            security_groups = [security_groups]
        new_sgs = self.connection.apply_security_groups_to_lb(
            self.name, security_groups)

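The repeated change above collapses paired str/unicode checks into one test against their common Python 2 base class; the idiom in isolation:

    # basestring is the shared ancestor of str and unicode in Python 2,
    # so a single isinstance() accepts either spelling of a name.
    zones = u'us-east-1a'
    if isinstance(zones, basestring):
        zones = [zones]      # callers may pass one name or a list
    assert zones == [u'us-east-1a']
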
@ -208,6 +208,8 @@ class Image(TaggedEC2Object):
        * m1.medium
        * m1.large
        * m1.xlarge
        * m3.medium
        * m3.large
        * m3.xlarge
        * m3.2xlarge
        * c1.medium

59
awx/lib/site-packages/boto/ec2/instancetype.py
Normal file
@ -0,0 +1,59 @@
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.


from boto.ec2.ec2object import EC2Object


class InstanceType(EC2Object):
    """
    Represents an EC2 VM Type

    :ivar name: The name of the vm type
    :ivar cores: The number of cpu cores for this vm type
    :ivar memory: The amount of memory in megabytes for this vm type
    :ivar disk: The amount of disk space in gigabytes for this vm type
    """

    def __init__(self, connection=None, name=None, cores=None,
                 memory=None, disk=None):
        super(InstanceType, self).__init__(connection)
        self.connection = connection
        self.name = name
        self.cores = cores
        self.memory = memory
        self.disk = disk

    def __repr__(self):
        return 'InstanceType:%s-%s,%s,%s' % (self.name, self.cores,
                                             self.memory, self.disk)

    def endElement(self, name, value, connection):
        if name == 'name':
            self.name = value
        elif name == 'cpu':
            self.cores = value
        elif name == 'disk':
            self.disk = value
        elif name == 'memory':
            self.memory = value
        else:
            setattr(self, name, value)
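
The endElement handler above maps Eucalyptus response fields onto attributes; what a populated object looks like (the values are hypothetical):

    from boto.ec2.instancetype import InstanceType

    itype = InstanceType(name='m1.small', cores='1', memory='256', disk='5')
    print repr(itype)    # InstanceType:m1.small-1,256,5
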
@ -28,7 +28,8 @@ class EC2RegionInfo(RegionInfo):
    Represents an EC2 Region
    """

    def __init__(self, connection=None, name=None, endpoint=None):
    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        from boto.ec2.connection import EC2Connection
        super(EC2RegionInfo, self).__init__(connection, name, endpoint,
                                            EC2Connection)

@ -41,10 +41,13 @@ class ECSConnection(AWSQueryConnection):
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com',
                 debug=0, https_connection_factory=None, path='/'):
                 debug=0, https_connection_factory=None, path='/',
                 security_token=None, profile_name=None):
        super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
                                    host, debug, https_connection_factory, path)
                                    host, debug, https_connection_factory, path,
                                    security_token=security_token,
                                    profile_name=profile_name)

    def _required_auth_capability(self):
        return ['ecs']
@ -66,7 +69,7 @@ class ECSConnection(AWSQueryConnection):
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)

        if itemSet == None:
        if itemSet is None:
            rs = ItemSet(self, action, params, page)
        else:
            rs = itemSet

@ -110,7 +110,7 @@ class ItemSet(ResponseGroup):
    def startElement(self, name, attrs, connection):
        if name == "Item":
            self.curItem = Item(self._connection)
        elif self.curItem != None:
        elif self.curItem is not None:
            self.curItem.startElement(name, attrs, connection)
        return None

@ -123,13 +123,13 @@ class ItemSet(ResponseGroup):
            self.objs.append(self.curItem)
            self._xml.write(self.curItem.to_xml())
            self.curItem = None
        elif self.curItem != None:
        elif self.curItem is not None:
            self.curItem.endElement(name, value, connection)
        return None

    def next(self):
        """Special paging functionality"""
        if self.iter == None:
        if self.iter is None:
            self.iter = iter(self.objs)
        try:
            return self.iter.next()

@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@ -31,34 +31,7 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from boto.elasticache.layer1 import ElastiCacheConnection
    return [RegionInfo(name='us-east-1',
                       endpoint='elasticache.us-east-1.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='us-west-1',
                       endpoint='elasticache.us-west-1.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='us-west-2',
                       endpoint='elasticache.us-west-2.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='eu-west-1',
                       endpoint='elasticache.eu-west-1.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='ap-northeast-1',
                       endpoint='elasticache.ap-northeast-1.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='ap-southeast-1',
                       endpoint='elasticache.ap-southeast-1.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='ap-southeast-2',
                       endpoint='elasticache.ap-southeast-2.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='sa-east-1',
                       endpoint='elasticache.sa-east-1.amazonaws.com',
                       connection_cls=ElastiCacheConnection),
            RegionInfo(name='cn-north-1',
                       endpoint='elasticache.cn-north-1.amazonaws.com.cn',
                       connection_cls=ElastiCacheConnection),
            ]
    return get_regions('elasticache', connection_cls=ElastiCacheConnection)


def connect_to_region(region_name, **kw_params):

@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@ -31,27 +31,10 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from boto.elastictranscoder.layer1 import ElasticTranscoderConnection
    cls = ElasticTranscoderConnection
    return [
        RegionInfo(name='us-east-1',
                   endpoint='elastictranscoder.us-east-1.amazonaws.com',
                   connection_cls=cls),
        RegionInfo(name='us-west-1',
                   endpoint='elastictranscoder.us-west-1.amazonaws.com',
                   connection_cls=cls),
        RegionInfo(name='us-west-2',
                   endpoint='elastictranscoder.us-west-2.amazonaws.com',
                   connection_cls=cls),
        RegionInfo(name='ap-northeast-1',
                   endpoint='elastictranscoder.ap-northeast-1.amazonaws.com',
                   connection_cls=cls),
        RegionInfo(name='ap-southeast-1',
                   endpoint='elastictranscoder.ap-southeast-1.amazonaws.com',
                   connection_cls=cls),
        RegionInfo(name='eu-west-1',
                   endpoint='elastictranscoder.eu-west-1.amazonaws.com',
                   connection_cls=cls),
    ]
    return get_regions(
        'elastictranscoder',
        connection_cls=ElasticTranscoderConnection
    )


def connect_to_region(region_name, **kw_params):

@ -29,7 +29,7 @@ service from AWS.
from connection import EmrConnection
from step import Step, StreamingStep, JarStep
from bootstrap_action import BootstrapAction
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@ -39,34 +39,7 @@ def regions():
    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    return [RegionInfo(name='us-east-1',
                       endpoint='elasticmapreduce.us-east-1.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='us-west-1',
                       endpoint='us-west-1.elasticmapreduce.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='us-west-2',
                       endpoint='us-west-2.elasticmapreduce.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='ap-northeast-1',
                       endpoint='ap-northeast-1.elasticmapreduce.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='ap-southeast-1',
                       endpoint='ap-southeast-1.elasticmapreduce.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='ap-southeast-2',
                       endpoint='ap-southeast-2.elasticmapreduce.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='eu-west-1',
                       endpoint='eu-west-1.elasticmapreduce.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='sa-east-1',
                       endpoint='sa-east-1.elasticmapreduce.amazonaws.com',
                       connection_cls=EmrConnection),
            RegionInfo(name='cn-north-1',
                       endpoint='elasticmapreduce.cn-north-1.amazonaws.com.cn',
                       connection_cls=EmrConnection),
            ]
    return get_regions('elasticmapreduce', connection_cls=EmrConnection)


def connect_to_region(region_name, **kw_params):

@ -55,7 +55,7 @@ class EmrConnection(AWSQueryConnection):
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 security_token=None, validate_certs=True):
                 security_token=None, validate_certs=True, profile_name=None):
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
@ -67,7 +67,8 @@ class EmrConnection(AWSQueryConnection):
                                            self.region.endpoint, debug,
                                            https_connection_factory, path,
                                            security_token,
                                            validate_certs=validate_certs)
                                            validate_certs=validate_certs,
                                            profile_name=profile_name)
        # Many of the EMR hostnames are of the form:
        #     <region>.<service_name>.amazonaws.com
        # rather than the more common:
@ -265,7 +266,7 @@ class EmrConnection(AWSQueryConnection):
        if step_states:
            self.build_list_params(params, step_states, 'StepStateList.member')

        self.get_object('ListSteps', params, StepSummaryList)
        return self.get_object('ListSteps', params, StepSummaryList)

    def add_tags(self, resource_id, tags):
        """

@ -262,11 +262,12 @@ class Cluster(EmrObject):
        if name == 'Status':
            self.status = ClusterStatus()
            return self.status
        elif name == 'EC2InstanceAttributes':
        elif name == 'Ec2InstanceAttributes':
            self.ec2instanceattributes = Ec2InstanceAttributes()
            return self.ec2instanceattributes
        elif name == 'Applications':
            self.applications = ResultSet([('member', Application)])
            return self.applications
        elif name == 'Tags':
            self.tags = ResultSet([('member', KeyValue)])
            return self.tags

307
awx/lib/site-packages/boto/endpoints.json
Normal file
@ -0,0 +1,307 @@
{
  "autoscaling": {
    "ap-northeast-1": "autoscaling.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "autoscaling.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "autoscaling.ap-southeast-2.amazonaws.com",
    "cn-north-1": "autoscaling.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "autoscaling.eu-west-1.amazonaws.com",
    "sa-east-1": "autoscaling.sa-east-1.amazonaws.com",
    "us-east-1": "autoscaling.us-east-1.amazonaws.com",
    "us-gov-west-1": "autoscaling.us-gov-west-1.amazonaws.com",
    "us-west-1": "autoscaling.us-west-1.amazonaws.com",
    "us-west-2": "autoscaling.us-west-2.amazonaws.com"
  },
  "cloudformation": {
    "ap-northeast-1": "cloudformation.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "cloudformation.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "cloudformation.ap-southeast-2.amazonaws.com",
    "cn-north-1": "cloudformation.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "cloudformation.eu-west-1.amazonaws.com",
    "sa-east-1": "cloudformation.sa-east-1.amazonaws.com",
    "us-east-1": "cloudformation.us-east-1.amazonaws.com",
    "us-west-1": "cloudformation.us-west-1.amazonaws.com",
    "us-west-2": "cloudformation.us-west-2.amazonaws.com"
  },
  "cloudfront": {
    "ap-northeast-1": "cloudfront.amazonaws.com",
    "ap-southeast-1": "cloudfront.amazonaws.com",
    "ap-southeast-2": "cloudfront.amazonaws.com",
    "eu-west-1": "cloudfront.amazonaws.com",
    "sa-east-1": "cloudfront.amazonaws.com",
    "us-east-1": "cloudfront.amazonaws.com",
    "us-west-1": "cloudfront.amazonaws.com",
    "us-west-2": "cloudfront.amazonaws.com"
  },
  "cloudsearch": {
    "ap-southeast-1": "cloudsearch.ap-southeast-1.amazonaws.com",
    "eu-west-1": "cloudsearch.eu-west-1.amazonaws.com",
    "us-east-1": "cloudsearch.us-east-1.amazonaws.com",
    "us-west-1": "cloudsearch.us-west-1.amazonaws.com",
    "us-west-2": "cloudsearch.us-west-2.amazonaws.com"
  },
  "cloudtrail": {
    "us-east-1": "cloudtrail.us-east-1.amazonaws.com",
    "us-west-2": "cloudtrail.us-west-2.amazonaws.com"
  },
  "cloudwatch": {
    "ap-northeast-1": "monitoring.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "monitoring.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "monitoring.ap-southeast-2.amazonaws.com",
    "cn-north-1": "monitoring.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "monitoring.eu-west-1.amazonaws.com",
    "sa-east-1": "monitoring.sa-east-1.amazonaws.com",
    "us-east-1": "monitoring.us-east-1.amazonaws.com",
    "us-gov-west-1": "monitoring.us-gov-west-1.amazonaws.com",
    "us-west-1": "monitoring.us-west-1.amazonaws.com",
    "us-west-2": "monitoring.us-west-2.amazonaws.com"
  },
  "datapipeline": {
    "us-east-1": "datapipeline.us-east-1.amazonaws.com",
    "us-west-2": "datapipeline.us-west-2.amazonaws.com",
    "eu-west-1": "datapipeline.eu-west-1.amazonaws.com",
    "ap-southeast-2": "datapipeline.ap-southeast-2.amazonaws.com",
    "ap-northeast-1": "datapipeline.ap-northeast-1.amazonaws.com"
  },
  "directconnect": {
    "ap-northeast-1": "directconnect.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "directconnect.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "directconnect.ap-southeast-2.amazonaws.com",
    "eu-west-1": "directconnect.eu-west-1.amazonaws.com",
    "sa-east-1": "directconnect.sa-east-1.amazonaws.com",
    "us-east-1": "directconnect.us-east-1.amazonaws.com",
    "us-west-1": "directconnect.us-west-1.amazonaws.com",
    "us-west-2": "directconnect.us-west-2.amazonaws.com"
  },
  "dynamodb": {
    "ap-northeast-1": "dynamodb.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "dynamodb.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "dynamodb.ap-southeast-2.amazonaws.com",
    "cn-north-1": "dynamodb.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "dynamodb.eu-west-1.amazonaws.com",
    "sa-east-1": "dynamodb.sa-east-1.amazonaws.com",
    "us-east-1": "dynamodb.us-east-1.amazonaws.com",
    "us-gov-west-1": "dynamodb.us-gov-west-1.amazonaws.com",
    "us-west-1": "dynamodb.us-west-1.amazonaws.com",
    "us-west-2": "dynamodb.us-west-2.amazonaws.com"
  },
  "ec2": {
    "ap-northeast-1": "ec2.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "ec2.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "ec2.ap-southeast-2.amazonaws.com",
    "cn-north-1": "ec2.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "ec2.eu-west-1.amazonaws.com",
    "sa-east-1": "ec2.sa-east-1.amazonaws.com",
    "us-east-1": "ec2.us-east-1.amazonaws.com",
    "us-gov-west-1": "ec2.us-gov-west-1.amazonaws.com",
    "us-west-1": "ec2.us-west-1.amazonaws.com",
    "us-west-2": "ec2.us-west-2.amazonaws.com"
  },
  "elasticache": {
    "ap-northeast-1": "elasticache.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "elasticache.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "elasticache.ap-southeast-2.amazonaws.com",
    "cn-north-1": "elasticache.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "elasticache.eu-west-1.amazonaws.com",
    "sa-east-1": "elasticache.sa-east-1.amazonaws.com",
    "us-east-1": "elasticache.us-east-1.amazonaws.com",
    "us-west-1": "elasticache.us-west-1.amazonaws.com",
    "us-west-2": "elasticache.us-west-2.amazonaws.com"
  },
  "elasticbeanstalk": {
    "ap-northeast-1": "elasticbeanstalk.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "elasticbeanstalk.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "elasticbeanstalk.ap-southeast-2.amazonaws.com",
    "eu-west-1": "elasticbeanstalk.eu-west-1.amazonaws.com",
    "sa-east-1": "elasticbeanstalk.sa-east-1.amazonaws.com",
    "us-east-1": "elasticbeanstalk.us-east-1.amazonaws.com",
    "us-west-1": "elasticbeanstalk.us-west-1.amazonaws.com",
    "us-west-2": "elasticbeanstalk.us-west-2.amazonaws.com"
  },
  "elasticloadbalancing": {
    "ap-northeast-1": "elasticloadbalancing.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "elasticloadbalancing.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "elasticloadbalancing.ap-southeast-2.amazonaws.com",
    "cn-north-1": "elasticloadbalancing.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "elasticloadbalancing.eu-west-1.amazonaws.com",
    "sa-east-1": "elasticloadbalancing.sa-east-1.amazonaws.com",
    "us-east-1": "elasticloadbalancing.us-east-1.amazonaws.com",
    "us-gov-west-1": "elasticloadbalancing.us-gov-west-1.amazonaws.com",
    "us-west-1": "elasticloadbalancing.us-west-1.amazonaws.com",
    "us-west-2": "elasticloadbalancing.us-west-2.amazonaws.com"
  },
  "elasticmapreduce": {
    "ap-northeast-1": "ap-northeast-1.elasticmapreduce.amazonaws.com",
    "ap-southeast-1": "ap-southeast-1.elasticmapreduce.amazonaws.com",
    "ap-southeast-2": "ap-southeast-2.elasticmapreduce.amazonaws.com",
    "cn-north-1": "elasticmapreduce.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "elasticmapreduce.eu-west-1.amazonaws.com",
    "sa-east-1": "sa-east-1.elasticmapreduce.amazonaws.com",
    "us-east-1": "elasticmapreduce.us-east-1.amazonaws.com",
    "us-gov-west-1": "us-gov-west-1.elasticmapreduce.amazonaws.com",
    "us-west-1": "us-west-1.elasticmapreduce.amazonaws.com",
    "us-west-2": "us-west-2.elasticmapreduce.amazonaws.com"
  },
  "elastictranscoder": {
    "ap-northeast-1": "elastictranscoder.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "elastictranscoder.ap-southeast-1.amazonaws.com",
    "eu-west-1": "elastictranscoder.eu-west-1.amazonaws.com",
    "us-east-1": "elastictranscoder.us-east-1.amazonaws.com",
    "us-west-1": "elastictranscoder.us-west-1.amazonaws.com",
    "us-west-2": "elastictranscoder.us-west-2.amazonaws.com"
  },
  "glacier": {
    "ap-northeast-1": "glacier.ap-northeast-1.amazonaws.com",
    "ap-southeast-2": "glacier.ap-southeast-2.amazonaws.com",
    "cn-north-1": "glacier.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "glacier.eu-west-1.amazonaws.com",
    "us-east-1": "glacier.us-east-1.amazonaws.com",
    "us-west-1": "glacier.us-west-1.amazonaws.com",
    "us-west-2": "glacier.us-west-2.amazonaws.com"
  },
  "iam": {
    "ap-northeast-1": "iam.amazonaws.com",
    "ap-southeast-1": "iam.amazonaws.com",
    "ap-southeast-2": "iam.amazonaws.com",
    "cn-north-1": "iam.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "iam.amazonaws.com",
    "sa-east-1": "iam.amazonaws.com",
    "us-east-1": "iam.amazonaws.com",
    "us-gov-west-1": "iam.us-gov.amazonaws.com",
    "us-west-1": "iam.amazonaws.com",
    "us-west-2": "iam.amazonaws.com"
  },
  "importexport": {
    "ap-northeast-1": "importexport.amazonaws.com",
    "ap-southeast-1": "importexport.amazonaws.com",
    "ap-southeast-2": "importexport.amazonaws.com",
    "eu-west-1": "importexport.amazonaws.com",
    "sa-east-1": "importexport.amazonaws.com",
    "us-east-1": "importexport.amazonaws.com",
    "us-west-1": "importexport.amazonaws.com",
    "us-west-2": "importexport.amazonaws.com"
  },
  "kinesis": {
    "us-east-1": "kinesis.us-east-1.amazonaws.com"
  },
  "opsworks": {
    "us-east-1": "opsworks.us-east-1.amazonaws.com"
  },
  "rds": {
    "ap-northeast-1": "rds.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "rds.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "rds.ap-southeast-2.amazonaws.com",
    "cn-north-1": "rds.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "rds.eu-west-1.amazonaws.com",
    "sa-east-1": "rds.sa-east-1.amazonaws.com",
    "us-east-1": "rds.amazonaws.com",
    "us-gov-west-1": "rds.us-gov-west-1.amazonaws.com",
    "us-west-1": "rds.us-west-1.amazonaws.com",
    "us-west-2": "rds.us-west-2.amazonaws.com"
  },
  "redshift": {
    "ap-northeast-1": "redshift.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "redshift.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "redshift.ap-southeast-2.amazonaws.com",
    "eu-west-1": "redshift.eu-west-1.amazonaws.com",
    "us-east-1": "redshift.us-east-1.amazonaws.com",
    "us-west-2": "redshift.us-west-2.amazonaws.com"
  },
  "route53": {
    "ap-northeast-1": "route53.amazonaws.com",
    "ap-southeast-1": "route53.amazonaws.com",
    "ap-southeast-2": "route53.amazonaws.com",
    "eu-west-1": "route53.amazonaws.com",
    "sa-east-1": "route53.amazonaws.com",
    "us-east-1": "route53.amazonaws.com",
    "us-west-1": "route53.amazonaws.com",
    "us-west-2": "route53.amazonaws.com"
  },
  "s3": {
    "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
    "cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "s3-eu-west-1.amazonaws.com",
    "sa-east-1": "s3-sa-east-1.amazonaws.com",
    "us-east-1": "s3.amazonaws.com",
    "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com",
    "us-west-1": "s3-us-west-1.amazonaws.com",
    "us-west-2": "s3-us-west-2.amazonaws.com"
  },
  "sdb": {
    "ap-northeast-1": "sdb.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "sdb.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "sdb.ap-southeast-2.amazonaws.com",
    "eu-west-1": "sdb.eu-west-1.amazonaws.com",
    "sa-east-1": "sdb.sa-east-1.amazonaws.com",
    "us-east-1": "sdb.amazonaws.com",
    "us-west-1": "sdb.us-west-1.amazonaws.com",
    "us-west-2": "sdb.us-west-2.amazonaws.com"
  },
  "ses": {
    "eu-west-1": "email.eu-west-1.amazonaws.com",
    "us-east-1": "email.us-east-1.amazonaws.com",
    "us-west-2": "email.us-west-2.amazonaws.com"
  },
  "sns": {
    "ap-northeast-1": "sns.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "sns.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "sns.ap-southeast-2.amazonaws.com",
    "cn-north-1": "sns.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "sns.eu-west-1.amazonaws.com",
    "sa-east-1": "sns.sa-east-1.amazonaws.com",
    "us-east-1": "sns.us-east-1.amazonaws.com",
    "us-gov-west-1": "sns.us-gov-west-1.amazonaws.com",
    "us-west-1": "sns.us-west-1.amazonaws.com",
    "us-west-2": "sns.us-west-2.amazonaws.com"
  },
  "sqs": {
    "ap-northeast-1": "ap-northeast-1.queue.amazonaws.com",
    "ap-southeast-1": "ap-southeast-1.queue.amazonaws.com",
    "ap-southeast-2": "ap-southeast-2.queue.amazonaws.com",
    "cn-north-1": "sqs.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "eu-west-1.queue.amazonaws.com",
    "sa-east-1": "sa-east-1.queue.amazonaws.com",
    "us-east-1": "queue.amazonaws.com",
    "us-gov-west-1": "us-gov-west-1.queue.amazonaws.com",
    "us-west-1": "us-west-1.queue.amazonaws.com",
    "us-west-2": "us-west-2.queue.amazonaws.com"
  },
  "storagegateway": {
    "ap-northeast-1": "storagegateway.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "storagegateway.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "storagegateway.ap-southeast-2.amazonaws.com",
    "eu-west-1": "storagegateway.eu-west-1.amazonaws.com",
    "sa-east-1": "storagegateway.sa-east-1.amazonaws.com",
    "us-east-1": "storagegateway.us-east-1.amazonaws.com",
    "us-west-1": "storagegateway.us-west-1.amazonaws.com",
    "us-west-2": "storagegateway.us-west-2.amazonaws.com"
  },
  "sts": {
    "ap-northeast-1": "sts.amazonaws.com",
    "ap-southeast-1": "sts.amazonaws.com",
    "ap-southeast-2": "sts.amazonaws.com",
    "cn-north-1": "sts.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "sts.amazonaws.com",
    "sa-east-1": "sts.amazonaws.com",
    "us-east-1": "sts.amazonaws.com",
    "us-gov-west-1": "sts.us-gov-west-1.amazonaws.com",
    "us-west-1": "sts.amazonaws.com",
    "us-west-2": "sts.amazonaws.com"
  },
  "support": {
    "us-east-1": "support.us-east-1.amazonaws.com"
  },
  "swf": {
    "ap-northeast-1": "swf.ap-northeast-1.amazonaws.com",
    "ap-southeast-1": "swf.ap-southeast-1.amazonaws.com",
    "ap-southeast-2": "swf.ap-southeast-2.amazonaws.com",
    "cn-north-1": "swf.cn-north-1.amazonaws.com.cn",
    "eu-west-1": "swf.eu-west-1.amazonaws.com",
    "sa-east-1": "swf.sa-east-1.amazonaws.com",
    "us-east-1": "swf.us-east-1.amazonaws.com",
    "us-gov-west-1": "swf.us-gov-west-1.amazonaws.com",
    "us-west-1": "swf.us-west-1.amazonaws.com",
    "us-west-2": "swf.us-west-2.amazonaws.com"
  }
}
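
endpoints.json is the data file that load_regions()/get_regions() read; resolving an endpoint from it looks like this (the diff above already imports both helpers):

    from boto.regioninfo import load_regions, get_regions
    from boto.ec2.connection import EC2Connection

    endpoints = load_regions()
    print endpoints['ec2']['us-west-2']     # ec2.us-west-2.amazonaws.com

    # get_regions() wraps the same data in RegionInfo objects.
    for region in get_regions('ec2', connection_cls=EC2Connection):
        print region.name, region.endpoint
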
@ -27,6 +27,7 @@ Exception classes - Subclassing allows you to check for specific errors
import base64
import xml.sax
from boto import handler
from boto.compat import json
from boto.resultset import ResultSet


@ -88,12 +89,25 @@ class BotoServerError(StandardError):
                h = handler.XmlHandlerWrapper(self, self)
                h.parseString(self.body)
            except (TypeError, xml.sax.SAXParseException), pe:
                # Remove unparsable message body so we don't include garbage
                # in exception. But first, save self.body in self.error_message
                # because occasionally we get error messages from Eucalyptus
                # that are just text strings that we want to preserve.
                self.message = self.body
                self.body = None
                # What if it's JSON? Let's try that.
                try:
                    parsed = json.loads(self.body)

                    if 'RequestId' in parsed:
                        self.request_id = parsed['RequestId']
                    if 'Error' in parsed:
                        if 'Code' in parsed['Error']:
                            self.error_code = parsed['Error']['Code']
                        if 'Message' in parsed['Error']:
                            self.message = parsed['Error']['Message']

                except ValueError:
                    # Remove unparsable message body so we don't include garbage
                    # in exception. But first, save self.body in self.error_message
                    # because occasionally we get error messages from Eucalyptus
                    # that are just text strings that we want to preserve.
                    self.message = self.body
                    self.body = None

    def __getattr__(self, name):
        if name == 'error_message':

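The new fallback means JSON error bodies now populate the exception fields instead of being discarded; the parsing path in isolation:

    import json

    body = '{"RequestId": "req-1", "Error": {"Code": "Throttling", "Message": "Rate exceeded"}}'
    try:
        parsed = json.loads(body)
        print parsed['Error']['Code'], parsed['Error']['Message']
    except ValueError:
        # Not XML and not JSON: keep the raw text as the message.
        print body
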
@ -1,3 +1,26 @@
# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from decimal import Decimal


@ -21,7 +21,7 @@
# IN THE SOFTWARE.
#

from boto.ec2.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@ -32,28 +32,7 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from boto.glacier.layer2 import Layer2
    return [RegionInfo(name='us-east-1',
                       endpoint='glacier.us-east-1.amazonaws.com',
                       connection_cls=Layer2),
            RegionInfo(name='us-west-1',
                       endpoint='glacier.us-west-1.amazonaws.com',
                       connection_cls=Layer2),
            RegionInfo(name='us-west-2',
                       endpoint='glacier.us-west-2.amazonaws.com',
                       connection_cls=Layer2),
            RegionInfo(name='ap-northeast-1',
                       endpoint='glacier.ap-northeast-1.amazonaws.com',
                       connection_cls=Layer2),
            RegionInfo(name='eu-west-1',
                       endpoint='glacier.eu-west-1.amazonaws.com',
                       connection_cls=Layer2),
            RegionInfo(name='ap-southeast-2',
                       endpoint='glacier.ap-southeast-2.amazonaws.com',
                       connection_cls=Layer2),
            RegionInfo(name='cn-north-1',
                       endpoint='glacier.cn-north-1.amazonaws.com.cn',
                       connection_cls=Layer2),
            ]
    return get_regions('glacier', connection_cls=Layer2)


def connect_to_region(region_name, **kw_params):

@ -19,6 +19,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import with_statement

import os
import math
import threading

@ -97,9 +97,12 @@ class Job(object):
                    actual_tree_hash, response['TreeHash'], byte_range))
        return response

    def _calc_num_chunks(self, chunk_size):
        return int(math.ceil(self.archive_size / float(chunk_size)))

    def download_to_file(self, filename, chunk_size=DefaultPartSize,
                         verify_hashes=True, retry_exceptions=(socket.error,)):
        """Download an archive to a file.
        """Download an archive to a file by name.

        :type filename: str
        :param filename: The name of the file where the archive
@ -114,11 +117,33 @@ class Job(object):
            the tree hashes for each downloaded chunk.

        """
        num_chunks = int(math.ceil(self.archive_size / float(chunk_size)))
        num_chunks = self._calc_num_chunks(chunk_size)
        with open(filename, 'wb') as output_file:
            self._download_to_fileob(output_file, num_chunks, chunk_size,
                                     verify_hashes, retry_exceptions)

    def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
                            verify_hashes=True,
                            retry_exceptions=(socket.error,)):
        """Download an archive to a file object.

        :type output_file: file
        :param output_file: The file object where the archive
            contents will be saved.

        :type chunk_size: int
        :param chunk_size: The chunk size to use when downloading
            the archive.

        :type verify_hashes: bool
        :param verify_hashes: Indicates whether or not to verify
            the tree hashes for each downloaded chunk.

        """
        num_chunks = self._calc_num_chunks(chunk_size)
        self._download_to_fileob(output_file, num_chunks, chunk_size,
                                 verify_hashes, retry_exceptions)

    def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
                            retry_exceptions):
        for i in xrange(num_chunks):

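The new download_to_fileobj lets callers stream into anything file-like rather than only a named path; a minimal sketch (the vault name and job id below are hypothetical):

    import boto.glacier

    layer2 = boto.glacier.connect_to_region('us-east-1')
    vault = layer2.get_vault('my-vault')              # hypothetical vault
    job = vault.get_job('JOB-ID-FROM-RETRIEVAL')      # hypothetical job id

    with open('archive.tar', 'wb') as fp:
        job.download_to_fileobj(fp)    # same chunking as download_to_file
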
File diff suppressed because it is too large
@ -300,7 +300,9 @@ class Vault(object):
        return self.get_job(response['JobId'])

    def retrieve_inventory(self, sns_topic=None,
                           description=None):
                           description=None, byte_range=None,
                           start_date=None, end_date=None,
                           limit=None):
        """
        Initiate an inventory retrieval job to list the items in the
        vault. You will need to wait for the notification from
@ -315,6 +317,18 @@ class Vault(object):
            sends notification when the job is completed and the output
            is ready for you to download.

        :type byte_range: str
        :param byte_range: Range of bytes to retrieve.

        :type start_date: DateTime
        :param start_date: Beginning of the date range to query.

        :type end_date: DateTime
        :param end_date: End of the date range to query.

        :type limit: int
        :param limit: Limits the number of results returned.

        :rtype: str
        :return: The ID of the job
        """
@ -323,6 +337,19 @@ class Vault(object):
            job_data['SNSTopic'] = sns_topic
        if description is not None:
            job_data['Description'] = description
        if byte_range is not None:
            job_data['RetrievalByteRange'] = byte_range
        if start_date is not None or end_date is not None or limit is not None:
            rparams = {}

            if start_date is not None:
                rparams['StartDate'] = start_date.isoformat()
            if end_date is not None:
                rparams['EndDate'] = end_date.isoformat()
            if limit is not None:
                rparams['Limit'] = limit

            job_data['InventoryRetrievalParameters'] = rparams

        response = self.layer1.initiate_job(self.name, job_data)
        return response['JobId']
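
The new filtering parameters narrow an inventory job server-side; a minimal sketch (the vault name is hypothetical):

    import datetime
    import boto.glacier

    layer2 = boto.glacier.connect_to_region('us-east-1')
    vault = layer2.get_vault('my-vault')          # hypothetical vault

    job_id = vault.retrieve_inventory(
        start_date=datetime.datetime(2014, 1, 1),
        end_date=datetime.datetime(2014, 2, 1),
        limit=100)
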
@ -340,6 +367,18 @@ class Vault(object):
            sends notification when the job is completed and the output
            is ready for you to download.

        :type byte_range: str
        :param byte_range: Range of bytes to retrieve.

        :type start_date: DateTime
        :param start_date: Beginning of the date range to query.

        :type end_date: DateTime
        :param end_date: End of the date range to query.

        :type limit: int
        :param limit: Limits the number of results returned.

        :rtype: :class:`boto.glacier.job.Job`
        :return: A Job object representing the retrieval job.
        """

@ -38,7 +38,7 @@ def versioned_bucket_lister(bucket, prefix='', delimiter='',
        generation_marker = rs.next_generation_marker
        more_results= rs.is_truncated

class VersionedBucketListResultSet:
class VersionedBucketListResultSet(object):
    """
    A resultset for listing versions within a bucket. Uses the bucket_lister
    generator function and implements the iterator interface. This

@ -25,14 +25,14 @@ from boto.s3.connection import SubdomainCallingFormat
from boto.s3.connection import check_lowercase_bucketname
from boto.utils import get_utf8_value

class Location:
class Location(object):
    DEFAULT = 'US'
    EU = 'EU'

class GSConnection(S3Connection):

    DefaultHost = 'storage.googleapis.com'
    QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
    QueryString = 'Signature=%s&Expires=%d&GoogleAccessId=%s'

    def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
@ -103,3 +103,27 @@ class GSConnection(S3Connection):
            raise self.provider.storage_response_error(
                response.status, response.reason, body)

    def get_bucket(self, bucket_name, validate=True, headers=None):
        """
        Retrieves a bucket by name.

        If the bucket does not exist, an ``S3ResponseError`` will be raised. If
        you are unsure if the bucket exists or not, you can use the
        ``S3Connection.lookup`` method, which will either return a valid bucket
        or ``None``.

        :type bucket_name: string
        :param bucket_name: The name of the bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request to
            AWS.

        :type validate: boolean
        :param validate: If ``True``, it will try to fetch all keys within the
            given bucket. (Default: ``True``)
        """
        bucket = self.bucket_class(self, bucket_name)
        if validate:
            bucket.get_all_keys(headers, maxkeys=0)
        return bucket

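A sketch of the difference between get_bucket and lookup under the new override (the bucket names are hypothetical):

    import boto

    conn = boto.connect_gs('GS_ACCESS_KEY', 'GS_SECRET_KEY')

    # Raises S3ResponseError if the bucket is missing.
    bucket = conn.get_bucket('my-gs-bucket')

    # Returns None instead of raising when you are unsure.
    maybe = conn.lookup('possibly-missing-bucket')
    if maybe is None:
        print 'no such bucket'
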
@ -109,6 +109,9 @@ class Key(S3Key):
        self.metageneration = resp.getheader('x-goog-metageneration', None)
        self.generation = resp.getheader('x-goog-generation', None)

    def handle_restore_headers(self, response):
        return

    def handle_addl_headers(self, headers):
        for key, value in headers:
            if key == 'x-goog-hash':
@ -219,7 +222,7 @@ class Key(S3Key):
            with the stored object in the response. See
            http://goo.gl/sMkcC for details.
        """
        if self.bucket != None:
        if self.bucket is not None:
            if res_download_handler:
                res_download_handler.get_file(self, fp, headers, cb, num_cb,
                                              torrent=torrent,
@ -528,7 +531,7 @@ class Key(S3Key):

        if hasattr(fp, 'name'):
            self.path = fp.name
        if self.bucket != None:
        if self.bucket is not None:
            if isinstance(fp, KeyFile):
                # Avoid EOF seek for KeyFile case as it's very inefficient.
                key = fp.getkey()
@ -552,12 +555,12 @@ class Key(S3Key):
                fp.seek(spos)
                size = self.size

            if md5 == None:
            if md5 is None:
                md5 = self.compute_md5(fp, size)
            self.md5 = md5[0]
            self.base64md5 = md5[1]

            if self.name == None:
            if self.name is None:
                self.name = self.md5

            if not replace:
@ -792,7 +795,7 @@ class Key(S3Key):
            the acl will only be updated if its current metageneration number is
            this value.
        """
        if self.bucket != None:
        if self.bucket is not None:
            self.bucket.set_acl(acl_or_str, self.name, headers=headers,
                                generation=generation,
                                if_generation=if_generation,
@ -809,7 +812,7 @@ class Key(S3Key):

        :rtype: :class:`.gs.acl.ACL`
        """
        if self.bucket != None:
        if self.bucket is not None:
            return self.bucket.get_acl(self.name, headers=headers,
                                       generation=generation)

@ -824,7 +827,7 @@ class Key(S3Key):

        :rtype: str
        """
        if self.bucket != None:
        if self.bucket is not None:
            return self.bucket.get_xml_acl(self.name, headers=headers,
                                           generation=generation)

@ -852,7 +855,7 @@ class Key(S3Key):
            the acl will only be updated if its current metageneration number is
            this value.
        """
        if self.bucket != None:
        if self.bucket is not None:
            return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
                                           generation=generation,
                                           if_generation=if_generation,
@ -883,7 +886,7 @@ class Key(S3Key):
            the acl will only be updated if its current metageneration number is
            this value.
        """
        if self.bucket != None:
        if self.bucket is not None:
            return self.bucket.set_canned_acl(
                acl_str,
                self.name,

@ -102,13 +102,13 @@ class ResumableUploadHandler(object):
            # Ignore non-existent file (happens first time an upload
            # is attempted on a file), but warn user for other errors.
            if e.errno != errno.ENOENT:
                # Will restart because self.tracker_uri == None.
                # Will restart because self.tracker_uri is None.
                print('Couldn\'t read URI tracker file (%s): %s. Restarting '
                      'upload from scratch.' %
                      (self.tracker_file_name, e.strerror))
        except InvalidUriError, e:
            # Warn user, but proceed (will restart because
            # self.tracker_uri == None).
            # self.tracker_uri is None).
            print('Invalid tracker URI (%s) found in URI tracker file '
                  '(%s). Restarting upload from scratch.' %
                  (uri, self.tracker_file_name))
@ -124,8 +124,9 @@ class ResumableUploadHandler(object):
            return
        f = None
        try:
            f = open(self.tracker_file_name, 'w')
            f.write(self.tracker_uri)
            with os.fdopen(os.open(self.tracker_file_name,
                                   os.O_WRONLY | os.O_CREAT, 0600), 'w') as f:
                f.write(self.tracker_uri)
        except IOError, e:
            raise ResumableUploadException(
                'Couldn\'t write URI tracker file (%s): %s.\nThis can happen
                'unwritable directory)' %
                (self.tracker_file_name, e.strerror),
                ResumableTransferDisposition.ABORT)
@ -134,9 +135,6 @@ class ResumableUploadHandler(object):
        finally:
            if f:
                f.close()

    def _set_tracker_uri(self, uri):
        """

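The switch to os.fdopen(os.open(...)) creates the tracker file with owner-only permissions and guarantees the handle is closed; the idiom in isolation (the path is hypothetical):

    import os

    # 0600: readable and writable by the owner only; the with-block
    # replaces the old try/finally close() dance.
    path = '/tmp/tracker.uri'           # hypothetical path
    with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0600), 'w') as f:
        f.write('https://example.com/resumable-upload-uri')
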
@ -20,7 +20,7 @@
# IN THE SOFTWARE.


class User:
class User(object):
    def __init__(self, parent=None, id='', name=''):
        if parent:
            parent.owner = self

@ -32,7 +32,7 @@ class XmlHandler(xml.sax.ContentHandler):
    def startElement(self, name, attrs):
        self.current_text = ''
        new_node = self.nodes[-1][1].startElement(name, attrs, self.connection)
        if new_node != None:
        if new_node is not None:
            self.nodes.append((name, new_node))

    def endElement(self, name):

@ -109,8 +109,12 @@ class CertValidatingHTTPSConnection(httplib.HTTPConnection):
        if hasattr(self, "timeout") and self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
        boto.log.debug("wrapping ssl socket; CA certificate file=%s",
                       self.ca_certs)
        msg = "wrapping ssl socket; "
        if self.ca_certs:
            msg += "CA certificate file=%s" %self.ca_certs
        else:
            msg += "using system provided SSL certs"
        boto.log.debug(msg)
        self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,
                                    certfile=self.cert_file,
                                    cert_reqs=ssl.CERT_REQUIRED,

@ -22,8 +22,8 @@

# this is here for backward compatibility
# originally, the IAMConnection class was defined here
from connection import IAMConnection
from boto.regioninfo import RegionInfo
from boto.iam.connection import IAMConnection
from boto.regioninfo import RegionInfo, get_regions


class IAMRegionInfo(RegionInfo):
@ -50,16 +50,22 @@ def regions():
    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
    """
    return [IAMRegionInfo(name='universal',
                          endpoint='iam.amazonaws.com',
                          connection_cls=IAMConnection),
            IAMRegionInfo(name='us-gov-west-1',
                          endpoint='iam.us-gov.amazonaws.com',
                          connection_cls=IAMConnection),
            IAMRegionInfo(name='cn-north-1',
                          endpoint='iam.cn-north-1.amazonaws.com.cn',
                          connection_cls=IAMConnection)
            ]
    regions = get_regions(
        'iam',
        region_cls=IAMRegionInfo,
        connection_cls=IAMConnection
    )

    # For historical reasons, we had a "universal" endpoint as well.
    regions.append(
        IAMRegionInfo(
            name='universal',
            endpoint='iam.amazonaws.com',
            connection_cls=IAMConnection
        )
    )

    return regions

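The rebuilt regions() keeps the legacy universal endpoint alongside the data-driven ones; a quick check:

    import boto.iam

    for region in boto.iam.regions():
        print region.name, region.endpoint
    # ... ends with 'universal' iam.amazonaws.com

    conn = boto.iam.connect_to_region('universal')
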
def connect_to_region(region_name, **kw_params):

@ -40,15 +40,16 @@ class IAMConnection(AWSQueryConnection):
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
                 debug=0, https_connection_factory=None,
                 path='/', security_token=None, validate_certs=True):
                 debug=0, https_connection_factory=None, path='/',
                 security_token=None, validate_certs=True, profile_name=None):
        super(IAMConnection, self).__init__(aws_access_key_id,
                                            aws_secret_access_key,
                                            is_secure, port, proxy,
                                            proxy_port, proxy_user, proxy_pass,
                                            host, debug, https_connection_factory,
                                            path, security_token,
                                            validate_certs=validate_certs)
                                            validate_certs=validate_certs,
                                            profile_name=profile_name)

    def _required_auth_capability(self):
        return ['hmac-v4']

@ -33,7 +33,7 @@ class XmlHandler(xml.sax.ContentHandler):
    def startElement(self, name, attrs):
        self.current_text = ''
        t = self.nodes[-1][1].startElement(name, attrs, self.connection)
        if t != None:
        if t is not None:
            if isinstance(t, tuple):
                self.nodes.append(t)
            else:

@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
from boto.regioninfo import RegionInfo, get_regions


def regions():
@ -31,11 +31,7 @@ def regions():
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    from boto.kinesis.layer1 import KinesisConnection

    return [RegionInfo(name='us-east-1',
                       endpoint='kinesis.us-east-1.amazonaws.com',
                       connection_cls=KinesisConnection),
            ]
    return get_regions('kinesis', connection_cls=KinesisConnection)


def connect_to_region(region_name, **kw_params):

@ -532,11 +532,10 @@ class KinesisConnection(AWSQueryConnection):
|
||||
placed and the sequence number that was assigned to the data
|
||||
record.
|
||||
|
||||
The `SequenceNumberForOrdering` sets the initial sequence
|
||||
number for the partition key. Later `PutRecord` requests to
|
||||
the same partition key (from the same client) will
|
||||
automatically increase from `SequenceNumberForOrdering`,
|
||||
ensuring strict sequential ordering.
|
||||
Sequence numbers generally increase over time. To guarantee
|
||||
strictly increasing ordering, use the
|
||||
`SequenceNumberForOrdering` parameter. For more information,
|
||||
see the `Amazon Kinesis Developer Guide`_.
|
||||
|
||||
If a `PutRecord` request cannot be processed because of
|
||||
insufficient provisioned throughput on the shard involved in
|
||||
@ -550,8 +549,10 @@ class KinesisConnection(AWSQueryConnection):
|
||||
:param stream_name: The name of the stream to put the data record into.
|
||||
|
||||
:type data: blob
|
||||
:param data: The data blob to put into the record, which will be Base64
|
||||
encoded. The maximum size of the data blob is 50 kilobytes (KB).
|
||||
:param data: The data blob to put into the record, which is
|
||||
Base64-encoded when the blob is serialized.
|
||||
The maximum size of the data blob (the payload after
|
||||
Base64-decoding) is 50 kilobytes (KB)
|
||||
Set `b64_encode` to disable automatic Base64 encoding.
|
||||
|
||||
:type partition_key: string
|
||||
@@ -571,10 +572,12 @@ class KinesisConnection(AWSQueryConnection):
         partition key hash.

     :type sequence_number_for_ordering: string
-    :param sequence_number_for_ordering: The sequence number to use as the
-        initial number for the partition key. Subsequent calls to
-        `PutRecord` from the same client and for the same partition key
-        will increase from the `SequenceNumberForOrdering` value.
+    :param sequence_number_for_ordering: Guarantees strictly increasing
+        sequence numbers, for puts from the same client and to the same
+        partition key. Usage: set the `SequenceNumberForOrdering` of record
+        n to the sequence number of record n-1 (as returned in the
+        PutRecordResult when putting record n-1). If this parameter is not
+        set, records will be coarsely ordered based on arrival time.

     :type b64_encode: boolean
     :param b64_encode: Whether to Base64 encode `data`. Can be set to

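Note: per the rewritten docstring, strict per-partition-key ordering is
opt-in: chain each record's `sequence_number_for_ordering` to the sequence
number returned for the previous record. A minimal sketch, assuming a
stream named 'my-stream' already exists and that `put_record` returns the
parsed PutRecordResult (with a 'SequenceNumber' key):

    import boto.kinesis

    conn = boto.kinesis.connect_to_region('us-east-1')
    seq = None
    for payload in ['rec-1', 'rec-2', 'rec-3']:
        result = conn.put_record('my-stream', payload, 'key-a',
                                 sequence_number_for_ordering=seq)
        seq = result['SequenceNumber']   # feed into the next put
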
@@ -118,7 +118,7 @@ class SSHClient(object):
     def run(self, command):
         """
         Execute a command on the remote host. Return a tuple containing
-        an integer status and a two strings, the first containing stdout
+        an integer status and two strings, the first containing stdout
         and the second containing stderr from the command.
         """
         boto.log.debug('running:%s on %s' % (command, self.server.instance_id))

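Note: the corrected docstring now matches what `run()` actually returns, a
3-tuple. Hypothetical usage, assuming `client` is an already-connected
SSHClient:

    status, stdout, stderr = client.run('uptime')
    if status != 0:
        print stderr
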
@@ -182,7 +182,7 @@ class LocalClient(object):
         log_fp = StringIO.StringIO()
         process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        while process.poll() == None:
+        while process.poll() is None:
             time.sleep(1)
             t = process.communicate()
             log_fp.write(t[0])

@@ -137,7 +137,7 @@ class CommandLineGetter(object):

     def get_region(self, params):
         region = params.get('region', None)
-        if isinstance(region, str) or isinstance(region, unicode):
+        if isinstance(region, basestring):
             region = boto.ec2.get_region(region)
         params['region'] = region
         if not region:

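Note: in Python 2, `basestring` is the common ancestor of `str` and
`unicode`, so the single isinstance check above replaces the removed pair
(the same change appears in the two hunks that follow). For example:

    print isinstance('ascii', basestring)      # True
    print isinstance(u'unicode', basestring)   # True
    print isinstance(42, basestring)           # False
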
@@ -189,7 +189,7 @@ class CommandLineGetter(object):

     def get_group(self, params):
         group = params.get('group', None)
-        if isinstance(group, str) or isinstance(group, unicode):
+        if isinstance(group, basestring):
             group_list = self.ec2.get_all_security_groups()
             for g in group_list:
                 if g.name == group:

@@ -202,7 +202,7 @@ class CommandLineGetter(object):

     def get_key(self, params):
         keypair = params.get('keypair', None)
-        if isinstance(keypair, str) or isinstance(keypair, unicode):
+        if isinstance(keypair, basestring):
             key_list = self.ec2.get_all_key_pairs()
             for k in key_list:
                 if k.name == keypair:

@@ -323,7 +323,7 @@ class Server(Model):
         i = 0
         elastic_ip = params.get('elastic_ip')
         instances = reservation.instances
-        if elastic_ip != None and instances.__len__() > 0:
+        if elastic_ip is not None and instances.__len__() > 0:
             instance = instances[0]
             print 'Waiting for instance to start so we can set its elastic IP address...'
             # Sometimes we get a message from ec2 that says that the instance does not exist.

@@ -105,7 +105,7 @@ class Task(Model):
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         nsecs = 5
         current_timeout = vtimeout
-        while process.poll() == None:
+        while process.poll() is None:
             boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout))
             if nsecs >= current_timeout:
                 current_timeout += vtimeout

@@ -136,7 +136,7 @@ class Volume(Model):
         if size < self.size:
             size = self.size
         ec2 = self.get_ec2_connection()
-        if self.zone_name == None or self.zone_name == '':
+        if self.zone_name is None or self.zone_name == '':
             # deal with the migration case where the zone is not set in the logical volume:
             current_volume = ec2.get_all_volumes([self.volume_id])[0]
             self.zone_name = current_volume.zone

@@ -155,7 +155,7 @@ class Volume(Model):
     def get_ec2_connection(self):
         if self.server:
             return self.server.ec2
-        if not hasattr(self, 'ec2') or self.ec2 == None:
+        if not hasattr(self, 'ec2') or self.ec2 is None:
             self.ec2 = boto.ec2.connect_to_region(self.region_name)
         return self.ec2

@@ -209,7 +209,7 @@ class Volume(Model):

     def detach(self, force=False):
         state = self.attachment_state
-        if state == 'available' or state == None or state == 'detaching':
+        if state == 'available' or state is None or state == 'detaching':
             print 'already detached'
             return None
         ec2 = self.get_ec2_connection()

@@ -218,7 +218,7 @@ class Volume(Model):
         self.put()

     def checkfs(self, use_cmd=None):
-        if self.server == None:
+        if self.server is None:
             raise ValueError('server attribute must be set to run this command')
         # detemine state of file system on volume, only works if attached
         if use_cmd:

@@ -233,7 +233,7 @@ class Volume(Model):
             return True

     def wait(self):
-        if self.server == None:
+        if self.server is None:
             raise ValueError('server attribute must be set to run this command')
         with closing(self.server.get_cmdshell()) as cmd:
             # wait for the volume device to appear

@@ -243,7 +243,7 @@ class Volume(Model):
             time.sleep(10)

     def format(self):
-        if self.server == None:
+        if self.server is None:
             raise ValueError('server attribute must be set to run this command')
         status = None
         with closing(self.server.get_cmdshell()) as cmd:

@@ -253,7 +253,7 @@ class Volume(Model):
         return status

     def mount(self):
-        if self.server == None:
+        if self.server is None:
             raise ValueError('server attribute must be set to run this command')
         boto.log.info('handle_mount_point')
         with closing(self.server.get_cmdshell()) as cmd:

@@ -302,7 +302,7 @@ class Volume(Model):
         # we need to freeze the XFS file system
         try:
             self.freeze()
-            if self.server == None:
+            if self.server is None:
                 snapshot = self.get_ec2_connection().create_snapshot(self.volume_id)
             else:
                 snapshot = self.server.ec2.create_snapshot(self.volume_id)

@@ -179,7 +179,7 @@ class Order(IObject):
             item.ami.id, item.groups, item.key.name)

     def place(self, block=True):
-        if get_domain() == None:
+        if get_domain() is None:
             print 'SDB Persistence Domain not set'
             domain_name = self.get_string('Specify SDB Domain')
             set_domain(domain_name)

Some files were not shown because too many files have changed in this diff.