Update third party packages.

This commit is contained in:
Chris Church 2014-01-02 15:09:48 -05:00
parent 2b7af0a2ec
commit d8423a3342
573 changed files with 34282 additions and 10890 deletions

View File

@ -5,48 +5,49 @@ amqp==1.3.3 (amqp/*)
anyjson==0.3.3 (anyjson/*)
argparse==1.2.1 (argparse.py, needed for Python 2.6 support)
Babel==1.3 (babel/*, excluded bin/pybabel)
billiard==3.3.0.6 (billiard/*, funtests/*, excluded _billiard.so)
boto==2.17.0 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
billiard==3.3.0.13 (billiard/*, funtests/*, excluded _billiard.so)
boto==2.21.2 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
bin/cq, bin/cwutil, bin/dynamodb_dump, bin/dynamodb_load, bin/elbadmin,
bin/fetch_file, bin/glacier, bin/instance_events, bin/kill_instance,
bin/launch_instance, bin/list_instances, bin/lss3, bin/mturk,
bin/pyami_sendmail, bin/route53, bin/s3put, bin/sdbadmin, bin/taskadmin)
celery==3.1.3 (celery/*, excluded bin/celery*)
celery==3.1.7 (celery/*, excluded bin/celery*)
d2to1==0.2.11 (d2to1/*)
distribute==0.7.3 (no files)
django-auth-ldap==1.1.6 (django_auth_ldap/*)
django-auth-ldap==1.1.7 (django_auth_ldap/*)
django-celery==3.1.1 (djcelery/*)
django-extensions==1.2.5 (django_extensions/*)
django-jsonfield==0.9.11 (jsonfield/*)
django-jsonfield==0.9.12 (jsonfield/*)
django-split-settings==0.1.1 (split_settings/*)
django-taggit==0.10 (taggit/*)
djangorestframework==2.3.8 (rest_framework/*)
django-taggit==0.11.2 (taggit/*)
djangorestframework==2.3.10 (rest_framework/*)
httplib2==0.8 (httplib2/*)
importlib==1.0.2 (importlib/*, needed for Python 2.6 support)
iso8601==0.1.8 (iso8601/*)
keyring==3.2 (keyring/*, excluded bin/keyring)
kombu==3.0.4 (kombu/*)
keyring==3.3 (keyring/*, excluded bin/keyring)
kombu==3.0.8 (kombu/*)
Markdown==2.3.1 (markdown/*, excluded bin/markdown_py)
mock==1.0.1 (mock.py)
ordereddict==1.1 (ordereddict.py, needed for Python 2.6 support)
os-diskconfig-python-novaclient-ext==0.1.1 (os_diskconfig_python_novaclient_ext/*)
os-networksv2-python-novaclient-ext==0.21 (os_networksv2_python_novaclient_ext.py)
os-virtual-interfacesv2-python-novaclient-ext==0.14 (os_virtual_interfacesv2_python_novaclient_ext.py)
pbr==0.5.23 (pbr/*)
pexpect==3.0 (pexpect/*, excluded pxssh.py, fdpexpect.py, FSM.py, screen.py,
ANSI.py)
pip==1.4.1 (pip/*, excluded bin/pip*)
pip==1.5 (pip/*, excluded bin/pip*)
prettytable==0.7.2 (prettytable.py)
pyrax==1.6.2 (pyrax/*)
python-dateutil==2.2 (dateutil/*)
python-novaclient==2.15.0 (novaclient/*, excluded bin/nova)
python-swiftclient==1.8.0 (swiftclient/*, excluded bin/swift)
pytz==2013.8 (pytz/*)
rackspace-auth-openstack==1.1 (rackspace_auth_openstack/*)
rackspace-novaclient==1.3 (no files)
rackspace-auth-openstack==1.2 (rackspace_auth_openstack/*)
rackspace-novaclient==1.4 (no files)
rax-default-network-flags-python-novaclient-ext==0.1.3 (rax_default_network_flags_python_novaclient_ext/*)
rax-scheduled-images-python-novaclient-ext==0.2.1 (rax_scheduled_images_python_novaclient_ext/*)
requests==2.0.1 (requests/*)
setuptools==1.3.2 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*)
requests==2.1.0 (requests/*)
setuptools==2.0.2 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*)
simplejson==3.3.1 (simplejson/*, excluded simplejson/_speedups.so)
six==1.4.1 (six.py)
South==0.8.3 (south/*)
South==0.8.4 (south/*)

View File

@ -19,8 +19,8 @@
from __future__ import absolute_import
VERSION = (3, 3, 0, 6)
__version__ = ".".join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
VERSION = (3, 3, 0, 13)
__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
__author__ = 'R Oudkerk / Python Software Foundation'
__author_email__ = 'python-dev@python.org'
__maintainer__ = 'Ask Solem',

View File

@ -30,10 +30,17 @@ try:
import _winapi
from _winapi import (
WAIT_OBJECT_0,
WAIT_ABANDONED_0,
WAIT_TIMEOUT,
INFINITE,
)
# if we got here, we seem to be running on Windows. Handle probably
# missing WAIT_ABANDONED_0 constant:
try:
from _winapi import WAIT_ABANDONED_0
except ImportError:
WAIT_ABANDONED_0 = 128 # _winapi seems to be not exporting
# this constant, fallback solution until
# exported in _winapi
except ImportError:
if sys.platform == 'win32':
raise
@ -540,6 +547,7 @@ if sys.platform != 'win32':
return c1, c2
else:
from billiard.forking import duplicate
def Pipe(duplex=True, rnonblock=False, wnonblock=False): # noqa
'''
@ -574,9 +582,10 @@ else:
_, err = overlapped.GetOverlappedResult(True)
assert err == 0
c1 = PipeConnection(h1, writable=duplex)
c2 = PipeConnection(h2, readable=duplex)
c1 = PipeConnection(duplicate(h1, inheritable=True), writable=duplex)
c2 = PipeConnection(duplicate(h2, inheritable=True), readable=duplex)
_winapi.CloseHandle(h1)
_winapi.CloseHandle(h2)
return c1, c2
#

View File

@ -23,7 +23,11 @@ from . import current_process
from ._ext import _billiard, win32
from .util import register_after_fork, debug, sub_debug
if not(sys.platform == 'win32' or hasattr(_billiard, 'recvfd')):
is_win32 = sys.platform == 'win32'
is_pypy = hasattr(sys, 'pypy_version_info')
is_py3k = sys.version_info[0] == 3
if not(is_win32 or is_pypy or is_py3k or hasattr(_billiard, 'recvfd')):
raise ImportError('pickling of connections not supported')
close = win32.CloseHandle if sys.platform == 'win32' else os.close

View File

@ -39,13 +39,20 @@ else:
EX_SOFTWARE = 70
TERMSIGS = (
TERMSIGS_DEFAULT = (
'SIGHUP',
'SIGQUIT',
'SIGTERM',
'SIGUSR1',
'SIGUSR2'
)
TERMSIGS_FULL = (
'SIGHUP',
'SIGQUIT',
'SIGTRAP',
'SIGABRT',
'SIGEMT',
'SIGBUS',
'SIGSYS',
'SIGPIPE',
'SIGALRM',
@ -88,8 +95,8 @@ def _shutdown_cleanup(signum, frame):
sys.exit(-(256 - signum))
def reset_signals(handler=_shutdown_cleanup):
for sig in TERMSIGS:
def reset_signals(handler=_shutdown_cleanup, full=False):
for sig in TERMSIGS_FULL if full else TERMSIGS_DEFAULT:
try:
signum = getattr(signal, sig)
except AttributeError:

View File

@ -18,9 +18,9 @@ else:
_winapi = None # noqa
try:
if sys.version_info > (2, 7, 5):
buf_t, is_new_buffer = memoryview, True # noqa
except NameError: # Py2.6
else:
buf_t, is_new_buffer = buffer, False # noqa
if hasattr(os, 'write'):
@ -31,7 +31,7 @@ if hasattr(os, 'write'):
def send_offset(fd, buf, offset):
return __write__(fd, buf[offset:])
else: # Py2.6
else: # Py<2.7.6
def send_offset(fd, buf, offset): # noqa
return __write__(fd, buf_t(buf, offset))

View File

@ -38,11 +38,10 @@ if sys.version_info < (3, 3):
if SYSTEM == 'Darwin':
import ctypes
from ctypes.util import find_library
libSystem = ctypes.CDLL('libSystem.dylib')
CoreServices = ctypes.CDLL(
'/System/Library/Frameworks/CoreServices.framework/CoreServices',
use_errno=True,
)
CoreServices = ctypes.CDLL(find_library('CoreServices'),
use_errno=True)
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds

View File

@ -19,7 +19,6 @@ import sys
import threading
import array
from collections import Callable
from traceback import format_exc
from . import Process, current_process, active_children, Pool, util, connection
@ -123,7 +122,7 @@ def all_methods(obj):
temp = []
for name in dir(obj):
func = getattr(obj, name)
if isinstance(func, Callable):
if callable(func):
temp.append(name)
return temp
@ -492,8 +491,7 @@ class BaseManager(object):
'''
assert self._state.value == State.INITIAL
if initializer is not None and \
not isinstance(initializer, Callable):
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server

View File

@ -23,7 +23,7 @@ import threading
import time
import warnings
from collections import Callable, deque
from collections import deque
from functools import partial
from . import Event, Process, cpu_count
@ -86,6 +86,7 @@ ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
@ -236,13 +237,15 @@ class Worker(Process):
_job_terminated = False
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None):
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self._make_shortcuts()
@ -295,7 +298,15 @@ class Worker(Process):
if self.on_exit is not None:
self.on_exit(pid, exitcode)
os._exit(exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
@ -378,7 +389,7 @@ class Worker(Process):
# Make sure all exiting signals call finally: blocks.
# This is important for the semaphore to be released.
reset_signals()
reset_signals(full=self.sigprotection)
# install signal handler for soft timeouts.
if SIG_SOFT_TIMEOUT is not None:
@ -760,8 +771,15 @@ class ResultHandler(PoolThread):
except KeyError:
pass
def on_death(pid, exitcode):
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
state_handlers = self.state_handlers = {
ACK: on_ack, READY: on_ready,
ACK: on_ack, READY: on_ready, DEATH: on_death
}
def on_state_change(task):
@ -934,12 +952,10 @@ class Pool(object):
self.max_restarts = max_restarts or round(self._processes * 100)
self.restart_state = restart_state(max_restarts, max_restart_freq or 1)
if initializer is not None and \
not isinstance(initializer, Callable):
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
if on_process_exit is not None and \
not isinstance(on_process_exit, Callable):
if on_process_exit is not None and not callable(on_process_exit):
raise TypeError('on_process_exit must be callable')
self._pool = []
@ -1034,6 +1050,9 @@ class Pool(object):
w = self.Worker(
inq, outq, synq, self._initializer, self._initargs,
self._maxtasksperchild, sentinel, self._on_process_exit,
# Need to handle all signals if using the ipc semaphore,
# to make sure the semaphore is released.
sigprotection=self.threads,
)
self._pool.append(w)
self._process_register_queues(w, (inq, outq, synq))
@ -1072,18 +1091,20 @@ class Pool(object):
cleaned, exitcodes = {}, {}
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
exitcode = worker.exitcode
popen = worker._popen
if popen is None or exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d', i)
worker.join()
debug('Supervisor: worked %d joined', i)
cleaned[worker.pid] = worker
exitcodes[worker.pid] = worker.exitcode
if worker.exitcode not in (EX_OK, EX_RECYCLE) and \
exitcodes[worker.pid] = exitcode
if exitcode not in (EX_OK, EX_RECYCLE) and \
not getattr(worker, '_controlled_termination', False):
error(
'Process %r pid:%r exited with exitcode %r',
worker.name, worker.pid, worker.exitcode, exc_info=0,
worker.name, worker.pid, exitcode, exc_info=0,
)
self.process_flush_queues(worker)
del self._pool[i]
@ -1100,9 +1121,9 @@ class Pool(object):
if acked_by_gone:
self.on_job_process_down(job, acked_by_gone)
if not job.ready():
exitcode = exitcodes[acked_by_gone]
if getattr(cleaned[acked_by_gone],
'_job_terminated', False):
exitcode = exitcodes.get(acked_by_gone) or 0
proc = cleaned.get(acked_by_gone)
if proc and getattr(proc, '_job_terminated', False):
job._set_terminated(exitcode)
else:
self.on_job_process_lost(
@ -1114,9 +1135,9 @@ class Pool(object):
# was scheduled to write to
sched_for = job._scheduled_for
if write_to and write_to.exitcode is not None:
if write_to and not write_to._is_alive():
self.on_job_process_down(job, write_to.pid)
elif sched_for and sched_for.exitcode is not None:
elif sched_for and not sched_for._is_alive():
self.on_job_process_down(job, sched_for.pid)
for worker in values(cleaned):
@ -1551,7 +1572,7 @@ class Pool(object):
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p._popen is not None and p.exitcode is None:
if p._is_alive():
p.terminate()
debug('joining task handler')
@ -1642,9 +1663,9 @@ class ApplyResult(object):
def terminate(self, signum):
self._terminated = signum
def _set_terminated(self, signum=0):
def _set_terminated(self, signum=None):
try:
raise Terminated(-signum)
raise Terminated(-(signum or 0))
except Terminated:
self._set(None, (False, ExceptionInfo()))

View File

@ -22,6 +22,8 @@ import binascii
import logging
import threading
from multiprocessing import process as _mproc
from .compat import bytes
try:
from _weakrefset import WeakSet
@ -46,6 +48,11 @@ def current_process():
return _current_process
def _set_current_process(process):
global _current_process
_current_process = _mproc._current_process = process
def _cleanup():
# check for processes which have finished
if _current_process is not None:
@ -122,8 +129,6 @@ class Process(object):
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
@ -161,6 +166,11 @@ class Process(object):
self._popen.poll()
return self._popen.returncode is None
def _is_alive(self):
if self._popen is None:
return False
return self._popen.poll() is None
def _get_name(self):
return self._name
@ -254,7 +264,7 @@ class Process(object):
except (OSError, ValueError):
pass
old_process = _current_process
_current_process = self
_set_current_process(self)
# Re-init logging system.
# Workaround for http://bugs.python.org/issue6721/#msg140215

View File

@ -21,13 +21,14 @@ def signo(name):
@contextmanager
def termsigs(*sigs):
def termsigs(default, full):
from billiard import common
prev, common.TERMSIGS = common.TERMSIGS, sigs
prev_def, common.TERMSIGS_DEFAULT = common.TERMSIGS_DEFAULT, default
prev_full, common.TERMSIGS_FULL = common.TERMSIGS_FULL, full
try:
yield
finally:
common.TERMSIGS = prev
common.TERMSIGS_DEFAULT, common.TERMSIGS_FULL = prev_def, prev_full
class test_reset_signals(Case):
@ -39,21 +40,21 @@ class test_reset_signals(Case):
self.assertEqual(os.WTERMSIG(exit.call_args[0][0]), 15)
def test_does_not_reset_ignored_signal(self, sigs=['SIGTERM']):
with self.assert_context(sigs, signal.SIG_IGN) as (_, SET):
with self.assert_context(sigs, [], signal.SIG_IGN) as (_, SET):
self.assertFalse(SET.called)
def test_does_not_reset_if_current_is_None(self, sigs=['SIGTERM']):
with self.assert_context(sigs, None) as (_, SET):
with self.assert_context(sigs, [], None) as (_, SET):
self.assertFalse(SET.called)
def test_resets_for_SIG_DFL(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']):
with self.assert_context(sigs, signal.SIG_DFL) as (_, SET):
with self.assert_context(sigs, [], signal.SIG_DFL) as (_, SET):
SET.assert_has_calls([
call(signo(sig), _shutdown_cleanup) for sig in sigs
])
def test_resets_for_obj(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']):
with self.assert_context(sigs, object()) as (_, SET):
with self.assert_context(sigs, [], object()) as (_, SET):
SET.assert_has_calls([
call(signo(sig), _shutdown_cleanup) for sig in sigs
])
@ -61,19 +62,19 @@ class test_reset_signals(Case):
def test_handles_errors(self, sigs=['SIGTERM']):
for exc in (OSError(), AttributeError(),
ValueError(), RuntimeError()):
with self.assert_context(sigs, signal.SIG_DFL, exc) as (_, SET):
with self.assert_context(sigs, [], signal.SIG_DFL, exc) as (_, SET):
self.assertTrue(SET.called)
@contextmanager
def assert_context(self, sigs, get_returns=None, set_effect=None):
with termsigs(*sigs):
def assert_context(self, default, full, get_returns=None, set_effect=None):
with termsigs(default, full):
with patch('signal.getsignal') as GET:
with patch('signal.signal') as SET:
GET.return_value = get_returns
SET.side_effect = set_effect
reset_signals()
GET.assert_has_calls([
call(signo(sig)) for sig in sigs
call(signo(sig)) for sig in default
])
yield GET, SET

View File

@ -27,6 +27,7 @@
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
import datetime
import os
import platform
import re
@ -36,9 +37,12 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
__version__ = '2.17.0'
__version__ = '2.21.2'
Version = __version__ # for backware compatibility
# http://bugs.python.org/issue7980
datetime.datetime.strptime('', '')
UserAgent = 'Boto/%s Python/%s %s/%s' % (
__version__,
platform.python_version(),
@ -744,6 +748,50 @@ def connect_cloudtrail(aws_access_key_id=None,
)
def connect_directconnect(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to AWS DirectConnect
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.directconnect.layer1.DirectConnectConnection`
:return: A connection to the AWS DirectConnect service
"""
from boto.directconnect.layer1 import DirectConnectConnection
return DirectConnectConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def connect_kinesis(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
Connect to Amazon Kinesis
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
rtype: :class:`boto.kinesis.layer1.KinesisConnection`
:return: A connection to the Amazon Kinesis service
"""
from boto.kinesis.layer1 import KinesisConnection
return KinesisConnection(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**kwargs
)
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True, is_latest=False):

View File

@ -39,35 +39,15 @@ import hmac
import sys
import time
import urllib
import urlparse
import posixpath
from boto.auth_handler import AuthHandler
from boto.exception import BotoClientError
#
# the following is necessary because of the incompatibilities
# between Python 2.4, 2.5, and 2.6 as well as the fact that some
# people running 2.4 have installed hashlib as a separate module
# this fix was provided by boto user mccormix.
# see: http://code.google.com/p/boto/issues/detail?id=172
# for more details.
#
try:
from hashlib import sha1 as sha
from hashlib import sha256 as sha256
if sys.version[:3] == "2.4":
# we are using an hmac that expects a .new() method.
class Faker:
def __init__(self, which):
self.which = which
self.digest_size = self.which().digest_size
def new(self, *args, **kwargs):
return self.which(*args, **kwargs)
sha = Faker(sha)
sha256 = Faker(sha256)
except ImportError:
import sha
sha256 = None
@ -129,7 +109,7 @@ class AnonAuthHandler(AuthHandler, HmacKeys):
capability = ['anon']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
super(AnonAuthHandler, self).__init__(host, config, provider)
def add_auth(self, http_request, **kwargs):
pass
@ -373,10 +353,15 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
l = sorted(['%s:%s' % (n.lower().strip(),
' '.join(headers_to_sign[n].strip().split()))
for n in headers_to_sign])
return '\n'.join(l)
canonical = []
for header in headers_to_sign:
c_name = header.lower().strip()
raw_value = headers_to_sign[header]
c_value = ' '.join(raw_value.strip().split())
canonical.append('%s:%s' % (c_name, c_value))
return '\n'.join(sorted(canonical))
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in headers_to_sign]
@ -421,14 +406,11 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
scope.append('aws4_request')
return '/'.join(scope)
def credential_scope(self, http_request):
scope = []
http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
scope.append(http_request.timestamp)
# The service_name and region_name either come from:
# * The service_name/region_name attrs or (if these values are None)
# * parsed from the endpoint <service>.<region>.amazonaws.com.
parts = http_request.host.split('.')
def split_host_parts(self, host):
return host.split('.')
def determine_region_name(self, host):
parts = self.split_host_parts(host)
if self.region_name is not None:
region_name = self.region_name
elif len(parts) > 1:
@ -442,11 +424,25 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
else:
region_name = parts[0]
return region_name
def determine_service_name(self, host):
parts = self.split_host_parts(host)
if self.service_name is not None:
service_name = self.service_name
else:
service_name = parts[0]
return service_name
def credential_scope(self, http_request):
scope = []
http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
scope.append(http_request.timestamp)
# The service_name and region_name either come from:
# * The service_name/region_name attrs or (if these values are None)
# * parsed from the endpoint <service>.<region>.amazonaws.com.
region_name = self.determine_region_name(http_request.host)
service_name = self.determine_service_name(http_request.host)
http_request.service_name = service_name
http_request.region_name = region_name
@ -516,6 +512,153 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
req.headers['Authorization'] = ','.join(l)
class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
"""
Implements a variant of Version 4 HMAC authorization specific to S3.
"""
capability = ['hmac-v4-s3']
def __init__(self, *args, **kwargs):
super(S3HmacAuthV4Handler, self).__init__(*args, **kwargs)
if self.region_name:
self.region_name = self.clean_region_name(self.region_name)
def clean_region_name(self, region_name):
if region_name.startswith('s3-'):
return region_name[3:]
return region_name
def canonical_uri(self, http_request):
# S3 does **NOT** do path normalization that SigV4 typically does.
# Urlencode the path, **NOT** ``auth_path`` (because vhosting).
path = urlparse.urlparse(http_request.path)
encoded = urllib.quote(path.path)
return encoded
def host_header(self, host, http_request):
port = http_request.port
secure = http_request.protocol == 'https'
if ((port == 80 and not secure) or (port == 443 and secure)):
return http_request.host
return '%s:%s' % (http_request.host, port)
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
host_header_value = self.host_header(self.host, http_request)
headers_to_sign = {}
headers_to_sign = {'Host': host_header_value}
for name, value in http_request.headers.items():
lname = name.lower()
# Hooray for the only difference! The main SigV4 signer only does
# ``Host`` + ``x-amz-*``. But S3 wants pretty much everything
# signed, except for authorization itself.
if not lname in ['authorization']:
headers_to_sign[name] = value
return headers_to_sign
def determine_region_name(self, host):
# S3's different format(s) of representing region/service from the
# rest of AWS makes this hurt too.
#
# Possible domain formats:
# - s3.amazonaws.com (Classic)
# - s3-us-west-2.amazonaws.com (Specific region)
# - bukkit.s3.amazonaws.com (Vhosted Classic)
# - bukkit.s3-ap-northeast-1.amazonaws.com (Vhosted specific region)
# - s3.cn-north-1.amazonaws.com.cn - (Bejing region)
# - bukkit.s3.cn-north-1.amazonaws.com.cn - (Vhosted Bejing region)
parts = self.split_host_parts(host)
if self.region_name is not None:
region_name = self.region_name
else:
# Classic URLs - s3-us-west-2.amazonaws.com
if len(parts) == 3:
region_name = self.clean_region_name(parts[0])
# Special-case for Classic.
if region_name == 's3':
region_name = 'us-east-1'
else:
# Iterate over the parts in reverse order.
for offset, part in enumerate(reversed(parts)):
part = part.lower()
# Look for the first thing starting with 's3'.
# Until there's a ``.s3`` TLD, we should be OK. :P
if part == 's3':
# If it's by itself, the region is the previous part.
region_name = parts[-offset]
break
elif part.startswith('s3-'):
region_name = self.clean_region_name(part)
break
return region_name
def determine_service_name(self, host):
# Should this signing mechanism ever be used for anything else, this
# will fail. Consider utilizing the logic from the parent class should
# you find yourself here.
return 's3'
def mangle_path_and_params(self, req):
"""
Returns a copy of the request object with fixed ``auth_path/params``
attributes from the original.
"""
modified_req = copy.copy(req)
# Unlike the most other services, in S3, ``req.params`` isn't the only
# source of query string parameters.
# Because of the ``query_args``, we may already have a query string
# **ON** the ``path/auth_path``.
# Rip them apart, so the ``auth_path/params`` can be signed
# appropriately.
parsed_path = urlparse.urlparse(modified_req.auth_path)
modified_req.auth_path = parsed_path.path
if modified_req.params is None:
modified_req.params = {}
raw_qs = parsed_path.query
existing_qs = urlparse.parse_qs(
raw_qs,
keep_blank_values=True
)
# ``parse_qs`` will return lists. Don't do that unless there's a real,
# live list provided.
for key, value in existing_qs.items():
if isinstance(value, (list, tuple)):
if len(value) == 1:
existing_qs[key] = value[0]
modified_req.params.update(existing_qs)
return modified_req
def payload(self, http_request):
if http_request.headers.get('x-amz-content-sha256'):
return http_request.headers['x-amz-content-sha256']
return super(S3HmacAuthV4Handler, self).payload(http_request)
def add_auth(self, req, **kwargs):
if not 'x-amz-content-sha256' in req.headers:
if '_sha256' in req.headers:
req.headers['x-amz-content-sha256'] = req.headers.pop('_sha256')
else:
req.headers['x-amz-content-sha256'] = self.payload(req)
req = self.mangle_path_and_params(req)
return super(S3HmacAuthV4Handler, self).add_auth(req, **kwargs)
class QueryAuthHandler(AuthHandler):
"""
Provides pure query construction (no actual signing).
@ -742,3 +885,24 @@ def get_auth_handler(host, config, provider, requested_capability=None):
# user could override this with a .boto config that includes user-specific
# credentials (for access to user data).
return ready_handlers[-1]
def detect_potential_sigv4(func):
def _wrapper(self):
if hasattr(self, 'region'):
if getattr(self.region, 'endpoint', ''):
if '.cn-' in self.region.endpoint:
return ['hmac-v4']
return func(self)
return _wrapper
def detect_potential_s3sigv4(func):
def _wrapper(self):
if hasattr(self, 'host'):
if '.cn-' in self.host:
return ['hmac-v4-s3']
return func(self)
return _wrapper

View File

@ -45,7 +45,7 @@ class Layer1(AWSQueryConnection):
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
super(Layer1, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
@ -237,7 +237,8 @@ class Layer1(AWSQueryConnection):
version_label=None, template_name=None,
solution_stack_name=None, cname_prefix=None,
description=None, option_settings=None,
options_to_remove=None):
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""Launches an environment for the application using a configuration.
:type application_name: string
@ -245,14 +246,6 @@ class Layer1(AWSQueryConnection):
version to be deployed. If no application is found with this name,
CreateEnvironment returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type environment_name: string
:param environment_name: A unique name for the deployment environment.
Used in the application URL. Constraint: Must be from 4 to 23
@ -264,6 +257,14 @@ class Layer1(AWSQueryConnection):
name becomes part of the CNAME, and therefore part of the visible
URL for your application.
:type version_label: string
:param version_label: The name of the application version to deploy. If
the specified application has no associated application versions,
AWS Elastic Beanstalk UpdateEnvironment returns an
InvalidParameterValue error. Default: If not specified, AWS
Elastic Beanstalk attempts to launch the most recently created
application version.
:type template_name: string
:param template_name: The name of the configuration template to
use in deployment. If no configuration template is found with this
@ -308,6 +309,25 @@ class Layer1(AWSQueryConnection):
options to remove from the configuration set for this new
environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and a ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
:type tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
"""
@ -330,6 +350,10 @@ class Layer1(AWSQueryConnection):
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.member.Name'] = tier_name
params['Tier.member.Type'] = tier_type
params['Tier.member.Version'] = tier_version
return self._get_response('CreateEnvironment', params)
def create_storage_location(self):
@ -848,9 +872,9 @@ class Layer1(AWSQueryConnection):
return self._get_response('RetrieveEnvironmentInfo', params)
def swap_environment_cnames(self, source_environment_id=None,
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
source_environment_name=None,
destination_environment_id=None,
destination_environment_name=None):
"""Swaps the CNAMEs of two environments.
:type source_environment_id: string
@ -1021,7 +1045,8 @@ class Layer1(AWSQueryConnection):
def update_environment(self, environment_id=None, environment_name=None,
version_label=None, template_name=None,
description=None, option_settings=None,
options_to_remove=None):
options_to_remove=None, tier_name=None,
tier_type=None, tier_version='1.0'):
"""
Updates the environment description, deploys a new application
version, updates the configuration settings to an entirely new
@ -1073,6 +1098,25 @@ class Layer1(AWSQueryConnection):
:param options_to_remove: A list of custom user-defined configuration
options to remove from the configuration set for this environment.
:type tier_name: string
:param tier_name: The name of the tier. Valid values are
"WebServer" and "Worker". Defaults to "WebServer".
The ``tier_name`` and a ``tier_type`` parameters are
related and the values provided must be valid.
The possible combinations are:
* "WebServer" and "Standard" (the default)
* "Worker" and "SQS/HTTP"
:type tier_type: string
:param tier_type: The type of the tier. Valid values are
"Standard" if ``tier_name`` is "WebServer" and "SQS/HTTP"
if ``tier_name`` is "Worker". Defaults to "Standard".
:type tier_version: string
:type tier_version: The version of the tier. Valid values
currently are "1.0". Defaults to "1.0".
:raises: InsufficientPrivilegesException
"""
params = {}
@ -1093,6 +1137,10 @@ class Layer1(AWSQueryConnection):
if options_to_remove:
self.build_list_params(params, options_to_remove,
'OptionsToRemove.member')
if tier_name and tier_type and tier_version:
params['Tier.member.Name'] = tier_name
params['Tier.member.Type'] = tier_type
params['Tier.member.Version'] = tier_version
return self._get_response('UpdateEnvironment', params)
def validate_configuration_settings(self, application_name,

View File

@ -32,6 +32,7 @@ RegionData = {
'ap-northeast-1': 'cloudformation.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'cloudformation.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'cloudformation.ap-southeast-2.amazonaws.com',
'cn-north-1': 'cloudformation.cn-north-1.amazonaws.com.cn',
}

View File

@ -57,7 +57,7 @@ class CloudFormationConnection(AWSQueryConnection):
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint, CloudFormationConnection)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
super(CloudFormationConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,

View File

@ -1,6 +1,6 @@
from boto.resultset import ResultSet
class Template:
class Template(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
@ -19,7 +19,7 @@ class Template:
else:
setattr(self, name, value)
class TemplateParameter:
class TemplateParameter(object):
def __init__(self, parent):
self.parent = parent
self.default_value = None

View File

@ -44,7 +44,7 @@ class CloudFrontConnection(AWSAuthConnection):
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host,
super(CloudFrontConnection, self).__init__(host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,

View File

@ -176,7 +176,7 @@ class StreamingDistributionConfig(DistributionConfig):
def __init__(self, connection=None, origin='', enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, logging=None):
DistributionConfig.__init__(self, connection=connection,
super(StreamingDistributionConfig, self).__init__(connection=connection,
origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
@ -684,8 +684,8 @@ class StreamingDistribution(Distribution):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
Distribution.__init__(self, connection, config, domain_name,
id, last_modified_time, status)
super(StreamingDistribution, self).__init__(connection, config,
domain_name, id, last_modified_time, status)
self._object_class = StreamingObject
def startElement(self, name, attrs, connection):
@ -693,7 +693,8 @@ class StreamingDistribution(Distribution):
self.config = StreamingDistributionConfig()
return self.config
else:
return Distribution.startElement(self, name, attrs, connection)
return super(StreamingDistribution, self).startElement(name, attrs,
connection)
def update(self, enabled=None, cnames=None, comment=None):
"""

View File

@ -14,15 +14,14 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
class OriginAccessIdentity:
class OriginAccessIdentity(object):
def __init__(self, connection=None, config=None, id='',
s3_user_id='', comment=''):
self.connection = connection
@ -31,7 +30,7 @@ class OriginAccessIdentity:
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
if name == 'CloudFrontOriginAccessIdentityConfig':
self.config = OriginAccessIdentityConfig()
@ -63,9 +62,9 @@ class OriginAccessIdentity:
def uri(self):
return 'origin-access-identity/cloudfront/%s' % self.id
class OriginAccessIdentityConfig:
class OriginAccessIdentityConfig(object):
def __init__(self, connection=None, caller_reference='', comment=''):
self.connection = connection
if caller_reference:
@ -94,8 +93,8 @@ class OriginAccessIdentityConfig:
else:
setattr(self, name, value)
class OriginAccessIdentitySummary:
class OriginAccessIdentitySummary(object):
def __init__(self, connection=None, id='',
s3_user_id='', comment=''):
self.connection = connection
@ -103,7 +102,7 @@ class OriginAccessIdentitySummary:
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
return None
@ -119,4 +118,4 @@ class OriginAccessIdentitySummary:
def get_origin_access_identity(self):
return self.connection.get_origin_access_identity_info(self.id)

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -24,7 +24,7 @@ from boto.s3.key import Key
class Object(Key):
def __init__(self, bucket, name=None):
Key.__init__(self, bucket, name=name)
super(Object, self).__init__(bucket, name=name)
self.distribution = bucket.distribution
def __repr__(self):
@ -43,6 +43,6 @@ class Object(Key):
class StreamingObject(Object):
def url(self, scheme='rtmp'):
return Object.url(self, scheme)
return super(StreamingObject, self).url(scheme)

View File

@ -14,17 +14,16 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Signer:
class Signer(object):
def __init__(self):
self.id = None
self.key_pair_ids = []
def startElement(self, name, attrs, connection):
return None
@ -35,9 +34,9 @@ class Signer:
self.id = value
elif name == 'KeyPairId':
self.key_pair_ids.append(value)
class ActiveTrustedSigners(list):
class ActiveTrustedSigners(list):
def startElement(self, name, attrs, connection):
if name == 'Signer':
s = Signer()
@ -47,8 +46,8 @@ class ActiveTrustedSigners(list):
def endElement(self, name, value, connection):
pass
class TrustedSigners(list):
class TrustedSigners(list):
def startElement(self, name, attrs, connection):
return None

View File

@ -42,8 +42,8 @@ class CloudTrailConnection(AWSQueryConnection):
CloudTrail is a web service that records AWS API calls for your
AWS account and delivers log files to an Amazon S3 bucket. The
recorded information includes the identity of the user, the start
time of the event, the source IP address, the request parameters,
and the response elements returned by the service.
time of the AWS API call, the source IP address, the request
parameters, and the response elements returned by the service.
As an alternative to using the API, you can use one of the AWS
SDKs, which consist of libraries and sample code for various
@ -52,11 +52,11 @@ class CloudTrailConnection(AWSQueryConnection):
programmatic access to AWSCloudTrail. For example, the SDKs take
care of cryptographically signing requests, managing errors, and
retrying requests automatically. For information about the AWS
SDKs, including how to download and install them, see the Tools
for Amazon Web Services page.
SDKs, including how to download and install them, see the `Tools
for Amazon Web Services page`_.
See the CloudTrail User Guide for information about the data that
is included with each event listed in the log files.
is included with each AWS API call listed in the log files.
"""
APIVersion = "2013-11-01"
DefaultRegionName = "us-east-1"
@ -71,10 +71,9 @@ class CloudTrailConnection(AWSQueryConnection):
"TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException,
"InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException,
"InvalidTrailNameException": exceptions.InvalidTrailNameException,
"InternalErrorException": exceptions.InternalErrorException,
"TrailNotProvidedException": exceptions.TrailNotProvidedException,
"TrailNotFoundException": exceptions.TrailNotFoundException,
"S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException,
"TrailNotProvidedException": exceptions.TrailNotProvidedException,
"InvalidS3PrefixException": exceptions.InvalidS3PrefixException,
"MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException,
"InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException,
@ -90,75 +89,71 @@ class CloudTrailConnection(AWSQueryConnection):
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
super(CloudTrailConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_trail(self, trail=None):
def create_trail(self, name=None, s3_bucket_name=None,
s3_key_prefix=None, sns_topic_name=None,
include_global_service_events=None, trail=None):
"""
From the command line, use create-subscription.
From the command line, use `create-subscription`.
Creates a trail that specifies the settings for delivery of
log data to an Amazon S3 bucket. The request includes a Trail
structure that specifies the following:
log data to an Amazon S3 bucket.
Support for passing Trail as a parameter ends as early as
February 25, 2014. The request and response examples in this
topic show the use of parameters as well as a Trail object.
Until Trail is removed, you can use either Trail or the
parameter list.
+ Trail name.
+ The name of the Amazon S3 bucket to which CloudTrail
delivers your log files.
+ The name of the Amazon S3 key prefix that precedes each log
file.
+ The name of the Amazon SNS topic that notifies you that a
new file is available in your bucket.
+ Whether the log file should include events from global
services. Currently, the only events included in CloudTrail
log files are from IAM and AWS STS.
:type name: string
:param name: Specifies the name of the trail.
:type s3_bucket_name: string
:param s3_bucket_name: Specifies the name of the Amazon S3 bucket
designated for publishing log files.
Returns the appropriate HTTP status code if successful. If
not, it returns either one of the CommonErrors or a
FrontEndException with one of the following error codes:
:type s3_key_prefix: string
:param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
the name of the bucket you have designated for log file delivery.
**MaximumNumberOfTrailsExceeded**
:type sns_topic_name: string
:param sns_topic_name: Specifies the name of the Amazon SNS topic
defined for notification of log file delivery.
An attempt was made to create more trails than allowed. You
can only create one trail for each account in each region.
**TrailAlreadyExists**
At attempt was made to create a trail with a name that already
exists.
**S3BucketDoesNotExist**
Specified Amazon S3 bucket does not exist.
**InsufficientS3BucketPolicy**
Policy on Amazon S3 bucket does not permit CloudTrail to write
to your bucket. See the AWS AWS CloudTrail User Guide for the
required bucket policy.
**InsufficientSnsTopicPolicy**
The policy on Amazon SNS topic does not permit CloudTrail to
write to it. Can also occur when an Amazon SNS topic does not
exist.
:type include_global_service_events: boolean
:param include_global_service_events: Specifies whether the trail is
publishing events from global services such as IAM to the log
files.
:type trail: dict
:param trail: Contains the Trail structure that specifies the settings
for each trail.
:param trail: Support for passing a Trail object in the CreateTrail or
UpdateTrail actions will end as early as February 15, 2014. Instead
of the Trail object and its members, use the parameters listed for
these actions.
"""
params = {}
if name is not None:
params['Name'] = name
if s3_bucket_name is not None:
params['S3BucketName'] = s3_bucket_name
if s3_key_prefix is not None:
params['S3KeyPrefix'] = s3_key_prefix
if sns_topic_name is not None:
params['SnsTopicName'] = sns_topic_name
if include_global_service_events is not None:
params['IncludeGlobalServiceEvents'] = include_global_service_events
if trail is not None:
params['trail'] = trail
return self.make_request(action='CreateTrail',
body=json.dumps(params))
def delete_trail(self, name=None):
def delete_trail(self, name):
"""
Deletes a trail.
@ -166,19 +161,17 @@ class CloudTrailConnection(AWSQueryConnection):
:param name: The name of a trail to be deleted.
"""
params = {}
if name is not None:
params['Name'] = name
params = {'Name': name, }
return self.make_request(action='DeleteTrail',
body=json.dumps(params))
def describe_trails(self, trail_name_list=None):
"""
Retrieves the settings for some or all trails associated with
an account. Returns a list of Trail structures in JSON format.
an account.
:type trail_name_list: list
:param trail_name_list: The list of Trail object names.
:param trail_name_list: The list of trails.
"""
params = {}
@ -187,97 +180,153 @@ class CloudTrailConnection(AWSQueryConnection):
return self.make_request(action='DescribeTrails',
body=json.dumps(params))
def get_trail_status(self, name=None):
def get_trail_status(self, name):
"""
Returns GetTrailStatusResult, which contains a JSON-formatted
list of information about the trail specified in the request.
JSON fields include information such as delivery errors,
Amazon SNS and Amazon S3 errors, and times that logging
started and stopped for each trail.
Returns a JSON-formatted list of information about the
specified trail. Fields include information on delivery
errors, Amazon SNS and Amazon S3 errors, and start and stop
logging times for each trail.
The CloudTrail API is currently undergoing revision. This
action currently returns both new fields and fields slated for
removal from the API. The following lists indicate the plans
for each field:
**List of Members Planned for Ongoing Support**
+ IsLogging
+ LatestDeliveryTime
+ LatestNotificationTime
+ StartLoggingTime
+ StopLoggingTime
+ LatestNotificationError
+ LatestDeliveryError
**List of Members Scheduled for Removal**
+ **LatestDeliveryAttemptTime**: Use LatestDeliveryTime
instead.
+ **LatestNotificationAttemptTime**: Use
LatestNotificationTime instead.
+ **LatestDeliveryAttemptSucceeded**: No replacement. See the
note following this list.
+ **LatestNotificationAttemptSucceeded**: No replacement. See
the note following this list.
+ **TimeLoggingStarted**: Use StartLoggingTime instead.
+ **TimeLoggingStopped**: Use StopLoggingtime instead.
No replacements have been created for
LatestDeliveryAttemptSucceeded and
LatestNotificationAttemptSucceeded . Use LatestDeliveryError
and LatestNotificationError to evaluate success or failure of
log delivery or notification. Empty values returned for these
fields indicate success. An error in LatestDeliveryError
generally indicates either a missing bucket or insufficient
permissions to write to the bucket. Similarly, an error in
LatestNotificationError indicates either a missing topic or
insufficient permissions.
:type name: string
:param name: The name of the trail for which you are requesting the
current status.
"""
params = {}
if name is not None:
params['Name'] = name
params = {'Name': name, }
return self.make_request(action='GetTrailStatus',
body=json.dumps(params))
def start_logging(self, name=None):
def start_logging(self, name):
"""
Starts the processing of recording user activity events and
log file delivery for a trail.
Starts the recording of AWS API calls and log file delivery
for a trail.
:type name: string
:param name: The name of the Trail for which CloudTrail logs events.
:param name: The name of the trail for which CloudTrail logs AWS API
calls.
"""
params = {}
if name is not None:
params['Name'] = name
params = {'Name': name, }
return self.make_request(action='StartLogging',
body=json.dumps(params))
def stop_logging(self, name=None):
def stop_logging(self, name):
"""
Suspends the recording of user activity events and log file
delivery for the specified trail. Under most circumstances,
there is no need to use this action. You can update a trail
without stopping it first. This action is the only way to stop
logging activity.
Suspends the recording of AWS API calls and log file delivery
for the specified trail. Under most circumstances, there is no
need to use this action. You can update a trail without
stopping it first. This action is the only way to stop
recording.
:type name: string
:param name: Communicates to CloudTrail the name of the Trail for which
to stop logging events.
:param name: Communicates to CloudTrail the name of the trail for which
to stop logging AWS API calls.
"""
params = {'Name': name, }
return self.make_request(action='StopLogging',
body=json.dumps(params))
def update_trail(self, name=None, s3_bucket_name=None,
s3_key_prefix=None, sns_topic_name=None,
include_global_service_events=None, trail=None):
"""
From the command line, use `update-subscription`.
Updates the settings that specify delivery of log files.
Changes to a trail do not require stopping the CloudTrail
service. Use this action to designate an existing bucket for
log delivery. If the existing bucket has previously been a
target for CloudTrail log files, an IAM policy exists for the
bucket.
Support for passing Trail as a parameter ends as early as
February 25, 2014. The request and response examples in this
topic show the use of parameters as well as a Trail object.
Until Trail is removed, you can use either Trail or the
parameter list.
:type name: string
:param name: Specifies the name of the trail.
:type s3_bucket_name: string
:param s3_bucket_name: Specifies the name of the Amazon S3 bucket
designated for publishing log files.
:type s3_key_prefix: string
:param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
the name of the bucket you have designated for log file delivery.
:type sns_topic_name: string
:param sns_topic_name: Specifies the name of the Amazon SNS topic
defined for notification of log file delivery.
:type include_global_service_events: boolean
:param include_global_service_events: Specifies whether the trail is
publishing events from global services such as IAM to the log
files.
:type trail: dict
:param trail: Support for passing a Trail object in the CreateTrail or
UpdateTrail actions will end as early as February 15, 2014. Instead
of the Trail object and its members, use the parameters listed for
these actions.
"""
params = {}
if name is not None:
params['Name'] = name
return self.make_request(action='StopLogging',
body=json.dumps(params))
def update_trail(self, trail=None):
"""
From the command line, use update-subscription.
Updates the settings that specify delivery of log files.
Changes to a trail do not require stopping the CloudTrail
service. You can use this action to designate an existing
bucket for log delivery, or to create a new bucket and prefix.
If the existing bucket has previously been a target for
CloudTrail log files, an IAM policy exists for the bucket. If
you create a new bucket using UpdateTrail, you need to apply
the policy to the bucket using one of the means provided by
the Amazon S3 service.
The request includes a Trail structure that specifies the
following:
+ Trail name.
+ The name of the Amazon S3 bucket to which CloudTrail
delivers your log files.
+ The name of the Amazon S3 key prefix that precedes each log
file.
+ The name of the Amazon SNS topic that notifies you that a
new file is available in your bucket.
+ Whether the log file should include events from global
services, such as IAM or AWS STS.
**CreateTrail** returns the appropriate HTTP status code if
successful. If not, it returns either one of the common errors
or one of the exceptions listed at the end of this page.
:type trail: dict
:param trail: Represents the Trail structure that contains the
CloudTrail setting for an account.
"""
params = {}
if s3_bucket_name is not None:
params['S3BucketName'] = s3_bucket_name
if s3_key_prefix is not None:
params['S3KeyPrefix'] = s3_key_prefix
if sns_topic_name is not None:
params['SnsTopicName'] = sns_topic_name
if include_global_service_events is not None:
params['IncludeGlobalServiceEvents'] = include_global_service_events
if trail is not None:
params['trail'] = trail
return self.make_request(action='UpdateTrail',

View File

@ -372,7 +372,8 @@ class HTTPRequest(object):
for key in self.headers:
val = self.headers[key]
if isinstance(val, unicode):
self.headers[key] = urllib.quote_plus(val.encode('utf-8'))
safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~'
self.headers[key] = urllib.quote_plus(val.encode('utf-8'), safe)
connection._auth_handler.add_auth(self, **kwargs)
@ -839,6 +840,13 @@ class AWSAuthConnection(object):
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
def set_host_header(self, request):
try:
request.headers['Host'] = \
self._auth_handler.host_header(self.host, request)
except AttributeError:
request.headers['Host'] = self.host.split(':', 1)[0]
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
@ -879,7 +887,8 @@ class AWSAuthConnection(object):
# the port info. All others should be now be up to date and
# not include the port.
if 's3' not in self._required_auth_capability():
request.headers['Host'] = self.host.split(':', 1)[0]
self.set_host_header(request)
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
@ -1025,7 +1034,7 @@ class AWSQueryConnection(AWSAuthConnection):
proxy_user=None, proxy_pass=None, host=None, debug=0,
https_connection_factory=None, path='/', security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host, aws_access_key_id,
super(AWSQueryConnection, self).__init__(host, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -43,7 +43,7 @@ class YAMLMessage(Message):
def __init__(self, queue=None, body='', xml_attrs=None):
self.data = None
Message.__init__(self, queue, body)
super(YAMLMessage, self).__init__(queue, body)
def set_body(self, body):
self.data = yaml.load(body)

View File

@ -90,7 +90,7 @@ class DataPipelineConnection(AWSQueryConnection):
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
super(DataPipelineConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):

View File

@ -0,0 +1,66 @@
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
def regions():
"""
Get all available regions for the AWS DirectConnect service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.directconnect.layer1 import DirectConnectConnection
return [RegionInfo(name='us-east-1',
endpoint='directconnect.us-east-1.amazonaws.com',
connection_cls=DirectConnectConnection),
RegionInfo(name='us-west-1',
endpoint='directconnect.us-west-1.amazonaws.com',
connection_cls=DirectConnectConnection),
RegionInfo(name='us-west-2',
endpoint='directconnect.us-west-2.amazonaws.com',
connection_cls=DirectConnectConnection),
RegionInfo(name='eu-west-1',
endpoint='directconnect.eu-west-1.amazonaws.com',
connection_cls=DirectConnectConnection),
RegionInfo(name='ap-southeast-1',
endpoint='directconnect.ap-southeast-1.amazonaws.com',
connection_cls=DirectConnectConnection),
RegionInfo(name='ap-southeast-2',
endpoint='directconnect.ap-southeast-2.amazonaws.com',
connection_cls=DirectConnectConnection),
RegionInfo(name='ap-southeast-3',
endpoint='directconnect.ap-southeast-3.amazonaws.com',
connection_cls=DirectConnectConnection),
RegionInfo(name='sa-east-1',
endpoint='directconnect.sa-east-1.amazonaws.com',
connection_cls=DirectConnectConnection),
]
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None

View File

@ -0,0 +1,28 @@
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class DirectConnectClientException(Exception):
pass
class DirectConnectServerException(Exception):
pass

View File

@ -0,0 +1,633 @@
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import json
except ImportError:
import simplejson as json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.directconnect import exceptions
class DirectConnectConnection(AWSQueryConnection):
"""
AWS Direct Connect makes it easy to establish a dedicated network
connection from your premises to Amazon Web Services (AWS). Using
AWS Direct Connect, you can establish private connectivity between
AWS and your data center, office, or colocation environment, which
in many cases can reduce your network costs, increase bandwidth
throughput, and provide a more consistent network experience than
Internet-based connections.
The AWS Direct Connect API Reference provides descriptions,
syntax, and usage examples for each of the actions and data types
for AWS Direct Connect. Use the following links to get started
using the AWS Direct Connect API Reference :
+ `Actions`_: An alphabetical list of all AWS Direct Connect
actions.
+ `Data Types`_: An alphabetical list of all AWS Direct Connect
data types.
+ `Common Query Parameters`_: Parameters that all Query actions
can use.
+ `Common Errors`_: Client and server errors that all actions can
return.
"""
APIVersion = "2012-10-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com"
ServiceName = "DirectConnect"
TargetPrefix = "OvertureService"
ResponseError = JSONResponseError
_faults = {
"DirectConnectClientException": exceptions.DirectConnectClientException,
"DirectConnectServerException": exceptions.DirectConnectServerException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(DirectConnectConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def allocate_connection_on_interconnect(self, bandwidth, connection_name,
owner_account, interconnect_id,
vlan):
"""
Creates a hosted connection on an interconnect.
Allocates a VLAN number and a specified amount of bandwidth
for use by a hosted connection on the given interconnect.
:type bandwidth: string
:param bandwidth: Bandwidth of the connection.
Example: " 500Mbps "
Default: None
:type connection_name: string
:param connection_name: Name of the provisioned connection.
Example: " 500M Connection to AWS "
Default: None
:type owner_account: string
:param owner_account: Numeric account Id of the customer for whom the
connection will be provisioned.
Example: 123443215678
Default: None
:type interconnect_id: string
:param interconnect_id: ID of the interconnect on which the connection
will be provisioned.
Example: dxcon-456abc78
Default: None
:type vlan: integer
:param vlan: The dedicated VLAN provisioned to the connection.
Example: 101
Default: None
"""
params = {
'bandwidth': bandwidth,
'connectionName': connection_name,
'ownerAccount': owner_account,
'interconnectId': interconnect_id,
'vlan': vlan,
}
return self.make_request(action='AllocateConnectionOnInterconnect',
body=json.dumps(params))
def allocate_private_virtual_interface(self, connection_id,
                                       owner_account,
                                       new_private_virtual_interface_allocation):
    """
    Provision a private virtual interface to be owned by a different
    AWS customer. The interface stays in the 'Confirming' state — and
    cannot carry traffic — until the owner accepts it with
    ConfirmPrivateVirtualInterface.

    :type connection_id: string
    :param connection_id: The connection ID on which the private
        virtual interface is provisioned.

    :type owner_account: string
    :param owner_account: The AWS account that will own the new
        private virtual interface.

    :type new_private_virtual_interface_allocation: dict
    :param new_private_virtual_interface_allocation: Detailed
        information for the private virtual interface to be
        provisioned.
    """
    payload = {
        'connectionId': connection_id,
        'ownerAccount': owner_account,
        'newPrivateVirtualInterfaceAllocation':
            new_private_virtual_interface_allocation,
    }
    return self.make_request(action='AllocatePrivateVirtualInterface',
                             body=json.dumps(payload))
def allocate_public_virtual_interface(self, connection_id, owner_account,
                                      new_public_virtual_interface_allocation):
    """
    Provision a public virtual interface to be owned by a different
    AWS customer. The interface stays in the 'Confirming' state — and
    cannot carry traffic — until the owner accepts it with
    ConfirmPublicVirtualInterface.

    :type connection_id: string
    :param connection_id: The connection ID on which the public
        virtual interface is provisioned.

    :type owner_account: string
    :param owner_account: The AWS account that will own the new public
        virtual interface.

    :type new_public_virtual_interface_allocation: dict
    :param new_public_virtual_interface_allocation: Detailed
        information for the public virtual interface to be
        provisioned.
    """
    payload = {
        'connectionId': connection_id,
        'ownerAccount': owner_account,
        'newPublicVirtualInterfaceAllocation':
            new_public_virtual_interface_allocation,
    }
    return self.make_request(action='AllocatePublicVirtualInterface',
                             body=json.dumps(payload))
def confirm_connection(self, connection_id):
    """
    Confirm the creation of a hosted connection on an interconnect.
    A newly created hosted connection sits in the 'Ordering' state
    until its owner confirms it with this call.

    :type connection_id: string
    :param connection_id: ID of the connection
        (e.g. 'dxcon-fg5678gh').
    """
    return self.make_request(
        action='ConfirmConnection',
        body=json.dumps({'connectionId': connection_id}))
def confirm_private_virtual_interface(self, virtual_interface_id,
                                      virtual_gateway_id):
    """
    Accept ownership of a private virtual interface created by another
    customer. Afterwards the interface is created, attached to the
    given virtual private gateway, and able to carry traffic.

    :type virtual_interface_id: string
    :param virtual_interface_id: ID of the virtual interface
        (e.g. 'dxvif-123dfg56').

    :type virtual_gateway_id: string
    :param virtual_gateway_id: ID of the virtual private gateway to
        attach to the virtual interface; manageable via the Amazon VPC
        console or the `EC2 CreateVpnGateway`_ action.
    """
    payload = {
        'virtualInterfaceId': virtual_interface_id,
        'virtualGatewayId': virtual_gateway_id,
    }
    return self.make_request(action='ConfirmPrivateVirtualInterface',
                             body=json.dumps(payload))
def confirm_public_virtual_interface(self, virtual_interface_id):
    """
    Accept ownership of a public virtual interface created by another
    customer. Afterwards the interface is created and able to carry
    traffic.

    :type virtual_interface_id: string
    :param virtual_interface_id: ID of the virtual interface
        (e.g. 'dxvif-123dfg56').
    """
    return self.make_request(
        action='ConfirmPublicVirtualInterface',
        body=json.dumps({'virtualInterfaceId': virtual_interface_id}))
def create_connection(self, location, bandwidth, connection_name):
    """
    Create a new connection between the customer network and an AWS
    Direct Connect location over a standard 1 or 10 gigabit Ethernet
    fiber-optic cable. A connection gives access to AWS services in
    its own region only; it does not provide connectivity to other
    regions.

    :type location: string
    :param location: Where the connection is located (e.g. 'EqSV5').

    :type bandwidth: string
    :param bandwidth: Bandwidth of the connection (e.g. '1Gbps').

    :type connection_name: string
    :param connection_name: The name of the connection.
    """
    return self.make_request(
        action='CreateConnection',
        body=json.dumps({
            'location': location,
            'bandwidth': bandwidth,
            'connectionName': connection_name,
        }))
def create_interconnect(self, interconnect_name, bandwidth, location):
    """
    Create a new interconnect between a Direct Connect partner's
    network and an AWS Direct Connect location. An interconnect can
    host other connections: the partner provisions one per end
    customer via AllocateConnectionOnInterconnect, and each customer
    then creates virtual interfaces on their connection using the VLAN
    the partner assigned.

    :type interconnect_name: string
    :param interconnect_name: The name of the interconnect.

    :type bandwidth: string
    :param bandwidth: The port bandwidth; available values are
        '1Gbps' and '10Gbps'.

    :type location: string
    :param location: Where the interconnect is located (e.g. 'EqSV5').
    """
    payload = {
        'interconnectName': interconnect_name,
        'bandwidth': bandwidth,
        'location': location,
    }
    return self.make_request(action='CreateInterconnect',
                             body=json.dumps(payload))
def create_private_virtual_interface(self, connection_id,
                                     new_private_virtual_interface):
    """
    Create a new private virtual interface — the VLAN that transports
    Direct Connect traffic to a single virtual private cloud (VPC).

    :type connection_id: string
    :param connection_id: ID of the connection
        (e.g. 'dxcon-fg5678gh').

    :type new_private_virtual_interface: dict
    :param new_private_virtual_interface: Detailed information for the
        private virtual interface to be created.
    """
    return self.make_request(
        action='CreatePrivateVirtualInterface',
        body=json.dumps({
            'connectionId': connection_id,
            'newPrivateVirtualInterface': new_private_virtual_interface,
        }))
def create_public_virtual_interface(self, connection_id,
                                    new_public_virtual_interface):
    """
    Create a new public virtual interface — the VLAN that transports
    Direct Connect traffic to public AWS services such as Amazon S3.

    :type connection_id: string
    :param connection_id: ID of the connection
        (e.g. 'dxcon-fg5678gh').

    :type new_public_virtual_interface: dict
    :param new_public_virtual_interface: Detailed information for the
        public virtual interface to be created.
    """
    return self.make_request(
        action='CreatePublicVirtualInterface',
        body=json.dumps({
            'connectionId': connection_id,
            'newPublicVirtualInterface': new_public_virtual_interface,
        }))
def delete_connection(self, connection_id):
    """
    Delete the connection. This only stops AWS Direct Connect port
    hour and data transfer charges; any cross-connect or network
    circuit services must be cancelled separately with the providers.

    :type connection_id: string
    :param connection_id: ID of the connection
        (e.g. 'dxcon-fg5678gh').
    """
    return self.make_request(
        action='DeleteConnection',
        body=json.dumps({'connectionId': connection_id}))
def delete_interconnect(self, interconnect_id):
    """
    Delete the specified interconnect.

    :type interconnect_id: string
    :param interconnect_id: The ID of the interconnect
        (e.g. 'dxcon-abc123').
    """
    return self.make_request(
        action='DeleteInterconnect',
        body=json.dumps({'interconnectId': interconnect_id}))
def delete_virtual_interface(self, virtual_interface_id):
    """
    Delete a virtual interface.

    :type virtual_interface_id: string
    :param virtual_interface_id: ID of the virtual interface
        (e.g. 'dxvif-123dfg56').
    """
    return self.make_request(
        action='DeleteVirtualInterface',
        body=json.dumps({'virtualInterfaceId': virtual_interface_id}))
def describe_connections(self, connection_id=None):
    """
    Display all connections in this region, or only the named one
    when a connection ID is supplied.

    :type connection_id: string
    :param connection_id: ID of the connection
        (e.g. 'dxcon-fg5678gh'). Optional.
    """
    params = {} if connection_id is None else {'connectionId': connection_id}
    return self.make_request(action='DescribeConnections',
                             body=json.dumps(params))
def describe_connections_on_interconnect(self, interconnect_id):
    """
    Return the list of connections that have been provisioned on the
    given interconnect.

    :type interconnect_id: string
    :param interconnect_id: ID of the interconnect on which the
        connections are provisioned (e.g. 'dxcon-abc123').
    """
    return self.make_request(
        action='DescribeConnectionsOnInterconnect',
        body=json.dumps({'interconnectId': interconnect_id}))
def describe_interconnects(self, interconnect_id=None):
    """
    Return the interconnects owned by the AWS account, or only the
    named one when an interconnect ID is supplied.

    :type interconnect_id: string
    :param interconnect_id: The ID of the interconnect
        (e.g. 'dxcon-abc123'). Optional.
    """
    params = {} if interconnect_id is None else {'interconnectId': interconnect_id}
    return self.make_request(action='DescribeInterconnects',
                             body=json.dumps(params))
def describe_locations(self):
    """
    Return the AWS Direct Connect locations in the current region —
    the locations that may be selected when calling CreateConnection
    or CreateInterconnect.
    """
    return self.make_request(action='DescribeLocations',
                             body=json.dumps({}))
def describe_virtual_gateways(self):
    """
    Return the virtual private gateways owned by the AWS account.
    Private virtual interfaces link to a virtual private gateway,
    which can be managed via the Amazon VPC console or the
    `EC2 CreateVpnGateway`_ action.
    """
    return self.make_request(action='DescribeVirtualGateways',
                             body=json.dumps({}))
def describe_virtual_interfaces(self, connection_id=None,
                                virtual_interface_id=None):
    """
    Display virtual interfaces for the AWS account, including those
    deleted fewer than 15 minutes before the call. With a connection
    ID, only interfaces provisioned on that connection are returned;
    with a virtual interface ID, only that single interface is
    returned.

    :type connection_id: string
    :param connection_id: ID of the connection
        (e.g. 'dxcon-fg5678gh'). Optional.

    :type virtual_interface_id: string
    :param virtual_interface_id: ID of the virtual interface
        (e.g. 'dxvif-123dfg56'). Optional.
    """
    # Only include the filters the caller actually supplied.
    candidates = {'connectionId': connection_id,
                  'virtualInterfaceId': virtual_interface_id}
    params = dict((k, v) for k, v in candidates.items() if v is not None)
    return self.make_request(action='DescribeVirtualInterfaces',
                             body=json.dumps(params))
def make_request(self, action, body):
    """
    POST a JSON request to the Direct Connect endpoint and decode the
    response, raising the mapped service exception on a non-200
    status. A 200 response with an empty body yields None.
    """
    headers = {
        'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
        'Host': self.region.endpoint,
        'Content-Type': 'application/x-amz-json-1.1',
        'Content-Length': str(len(body)),
    }
    http_request = self.build_base_http_request(
        method='POST', path='/', auth_path='/', params={},
        headers=headers, data=body)
    response = self._mexe(http_request, sender=None,
                          override_num_retries=10)
    response_body = response.read()
    boto.log.debug(response_body)
    if response.status != 200:
        # Map the service's '__type' fault name to a known exception
        # class, falling back to the generic JSON response error.
        json_body = json.loads(response_body)
        fault_name = json_body.get('__type', None)
        exception_class = self._faults.get(fault_name, self.ResponseError)
        raise exception_class(response.status, response.reason,
                              body=json_body)
    return json.loads(response_body) if response_body else None

View File

@ -59,6 +59,9 @@ def regions():
RegionInfo(name='sa-east-1',
endpoint='dynamodb.sa-east-1.amazonaws.com',
connection_cls=boto.dynamodb.layer2.Layer2),
RegionInfo(name='cn-north-1',
endpoint='dynamodb.cn-north-1.amazonaws.com.cn',
connection_cls=boto.dynamodb.layer2.Layer2),
]

View File

@ -84,7 +84,7 @@ class Layer1(AWSAuthConnection):
break
self.region = region
AWSAuthConnection.__init__(self, self.region.endpoint,
super(Layer1, self).__init__(self.region.endpoint,
aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,

View File

@ -59,6 +59,9 @@ def regions():
RegionInfo(name='sa-east-1',
endpoint='dynamodb.sa-east-1.amazonaws.com',
connection_cls=DynamoDBConnection),
RegionInfo(name='cn-north-1',
endpoint='dynamodb.cn-north-1.amazonaws.com.cn',
connection_cls=DynamoDBConnection),
]

View File

@ -91,10 +91,10 @@ class RangeKey(BaseSchemaField):
class BaseIndexField(object):
"""
An abstract class for defining schema fields.
An abstract class for defining schema indexes.
Contains most of the core functionality for the field. Subclasses must
define an ``attr_type`` to pass to DynamoDB.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
def __init__(self, name, parts):
self.name = name
@ -139,7 +139,7 @@ class BaseIndexField(object):
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY,
'ProjectionType': 'KEYS_ONLY',
}
}
@ -210,3 +210,125 @@ class IncludeIndex(BaseIndexField):
schema_data = super(IncludeIndex, self).schema()
schema_data['Projection']['NonKeyAttributes'] = self.includes_fields
return schema_data
class GlobalBaseIndexField(BaseIndexField):
"""
An abstract class for defining global indexes.
Contains most of the core functionality for the index. Subclasses must
define a ``projection_type`` to pass to DynamoDB.
"""
throughput = {
'read': 5,
'write': 5,
}
def __init__(self, *args, **kwargs):
throughput = kwargs.pop('throughput', None)
if throughput is not None:
self.throughput = throughput
super(GlobalBaseIndexField, self).__init__(*args, **kwargs)
def schema(self):
"""
Returns the schema structure DynamoDB expects.
Example::
>>> index.schema()
{
'IndexName': 'LastNameIndex',
'KeySchema': [
{
'AttributeName': 'username',
'KeyType': 'HASH',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}
"""
schema_data = super(GlobalBaseIndexField, self).schema()
schema_data['ProvisionedThroughput'] = {
'ReadCapacityUnits': int(self.throughput['read']),
'WriteCapacityUnits': int(self.throughput['write']),
}
return schema_data
class GlobalAllIndex(GlobalBaseIndexField):
"""
An index signifying all fields should be in the index.
Example::
>>> GlobalAllIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'ALL'
class GlobalKeysOnlyIndex(GlobalBaseIndexField):
"""
An index signifying only key fields should be in the index.
Example::
>>> GlobalKeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'KEYS_ONLY'
class GlobalIncludeIndex(GlobalBaseIndexField, IncludeIndex):
"""
An index signifying only certain fields should be in the index.
Example::
>>> GlobalIncludeIndex('GenderIndex', parts=[
... HashKey('username'),
... RangeKey('date_joined')
... ],
... includes=['gender'],
... throughput={
... 'read': 2,
... 'write': 1,
... })
"""
projection_type = 'INCLUDE'
def __init__(self, *args, **kwargs):
IncludeIndex.__init__(self, *args, **kwargs)
GlobalBaseIndexField.__init__(self, *args, **kwargs)
def schema(self):
# Pick up the includes.
schema_data = IncludeIndex.schema(self)
# Also the throughput.
schema_data.update(GlobalBaseIndexField.schema(self))
return schema_data

View File

@ -19,6 +19,9 @@ class Item(object):
This object presents a dictionary-like interface for accessing/storing
data. It also tries to intelligently track how data has changed throughout
the life of the instance, to be as efficient as possible about updates.
Empty items, or items that have no data, are considered falsey.
"""
def __init__(self, table, data=None, loaded=False):
"""
@ -105,6 +108,9 @@ class Item(object):
def __contains__(self, key):
return key in self._data
def __nonzero__(self):
return bool(self._data)
def _determine_alterations(self):
"""
Checks the ``-orig_data`` against the ``_data`` to determine what

View File

@ -35,10 +35,9 @@ from boto.dynamodb2 import exceptions
class DynamoDBConnection(AWSQueryConnection):
"""
Amazon DynamoDB is a fast, highly scalable, highly available,
cost-effective non-relational database service. Amazon DynamoDB
removes traditional scalability limitations on data storage while
maintaining low latency and predictable performance.
Amazon DynamoDB **Overview**
This is the Amazon DynamoDB API Reference. This guide provides
descriptions and samples of the Amazon DynamoDB API.
"""
APIVersion = "2012-08-10"
DefaultRegionName = "us-east-1"
@ -76,7 +75,7 @@ class DynamoDBConnection(AWSQueryConnection):
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
super(DynamoDBConnection, self).__init__(**kwargs)
self.region = region
self._validate_checksums = boto.config.getbool(
'DynamoDB', 'validate_checksums', validate_checksums)
@ -130,7 +129,7 @@ class DynamoDBConnection(AWSQueryConnection):
result. Requests for nonexistent items consume the minimum
read capacity units according to the type of read. For more
information, see `Capacity Units Calculations`_ in the Amazon
DynamoDB Developer Guide .
DynamoDB Developer Guide.
:type request_items: map
:param request_items:
@ -150,7 +149,9 @@ class DynamoDBConnection(AWSQueryConnection):
`False` (the default), an eventually consistent read is used.
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
"""
params = {'RequestItems': request_items, }
@ -256,7 +257,9 @@ class DynamoDBConnection(AWSQueryConnection):
match those of the schema in the table's attribute definition.
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@ -274,7 +277,8 @@ class DynamoDBConnection(AWSQueryConnection):
body=json.dumps(params))
def create_table(self, attribute_definitions, table_name, key_schema,
provisioned_throughput, local_secondary_indexes=None):
provisioned_throughput, local_secondary_indexes=None,
global_secondary_indexes=None):
"""
The CreateTable operation adds a new table to your account. In
an AWS account, table names must be unique within each region.
@ -306,7 +310,7 @@ class DynamoDBConnection(AWSQueryConnection):
:param key_schema: Specifies the attributes that make up the primary
key for the table. The attributes in KeySchema must also be defined
in the AttributeDefinitions array. For more information, see `Data
Model`_ in the Amazon DynamoDB Developer Guide .
Model`_ in the Amazon DynamoDB Developer Guide.
Each KeySchemaElement in the array is composed of:
@ -323,7 +327,7 @@ class DynamoDBConnection(AWSQueryConnection):
KeyType of `RANGE`.
For more information, see `Specifying the Primary Key`_ in the Amazon
DynamoDB Developer Guide .
DynamoDB Developer Guide.
:type local_secondary_indexes: list
:param local_secondary_indexes:
@ -360,8 +364,15 @@ class DynamoDBConnection(AWSQueryConnection):
attribute into two different indexes, this counts as two distinct
attributes when determining the total.
:type global_secondary_indexes: list
:param global_secondary_indexes:
:type provisioned_throughput: dict
:param provisioned_throughput:
:param provisioned_throughput: The provisioned throughput settings for
the specified table. The settings can be modified using the
UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide.
"""
params = {
@ -372,6 +383,8 @@ class DynamoDBConnection(AWSQueryConnection):
}
if local_secondary_indexes is not None:
params['LocalSecondaryIndexes'] = local_secondary_indexes
if global_secondary_indexes is not None:
params['GlobalSecondaryIndexes'] = global_secondary_indexes
return self.make_request(action='CreateTable',
body=json.dumps(params))
@ -459,7 +472,9 @@ class DynamoDBConnection(AWSQueryConnection):
+ `ALL_OLD` - The content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@ -496,15 +511,6 @@ class DynamoDBConnection(AWSQueryConnection):
operations, such as GetItem and PutItem , on a table in the
`DELETING` state until the table deletion is complete.
Tables are unique among those associated with the AWS Account
issuing the request, and the AWS region that receives the
request (such as dynamodb.us-east-1.amazonaws.com). Each
Amazon DynamoDB endpoint is entirely independent. For example,
if you have two tables called "MyTable," one in dynamodb.us-
east-1.amazonaws.com and one in dynamodb.us-
west-1.amazonaws.com, they are completely independent and do
not share any data; deleting one does not delete the other.
When you delete a table, any local secondary indexes on that
table are also deleted.
@ -564,7 +570,9 @@ class DynamoDBConnection(AWSQueryConnection):
are used.
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
"""
params = {'TableName': table_name, 'Key': key, }
@ -582,14 +590,6 @@ class DynamoDBConnection(AWSQueryConnection):
Returns an array of all the tables associated with the current
account and endpoint.
Each Amazon DynamoDB endpoint is entirely independent. For
example, if you have two tables called "MyTable," one in
dynamodb.us-east-1.amazonaws.com and one in dynamodb.us-
west-1.amazonaws.com , they are completely independent and do
not share any data. The ListTables operation returns all of
the table names associated with the account making the
request, for the endpoint that receives the request.
:type exclusive_start_table_name: string
:param exclusive_start_table_name: The name of the table that starts
the list. If you already ran a ListTables operation and received a
@ -639,7 +639,7 @@ class DynamoDBConnection(AWSQueryConnection):
primary key attribute, or attributes.
For more information about using this API, see `Working with
Items`_ in the Amazon DynamoDB Developer Guide .
Items`_ in the Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table to contain the item.
@ -653,7 +653,7 @@ class DynamoDBConnection(AWSQueryConnection):
the table's attribute definition.
For more information about primary keys, see `Primary Key`_ in the
Amazon DynamoDB Developer Guide .
Amazon DynamoDB Developer Guide.
Each element in the Item map is an AttributeValue object.
@ -714,7 +714,9 @@ class DynamoDBConnection(AWSQueryConnection):
the content of the old item is returned.
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@ -834,7 +836,7 @@ class DynamoDBConnection(AWSQueryConnection):
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
`Query and Scan`_ in the Amazon DynamoDB Developer Guide .
`Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type consistent_read: boolean
:param consistent_read: If set to `True`, then the operation uses
@ -846,7 +848,7 @@ class DynamoDBConnection(AWSQueryConnection):
The selection criteria for the query.
For a query on a table, you can only have conditions on the table
primary key attributes. you must specify the hash key attribute
primary key attributes. You must specify the hash key attribute
name and value as an `EQ` condition. You can optionally specify a
second condition, referring to the range key attribute.
@ -878,7 +880,7 @@ class DynamoDBConnection(AWSQueryConnection):
example, equals, greater than, less than, etc. Valid comparison
operators for Query: `EQ | LE | LT | GE | GT | BEGINS_WITH |
BETWEEN` For information on specifying data types in JSON, see
`JSON Data Format`_ in the Amazon DynamoDB Developer Guide . The
`JSON Data Format`_ in the Amazon DynamoDB Developer Guide. The
following are descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
@ -938,18 +940,16 @@ class DynamoDBConnection(AWSQueryConnection):
ascending order.
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the item from which to
continue an earlier operation. An earlier operation might provide
this value as the LastEvaluatedKey if that operation was
interrupted before completion; either because of the result set
size or because of the setting for Limit . The LastEvaluatedKey can
be passed back in a new request to continue the operation from that
point.
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
"""
params = {'TableName': table_name, }
@ -994,10 +994,10 @@ class DynamoDBConnection(AWSQueryConnection):
The result set is eventually consistent.
By default, Scan operations proceed sequentially; however, for
faster performance on large tables, applications can perform a
faster performance on large tables, applications can request a
parallel Scan by specifying the Segment and TotalSegments
parameters. For more information, see `Parallel Scan`_ in the
Amazon DynamoDB Developer Guide .
Amazon DynamoDB Developer Guide.
:type table_name: string
:param table_name: The name of the table containing the requested
@ -1020,7 +1020,7 @@ class DynamoDBConnection(AWSQueryConnection):
limit, it stops the operation and returns the matching values up to
the limit, and a LastEvaluatedKey to apply in a subsequent
operation to continue the operation. For more information see
`Query and Scan`_ in the Amazon DynamoDB Developer Guide .
`Query and Scan`_ in the Amazon DynamoDB Developer Guide.
:type select: string
:param select: The attributes to be returned in the result. You can
@ -1084,7 +1084,7 @@ class DynamoDBConnection(AWSQueryConnection):
operators for Scan: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL
| CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` For
information on specifying data types in JSON, see `JSON Data
Format`_ in the Amazon DynamoDB Developer Guide . The following are
Format`_ in the Amazon DynamoDB Developer Guide. The following are
descriptions of each comparison operator.
+ `EQ` : Equal. AttributeValueList can contain only one AttributeValue
@ -1164,44 +1164,27 @@ class DynamoDBConnection(AWSQueryConnection):
"2", "1"]}`
:type exclusive_start_key: map
:param exclusive_start_key: The primary key of the item from which to
continue an earlier operation. An earlier operation might provide
this value as the LastEvaluatedKey if that operation was
interrupted before completion; either because of the result set
size or because of the setting for Limit . The LastEvaluatedKey can
be passed back in a new request to continue the operation from that
point.
:param exclusive_start_key: The primary key of the first item that this
operation will evaluate. Use the value that was returned for
LastEvaluatedKey in the previous operation.
The data type for ExclusiveStartKey must be String, Number or Binary.
No set data types are allowed.
If you are performing a parallel scan, the value of ExclusiveStartKey
must fall into the key space of the Segment being scanned. For
example, suppose that there are two application threads scanning a
table using the following Scan parameters
+ Thread 0: Segment =0; TotalSegments =2
+ Thread 1: Segment =1; TotalSegments =2
Now suppose that the Scan request for Thread 0 completed and returned a
LastEvaluatedKey of "X". Because "X" is part of Segment 0's key
space, it cannot be used anywhere else in the table. If Thread 1
were to issue another Scan request with an ExclusiveStartKey of
"X", Amazon DynamoDB would throw an InputValidationError because
hash key "X" cannot be in Segment 1.
In a parallel scan, a Scan request that includes ExclusiveStartKey must
specify the same segment whose previous Scan returned the
corresponding value of LastEvaluatedKey .
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type total_segments: integer
:param total_segments: For parallel Scan requests, TotalSegments
represents the total number of segments for a table that is being
scanned. Segments are a way to logically divide a table into
equally sized portions, for the duration of the Scan request. The
value of TotalSegments corresponds to the number of application
"workers" (such as threads or processes) that will perform the
parallel Scan . For example, if you want to scan a table using four
:param total_segments: For a parallel Scan request, TotalSegments
represents the total number of segments into which the Scan
operation will be divided. The value of TotalSegments corresponds
to the number of application workers that will perform the parallel
scan. For example, if you want to scan a table using four
application threads, you would specify a TotalSegments value of 4.
The value for TotalSegments must be greater than or equal to 1, and
less than or equal to 4096. If you specify a TotalSegments value of
@ -1210,15 +1193,17 @@ class DynamoDBConnection(AWSQueryConnection):
If you specify TotalSegments , you must also specify Segment .
:type segment: integer
:param segment: For parallel Scan requests, Segment identifies an
individual segment to be scanned by an application "worker" (such
as a thread or a process). Each worker issues a Scan request with a
distinct value for the segment it will scan.
:param segment: For a parallel Scan request, Segment identifies an
individual segment to be scanned by an application worker.
Segment IDs are zero-based, so the first segment is always 0. For
example, if you want to scan a table using four application
threads, the first thread would specify a Segment value of 0, the
second thread would specify 1, and so on.
The value of LastEvaluatedKey returned from a parallel Scan request
must be used as ExclusiveStartKey with the same Segment ID in a
subsequent Scan operation.
The value for Segment must be greater than or equal to 0, and less than
the value provided for TotalSegments .
@ -1411,7 +1396,9 @@ class DynamoDBConnection(AWSQueryConnection):
returned.
:type return_consumed_capacity: string
:param return_consumed_capacity:
:param return_consumed_capacity: If set to `TOTAL`, ConsumedCapacity is
included in the response; if set to `NONE` (the default),
ConsumedCapacity is not included.
:type return_item_collection_metrics: string
:param return_item_collection_metrics: If set to `SIZE`, statistics
@ -1434,7 +1421,8 @@ class DynamoDBConnection(AWSQueryConnection):
return self.make_request(action='UpdateItem',
body=json.dumps(params))
def update_table(self, table_name, provisioned_throughput):
def update_table(self, table_name, provisioned_throughput=None,
global_secondary_index_updates=None):
"""
Updates the provisioned throughput for the given table.
Setting the throughput for a table helps you manage
@ -1443,7 +1431,7 @@ class DynamoDBConnection(AWSQueryConnection):
The provisioned throughput values can be upgraded or
downgraded based on the maximums and minimums listed in the
`Limits`_ section in the Amazon DynamoDB Developer Guide .
`Limits`_ section in the Amazon DynamoDB Developer Guide.
The table must be in the `ACTIVE` state for this operation to
succeed. UpdateTable is an asynchronous operation; while
@ -1462,13 +1450,21 @@ class DynamoDBConnection(AWSQueryConnection):
:param table_name: The name of the table to be updated.
:type provisioned_throughput: dict
:param provisioned_throughput:
:param provisioned_throughput: The provisioned throughput settings for
the specified table. The settings can be modified using the
UpdateTable operation.
For current minimum and maximum provisioned throughput values, see
`Limits`_ in the Amazon DynamoDB Developer Guide.
:type global_secondary_index_updates: list
:param global_secondary_index_updates:
"""
params = {
'TableName': table_name,
'ProvisionedThroughput': provisioned_throughput,
}
params = {'TableName': table_name, }
if provisioned_throughput is not None:
params['ProvisionedThroughput'] = provisioned_throughput
if global_secondary_index_updates is not None:
params['GlobalSecondaryIndexUpdates'] = global_secondary_index_updates
return self.make_request(action='UpdateTable',
body=json.dumps(params))

View File

@ -1,7 +1,9 @@
import boto
from boto.dynamodb2 import exceptions
from boto.dynamodb2.fields import (HashKey, RangeKey,
AllIndex, KeysOnlyIndex, IncludeIndex)
AllIndex, KeysOnlyIndex, IncludeIndex,
GlobalAllIndex, GlobalKeysOnlyIndex,
GlobalIncludeIndex)
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.results import ResultSet, BatchGetResultSet
@ -21,7 +23,7 @@ class Table(object):
max_batch_get = 100
def __init__(self, table_name, schema=None, throughput=None, indexes=None,
connection=None):
global_indexes=None, connection=None):
"""
Sets up a new in-memory ``Table``.
@ -48,6 +50,10 @@ class Table(object):
Optionally accepts a ``indexes`` parameter, which should be a list of
``BaseIndexField`` subclasses representing the desired indexes.
Optionally accepts a ``global_indexes`` parameter, which should be a
list of ``GlobalBaseIndexField`` subclasses representing the desired
indexes.
Optionally accepts a ``connection`` parameter, which should be a
``DynamoDBConnection`` instance (or subclass). This is primarily useful
for specifying alternate connection parameters.
@ -67,13 +73,22 @@ class Table(object):
... 'write': 10,
... }, indexes=[
... KeysOnlyIndex('MostRecentlyJoined', parts=[
... HashKey('username')
... RangeKey('date_joined')
... ]),
... ],
... connection=dynamodb2.connect_to_region('us-west-2',
... aws_access_key_id='key',
... aws_secret_access_key='key',
... ))
... ], global_indexes=[
... GlobalAllIndex('UsersByZipcode', parts=[
... HashKey('zipcode'),
... RangeKey('username'),
... ],
... throughput={
... 'read':10,
... 'write':10,
... }),
... ], connection=dynamodb2.connect_to_region('us-west-2',
... aws_access_key_id='key',
... aws_secret_access_key='key',
... ))
"""
self.table_name = table_name
@ -84,6 +99,7 @@ class Table(object):
}
self.schema = schema
self.indexes = indexes
self.global_indexes = global_indexes
if self.connection is None:
self.connection = DynamoDBConnection()
@ -95,7 +111,7 @@ class Table(object):
@classmethod
def create(cls, table_name, schema, throughput=None, indexes=None,
connection=None):
global_indexes=None, connection=None):
"""
Creates a new table in DynamoDB & returns an in-memory ``Table`` object.
@ -127,6 +143,10 @@ class Table(object):
Optionally accepts a ``indexes`` parameter, which should be a list of
``BaseIndexField`` subclasses representing the desired indexes.
Optionally accepts a ``global_indexes`` parameter, which should be a
list of ``GlobalBaseIndexField`` subclasses representing the desired
indexes.
Optionally accepts a ``connection`` parameter, which should be a
``DynamoDBConnection`` instance (or subclass). This is primarily useful
for specifying alternate connection parameters.
@ -142,7 +162,15 @@ class Table(object):
... }, indexes=[
... KeysOnlyIndex('MostRecentlyJoined', parts=[
... RangeKey('date_joined')
... ]),
... ]), global_indexes=[
... GlobalAllIndex('UsersByZipcode', parts=[
... HashKey('zipcode'),
... RangeKey('username'),
... ],
... throughput={
... 'read':10,
... 'write':10,
... }),
... ])
"""
@ -155,13 +183,18 @@ class Table(object):
if indexes is not None:
table.indexes = indexes
if global_indexes is not None:
table.global_indexes = global_indexes
# Prep the schema.
raw_schema = []
attr_defs = []
seen_attrs = set()
for field in table.schema:
raw_schema.append(field.schema())
# Build the attributes off what we know.
seen_attrs.add(field.name)
attr_defs.append(field.definition())
raw_throughput = {
@ -170,23 +203,24 @@ class Table(object):
}
kwargs = {}
if table.indexes:
# Prep the LSIs.
raw_lsi = []
kwarg_map = {
'indexes': 'local_secondary_indexes',
'global_indexes': 'global_secondary_indexes',
}
for index_attr in ('indexes', 'global_indexes'):
table_indexes = getattr(table, index_attr)
if table_indexes:
raw_indexes = []
for index_field in table_indexes:
raw_indexes.append(index_field.schema())
# Make sure all attributes specified in the indexes are
# added to the definition
for field in index_field.parts:
if field.name not in seen_attrs:
seen_attrs.add(field.name)
attr_defs.append(field.definition())
for index_field in table.indexes:
raw_lsi.append(index_field.schema())
# Again, build the attributes off what we know.
# HOWEVER, only add attributes *NOT* already seen.
attr_define = index_field.definition()
for part in attr_define:
attr_names = [attr['AttributeName'] for attr in attr_defs]
if not part['AttributeName'] in attr_names:
attr_defs.append(part)
kwargs['local_secondary_indexes'] = raw_lsi
kwargs[kwarg_map[index_attr]] = raw_indexes
table.connection.create_table(
table_name=table.table_name,
@ -294,7 +328,7 @@ class Table(object):
# This is leaky.
return result
def update(self, throughput):
def update(self, throughput, global_indexes=None):
"""
Updates table attributes in DynamoDB.
@ -316,12 +350,46 @@ class Table(object):
... })
True
# To also update the global index(es) throughput.
>>> users.update(throughput={
... 'read': 20,
... 'write': 10,
... },
... global_secondary_indexes={
... 'TheIndexNameHere': {
... 'read': 15,
... 'write': 5,
... }
... })
True
"""
self.throughput = throughput
self.connection.update_table(self.table_name, {
data = {
'ReadCapacityUnits': int(self.throughput['read']),
'WriteCapacityUnits': int(self.throughput['write']),
})
}
gsi_data = None
if global_indexes:
gsi_data = []
for gsi_name, gsi_throughput in global_indexes.items():
gsi_data.append({
"Update": {
"IndexName": gsi_name,
"ProvisionedThroughput": {
"ReadCapacityUnits": int(gsi_throughput['read']),
"WriteCapacityUnits": int(gsi_throughput['write']),
},
},
})
self.connection.update_table(
self.table_name,
provisioned_throughput=data,
global_secondary_index_updates=gsi_data
)
return True
def delete(self):
@ -424,7 +492,7 @@ class Table(object):
with boto.dynamodb. Unlike get_item, it takes hash_key and range_key first,
although you may still specify keyword arguments instead.
Also unlike the get_item command, if the returned item has no keys
Also unlike the get_item command, if the returned item has no keys
(i.e., it does not exist in DynamoDB), a None result is returned, instead
of an empty key object.
@ -668,6 +736,10 @@ class Table(object):
lookup['AttributeValueList'].append(
self._dynamizer.encode(value[1])
)
# Special-case the ``IN`` case
elif field_bits[-1] == 'in':
for val in value:
lookup['AttributeValueList'].append(self._dynamizer.encode(val))
else:
# Fix up the value for encoding, because it was built to only work
# with ``set``s.

View File

@ -37,6 +37,7 @@ RegionData = {
'ap-northeast-1': 'ec2.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'ec2.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'ec2.ap-southeast-2.amazonaws.com',
'cn-north-1': 'ec2.cn-north-1.amazonaws.com.cn',
}

View File

@ -37,7 +37,7 @@ class Address(EC2Object):
"""
def __init__(self, connection=None, public_ip=None, instance_id=None):
EC2Object.__init__(self, connection)
super(Address, self).__init__(connection)
self.connection = connection
self.public_ip = public_ip
self.instance_id = instance_id

View File

@ -55,6 +55,7 @@ RegionData = {
'ap-northeast-1': 'autoscaling.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'autoscaling.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'autoscaling.ap-southeast-2.amazonaws.com',
'cn-north-1': 'autoscaling.cn-north-1.amazonaws.com.cn',
}
@ -114,7 +115,7 @@ class AutoScaleConnection(AWSQueryConnection):
self.DefaultRegionEndpoint,
AutoScaleConnection)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
super(AutoScaleConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
@ -163,7 +164,7 @@ class AutoScaleConnection(AWSQueryConnection):
# get availability zone information (required param)
zones = as_group.availability_zones
self.build_list_params(params, zones, 'AvailabilityZones')
if as_group.desired_capacity:
if as_group.desired_capacity is not None:
params['DesiredCapacity'] = as_group.desired_capacity
if as_group.vpc_zone_identifier:
params['VPCZoneIdentifier'] = as_group.vpc_zone_identifier
@ -785,7 +786,7 @@ class AutoScaleConnection(AWSQueryConnection):
params = {'AutoScalingGroupName': group_name,
'DesiredCapacity': desired_capacity}
if honor_cooldown:
params['HonorCooldown'] = json.dumps('True')
params['HonorCooldown'] = 'true'
return self.get_status('SetDesiredCapacity', params)

View File

@ -129,8 +129,8 @@ class AutoScalingGroup(object):
:param health_check_type: The service you want the health status from,
Amazon EC2 or Elastic Load Balancer.
:type launch_config_name: str or LaunchConfiguration
:param launch_config_name: Name of launch configuration (required).
:type launch_config: str or LaunchConfiguration
:param launch_config: Name of launch configuration (required).
:type load_balancers: list
:param load_balancers: List of load balancers.

View File

@ -14,21 +14,21 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Bundle Task
Represents an EC2 Bundle Task
"""
from boto.ec2.ec2object import EC2Object
class BundleInstanceTask(EC2Object):
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
super(BundleInstanceTask, self).__init__(connection)
self.id = None
self.instance_id = None
self.progress = None
@ -38,7 +38,7 @@ class BundleInstanceTask(EC2Object):
self.prefix = None
self.upload_policy = None
self.upload_policy_signature = None
self.update_time = None
self.update_time = None
self.code = None
self.message = None

View File

@ -41,6 +41,7 @@ RegionData = {
'ap-northeast-1': 'monitoring.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'monitoring.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'monitoring.ap-southeast-2.amazonaws.com',
'cn-north-1': 'monitoring.cn-north-1.amazonaws.com.cn',
}
@ -107,7 +108,7 @@ class CloudWatchConnection(AWSQueryConnection):
if self.region.name == 'eu-west-1':
validate_certs = False
AWSQueryConnection.__init__(self, aws_access_key_id,
super(CloudWatchConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
@ -117,7 +118,7 @@ class CloudWatchConnection(AWSQueryConnection):
validate_certs=validate_certs)
def _required_auth_capability(self):
return ['ec2']
return ['hmac-v4']
def build_dimension_param(self, dimension, params):
prefix = 'Dimensions.member'

View File

@ -77,13 +77,6 @@ class Metric(object):
:param statistics: A list of statistics names Valid values:
Average | Sum | SampleCount | Maximum | Minimum
:type dimensions: dict
:param dimensions: A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.
:type unit: string
:param unit: The unit for the metric. Value values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |

View File

@ -31,6 +31,7 @@ from datetime import datetime
from datetime import timedelta
import boto
from boto.auth import detect_potential_sigv4
from boto.connection import AWSQueryConnection
from boto.resultset import ResultSet
from boto.ec2.image import Image, ImageAttribute, CopyImage
@ -62,6 +63,7 @@ from boto.ec2.instancestatus import InstanceStatusSet
from boto.ec2.volumestatus import VolumeStatusSet
from boto.ec2.networkinterface import NetworkInterface
from boto.ec2.attributes import AccountAttribute, VPCAttribute
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.exception import EC2ResponseError
#boto.set_stream_logger('ec2')
@ -69,7 +71,7 @@ from boto.exception import EC2ResponseError
class EC2Connection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'ec2_version', '2013-10-01')
APIVersion = boto.config.get('Boto', 'ec2_version', '2013-10-15')
DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint',
'ec2.us-east-1.amazonaws.com')
@ -89,7 +91,7 @@ class EC2Connection(AWSQueryConnection):
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
super(EC2Connection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
@ -100,6 +102,7 @@ class EC2Connection(AWSQueryConnection):
if api_version:
self.APIVersion = api_version
@detect_potential_sigv4
def _required_auth_capability(self):
return ['ec2']
@ -260,7 +263,9 @@ class EC2Connection(AWSQueryConnection):
def register_image(self, name=None, description=None, image_location=None,
architecture=None, kernel_id=None, ramdisk_id=None,
root_device_name=None, block_device_map=None,
dry_run=False, virtualization_type=None):
dry_run=False, virtualization_type=None,
sriov_net_support=None,
snapshot_id=None):
"""
Register an image.
@ -299,6 +304,16 @@ class EC2Connection(AWSQueryConnection):
* paravirtual
* hvm
:type sriov_net_support: string
:param sriov_net_support: Advanced networking support.
Valid choices are:
* simple
:type snapshot_id: string
:param snapshot_id: A snapshot ID for the snapshot to be used
as root device for the image. Mutually exclusive with
block_device_map, requires root_device_name
:rtype: string
:return: The new image id
"""
@ -317,12 +332,19 @@ class EC2Connection(AWSQueryConnection):
params['ImageLocation'] = image_location
if root_device_name:
params['RootDeviceName'] = root_device_name
if snapshot_id:
root_vol = BlockDeviceType(snapshot_id=snapshot_id)
block_device_map = BlockDeviceMapping()
block_device_map[root_device_name] = root_vol
if block_device_map:
block_device_map.ec2_build_list_params(params)
if dry_run:
params['DryRun'] = 'true'
if virtualization_type:
params['VirtualizationType'] = virtualization_type
if sriov_net_support:
params['SriovNetSupport'] = sriov_net_support
rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
image_id = getattr(rs, 'imageId', None)
@ -510,7 +532,8 @@ class EC2Connection(AWSQueryConnection):
# Instance methods
def get_all_instances(self, instance_ids=None, filters=None, dry_run=False):
def get_all_instances(self, instance_ids=None, filters=None, dry_run=False,
max_results=None):
"""
Retrieve all the instance reservations associated with your account.
@ -535,6 +558,10 @@ class EC2Connection(AWSQueryConnection):
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:type max_results: int
:param max_results: The maximum number of paginated instance
items per response.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation`
@ -543,10 +570,11 @@ class EC2Connection(AWSQueryConnection):
'replaced with get_all_reservations.'),
PendingDeprecationWarning)
return self.get_all_reservations(instance_ids=instance_ids,
filters=filters, dry_run=dry_run)
filters=filters, dry_run=dry_run,
max_results=max_results)
def get_only_instances(self, instance_ids=None, filters=None,
dry_run=False):
dry_run=False, max_results=None):
# A future release should rename this method to get_all_instances
# and make get_only_instances an alias for that.
"""
@ -566,17 +594,22 @@ class EC2Connection(AWSQueryConnection):
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:type max_results: int
:param max_results: The maximum number of paginated instance
items per response.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Instance`
"""
reservations = self.get_all_reservations(instance_ids=instance_ids,
filters=filters,
dry_run=dry_run)
dry_run=dry_run,
max_results=max_results)
return [instance for reservation in reservations
for instance in reservation.instances]
def get_all_reservations(self, instance_ids=None, filters=None,
dry_run=False):
dry_run=False, max_results=None):
"""
Retrieve all the instance reservations associated with your account.
@ -594,6 +627,10 @@ class EC2Connection(AWSQueryConnection):
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:type max_results: int
:param max_results: The maximum number of paginated instance
items per response.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation`
"""
@ -612,6 +649,8 @@ class EC2Connection(AWSQueryConnection):
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
if max_results is not None:
params['MaxResults'] = max_results
return self.get_list('DescribeInstances', params,
[('item', Reservation)], verb='POST')
@ -723,6 +762,16 @@ class EC2Connection(AWSQueryConnection):
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
* g2.2xlarge
* c3.large
* c3.xlarge
* c3.2xlarge
* c3.4xlarge
* c3.8xlarge
* i2.xlarge
* i2.2xlarge
* i2.4xlarge
* i2.8xlarge
:type placement: string
:param placement: The Availability Zone to launch the instance into.
@ -1028,6 +1077,7 @@ class EC2Connection(AWSQueryConnection):
* sourceDestCheck
* groupSet
* ebsOptimized
* sriovNetSupport
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
@ -1137,6 +1187,7 @@ class EC2Connection(AWSQueryConnection):
* sourceDestCheck - Boolean (true)
* groupSet - Set of Security Groups or IDs
* ebsOptimized - Boolean (false)
* sriovNetSupport - String - ie: 'simple'
:type value: string
:param value: The new value for the attribute
@ -1249,7 +1300,8 @@ class EC2Connection(AWSQueryConnection):
def get_spot_price_history(self, start_time=None, end_time=None,
instance_type=None, product_description=None,
availability_zone=None, dry_run=False):
availability_zone=None, dry_run=False,
max_results=None):
"""
Retrieve the recent history of spot instances pricing.
@ -1283,6 +1335,10 @@ class EC2Connection(AWSQueryConnection):
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:type max_results: int
:param max_results: The maximum number of paginated items
per response.
:rtype: list
:return: A list tuples containing price and timestamp.
"""
@ -1299,6 +1355,8 @@ class EC2Connection(AWSQueryConnection):
params['AvailabilityZone'] = availability_zone
if dry_run:
params['DryRun'] = 'true'
if max_results is not None:
params['MaxResults'] = max_results
return self.get_list('DescribeSpotPriceHistory', params,
[('item', SpotPriceHistory)], verb='POST')
@ -1361,16 +1419,34 @@ class EC2Connection(AWSQueryConnection):
:type instance_type: string
:param instance_type: The type of instance to run:
* t1.micro
* m1.small
* m1.medium
* m1.large
* m1.xlarge
* m3.xlarge
* m3.2xlarge
* c1.medium
* c1.xlarge
* m2.xlarge
* m2.2xlarge
* m2.4xlarge
* cr1.8xlarge
* hi1.4xlarge
* hs1.8xlarge
* cc1.4xlarge
* t1.micro
* cg1.4xlarge
* cc2.8xlarge
* g2.2xlarge
* c3.large
* c3.xlarge
* c3.2xlarge
* c3.4xlarge
* c3.8xlarge
* i2.xlarge
* i2.2xlarge
* i2.4xlarge
* i2.8xlarge
:type placement: string
:param placement: The availability zone in which to launch
@ -2672,7 +2748,7 @@ class EC2Connection(AWSQueryConnection):
def import_key_pair(self, key_name, public_key_material, dry_run=False):
"""
mports the public key from an RSA key pair that you created
imports the public key from an RSA key pair that you created
with a third-party tool.
Supported formats:
@ -3931,7 +4007,7 @@ class EC2Connection(AWSQueryConnection):
params['Tag.%d.Value'%i] = value
i += 1
def get_all_tags(self, filters=None, dry_run=False):
def get_all_tags(self, filters=None, dry_run=False, max_results=None):
"""
Retrieve all the metadata tags associated with your account.
@ -3948,6 +4024,10 @@ class EC2Connection(AWSQueryConnection):
:type dry_run: bool
:param dry_run: Set to True if the operation should not actually run.
:type max_results: int
:param max_results: The maximum number of paginated instance
items per response.
:rtype: list
:return: A list of :class:`boto.ec2.tag.Tag` objects
"""
@ -3956,6 +4036,8 @@ class EC2Connection(AWSQueryConnection):
self.build_filter_params(params, filters)
if dry_run:
params['DryRun'] = 'true'
if max_results is not None:
params['MaxResults'] = max_results
return self.get_list('DescribeTags', params,
[('item', Tag)], verb='POST')

View File

@ -53,7 +53,7 @@ class TaggedEC2Object(EC2Object):
"""
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
super(TaggedEC2Object, self).__init__(connection)
self.tags = TagSet()
def startElement(self, name, attrs, connection):

View File

@ -44,6 +44,7 @@ RegionData = {
'ap-northeast-1': 'elasticloadbalancing.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'elasticloadbalancing.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'elasticloadbalancing.ap-southeast-2.amazonaws.com',
'cn-north-1': 'elasticloadbalancing.cn-north-1.amazonaws.com.cn',
}
@ -102,7 +103,7 @@ class ELBConnection(AWSQueryConnection):
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
super(ELBConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
@ -159,7 +160,7 @@ class ELBConnection(AWSQueryConnection):
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', HTTP', or
'HTTPS'; SSLCertificateID is the ARN of a AWS AIM
'HTTPS'; SSLCertificateID is the ARN of a AWS IAM
certificate, and must be specified when doing HTTPS.
:type subnets: list of strings
@ -264,7 +265,7 @@ class ELBConnection(AWSQueryConnection):
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', HTTP', or
'HTTPS'; SSLCertificateID is the ARN of a AWS AIM
'HTTPS'; SSLCertificateID is the ARN of a AWS IAM
certificate, and must be specified when doing HTTPS.
:type complex_listeners: List of tuples
@ -390,6 +391,76 @@ class ELBConnection(AWSQueryConnection):
params, LoadBalancerZones)
return obj.zones
def modify_lb_attribute(self, load_balancer_name, attribute, value):
    """Changes an attribute of a Load Balancer

    :type load_balancer_name: string
    :param load_balancer_name: The name of the Load Balancer

    :type attribute: string
    :param attribute: The attribute you wish to change.

        * crossZoneLoadBalancing - Boolean (true)

    :type value: string
    :param value: The new value for the attribute

    :rtype: bool
    :return: Whether the operation succeeded or not

    :raises ValueError: if ``attribute`` is not a recognized
        Load Balancer attribute name.
    """
    attr_key = attribute.lower()
    # Boolean-valued attributes are serialized on the wire as the
    # strings 'true' / 'false'.
    if attr_key in ('crosszoneloadbalancing',) and isinstance(value, bool):
        value = 'true' if value else 'false'

    params = {'LoadBalancerName': load_balancer_name}
    if attr_key == 'crosszoneloadbalancing':
        params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled'
               ] = value
    else:
        raise ValueError('InvalidAttribute', attribute)
    return self.get_status('ModifyLoadBalancerAttributes', params,
                           verb='GET')
def get_all_lb_attributes(self, load_balancer_name):
    """Gets all Attributes of a Load Balancer

    :type load_balancer_name: string
    :param load_balancer_name: The name of the Load Balancer

    :rtype: boto.ec2.elb.attributes.LbAttributes
    :return: The attribute object of the ELB.
    """
    # NOTE(review): the import is deliberately function-local
    # (presumably to avoid a circular import) -- keep it here.
    from boto.ec2.elb.attributes import LbAttributes
    return self.get_object('DescribeLoadBalancerAttributes',
                           {'LoadBalancerName': load_balancer_name},
                           LbAttributes)
def get_lb_attribute(self, load_balancer_name, attribute):
    """Gets an attribute of a Load Balancer

    This will make an EC2 call for each method call.

    :type load_balancer_name: string
    :param load_balancer_name: The name of the Load Balancer

    :type attribute: string
    :param attribute: The attribute you wish to see.

        * crossZoneLoadBalancing - Boolean

    :rtype: Attribute dependent
    :return: The new value for the attribute
    """
    all_attrs = self.get_all_lb_attributes(load_balancer_name)
    if attribute.lower() != 'crosszoneloadbalancing':
        # Unrecognized attribute names fall through to None.
        return None
    return all_attrs.cross_zone_load_balancing.enabled
def register_instances(self, load_balancer_name, instances):
"""
Add new Instances to an existing Load Balancer.

View File

@ -0,0 +1,61 @@
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Created by Chris Huegle for TellApart, Inc.
class CrossZoneLoadBalancingAttribute(object):
    """
    Represents the CrossZoneLoadBalancing segment of ELB Attributes.
    """
    def __init__(self, connection=None):
        # Tri-state: stays None until an 'Enabled' element is parsed.
        self.enabled = None

    def __repr__(self):
        return 'CrossZoneLoadBalancingAttribute(%s)' % (self.enabled)

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        if name == 'Enabled':
            # The API reports the flag as the string 'true'/'false'.
            self.enabled = (value.lower() == 'true')
class LbAttributes(object):
"""
Represents the Attributes of an Elastic Load Balancer.
"""
def __init__(self, connection=None):
self.connection = connection
self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute(
self.connection)
def __repr__(self):
return 'LbAttributes(%s)' % (
repr(self.cross_zone_load_balancing))
def startElement(self, name, attrs, connection):
if name == 'CrossZoneLoadBalancing':
return self.cross_zone_load_balancing
def endElement(self, name, value, connection):
pass

View File

@ -122,6 +122,7 @@ class LoadBalancer(object):
self.vpc_id = None
self.scheme = None
self.backends = None
self._attributes = None
def __repr__(self):
return 'LoadBalancer:%s' % self.name
@ -203,6 +204,58 @@ class LoadBalancer(object):
new_zones = self.connection.disable_availability_zones(self.name, zones)
self.availability_zones = new_zones
def get_attributes(self, force=False):
"""
Gets the LbAttributes. The Attributes will be cached.
:type force: bool
:param force: Ignore cache value and reload.
:rtype: boto.ec2.elb.attributes.LbAttributes
:return: The LbAttribues object
"""
if not self._attributes or force:
self._attributes = self.connection.get_all_lb_attributes(self.name)
return self._attributes
def is_cross_zone_load_balancing(self, force=False):
"""
Identifies if the ELB is current configured to do CrossZone Balancing.
:type force: bool
:param force: Ignore cache value and reload.
:rtype: bool
:return: True if balancing is enabled, False if not.
"""
return self.get_attributes(force).cross_zone_load_balancing.enabled
def enable_cross_zone_load_balancing(self):
"""
Turns on CrossZone Load Balancing for this ELB.
:rtype: bool
:return: True if successful, False if not.
"""
success = self.connection.modify_lb_attribute(
self.name, 'crossZoneLoadBalancing', True)
if success and self._attributes:
self._attributes.cross_zone_load_balancing.enabled = True
return success
def disable_cross_zone_load_balancing(self):
"""
Turns off CrossZone Load Balancing for this ELB.
:rtype: bool
:return: True if successful, False if not.
"""
success = self.connection.modify_lb_attribute(
self.name, 'crossZoneLoadBalancing', False)
if success and self._attributes:
self._attributes.cross_zone_load_balancing.enabled = False
return success
def register_instances(self, instances):
"""
Adds instances to this load balancer. All instances must be in the same

View File

@ -15,13 +15,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Group:
class Group(object):
def __init__(self, parent=None):
self.id = None
self.name = None
@ -36,4 +35,4 @@ class Group:
self.name = value
else:
setattr(self, name, value)

View File

@ -23,8 +23,8 @@
from boto.ec2.ec2object import EC2Object, TaggedEC2Object
from boto.ec2.blockdevicemapping import BlockDeviceMapping
class ProductCodes(list):
class ProductCodes(list):
def startElement(self, name, attrs, connection):
pass
@ -32,8 +32,8 @@ class ProductCodes(list):
if name == 'productCode':
self.append(value)
class BillingProducts(list):
class BillingProducts(list):
def startElement(self, name, attrs, connection):
pass
@ -47,7 +47,7 @@ class Image(TaggedEC2Object):
"""
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
super(Image, self).__init__(connection)
self.id = None
self.location = None
self.state = None
@ -70,12 +70,13 @@ class Image(TaggedEC2Object):
self.virtualization_type = None
self.hypervisor = None
self.instance_lifecycle = None
self.sriov_net_support = None
def __repr__(self):
return 'Image:%s' % self.id
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
retval = super(Image, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'blockDeviceMapping':
@ -136,6 +137,8 @@ class Image(TaggedEC2Object):
self.hypervisor = value
elif name == 'instanceLifecycle':
self.instance_lifecycle = value
elif name == 'sriovNetSupport':
self.sriov_net_support = value
else:
setattr(self, name, value)
@ -218,6 +221,16 @@ class Image(TaggedEC2Object):
* cc1.4xlarge
* cg1.4xlarge
* cc2.8xlarge
* g2.2xlarge
* c3.large
* c3.xlarge
* c3.2xlarge
* c3.4xlarge
* c3.8xlarge
* i2.xlarge
* i2.2xlarge
* i2.4xlarge
* i2.8xlarge
:type placement: string
:param placement: The Availability Zone to launch the instance into.
@ -365,8 +378,8 @@ class Image(TaggedEC2Object):
)
return img_attrs.ramdisk
class ImageAttribute:
class ImageAttribute(object):
def __init__(self, parent=None):
self.name = None
self.kernel = None

View File

@ -122,7 +122,7 @@ class Reservation(EC2Object):
Reservation.
"""
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
super(Reservation, self).__init__(connection)
self.id = None
self.owner_id = None
self.groups = []
@ -211,7 +211,7 @@ class Instance(TaggedEC2Object):
"""
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
super(Instance, self).__init__(connection)
self.id = None
self.dns_name = None
self.public_dns_name = None
@ -288,7 +288,7 @@ class Instance(TaggedEC2Object):
return self._placement.tenancy
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
retval = super(Instance, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'monitoring':
@ -606,8 +606,7 @@ class Instance(TaggedEC2Object):
)
class ConsoleOutput:
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
@ -629,7 +628,6 @@ class ConsoleOutput:
class InstanceAttribute(dict):
ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData',
'disableApiTermination',
'instanceInitiatedShutdownBehavior',
@ -668,7 +666,6 @@ class InstanceAttribute(dict):
class SubParse(dict):
def __init__(self, section, parent=None):
dict.__init__(self)
self.section = section

View File

@ -30,7 +30,7 @@ from boto.exception import BotoClientError
class KeyPair(EC2Object):
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
super(KeyPair, self).__init__(connection)
self.name = None
self.fingerprint = None
self.material = None

View File

@ -44,7 +44,7 @@ class GroupList(list):
class LaunchSpecification(EC2Object):
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
super(LaunchSpecification, self).__init__(connection)
self.key_name = None
self.instance_type = None
self.image_id = None

View File

@ -99,7 +99,7 @@ class NetworkInterface(TaggedEC2Object):
"""
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
super(NetworkInterface, self).__init__(connection)
self.id = None
self.subnet_id = None
self.vpc_id = None
@ -119,7 +119,8 @@ class NetworkInterface(TaggedEC2Object):
return 'NetworkInterface:%s' % self.id
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
retval = super(NetworkInterface, self).startElement(name, attrs,
connection)
if retval is not None:
return retval
if name == 'groupSet':

View File

@ -27,7 +27,7 @@ from boto.exception import BotoClientError
class PlacementGroup(EC2Object):
def __init__(self, connection=None, name=None, strategy=None, state=None):
EC2Object.__init__(self, connection)
super(PlacementGroup, self).__init__(connection)
self.name = name
self.strategy = strategy
self.state = state

View File

@ -16,7 +16,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -27,8 +27,8 @@ class EC2RegionInfo(RegionInfo):
"""
Represents an EC2 Region
"""
def __init__(self, connection=None, name=None, endpoint=None):
from boto.ec2.connection import EC2Connection
RegionInfo.__init__(self, connection, name, endpoint,
super(EC2RegionInfo, self).__init__(connection, name, endpoint,
EC2Connection)

View File

@ -31,7 +31,7 @@ class ReservedInstancesOffering(EC2Object):
usage_price=None, description=None, instance_tenancy=None,
currency_code=None, offering_type=None,
recurring_charges=None, pricing_details=None):
EC2Object.__init__(self, connection)
super(ReservedInstancesOffering, self).__init__(connection)
self.id = id
self.instance_type = instance_type
self.availability_zone = availability_zone
@ -128,9 +128,10 @@ class ReservedInstance(ReservedInstancesOffering):
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None,
instance_count=None, state=None):
ReservedInstancesOffering.__init__(self, connection, id, instance_type,
availability_zone, duration, fixed_price,
usage_price, description)
super(ReservedInstance, self).__init__(connection, id, instance_type,
availability_zone, duration,
fixed_price, usage_price,
description)
self.instance_count = instance_count
self.state = state
self.start = None
@ -148,7 +149,7 @@ class ReservedInstance(ReservedInstancesOffering):
elif name == 'start':
self.start = value
else:
ReservedInstancesOffering.endElement(self, name, value, connection)
super(ReservedInstance, self).endElement(name, value, connection)
class ReservedInstanceListing(EC2Object):

View File

@ -31,7 +31,7 @@ class SecurityGroup(TaggedEC2Object):
def __init__(self, connection=None, owner_id=None,
name=None, description=None, id=None):
TaggedEC2Object.__init__(self, connection)
super(SecurityGroup, self).__init__(connection)
self.id = id
self.owner_id = owner_id
self.name = name
@ -44,7 +44,8 @@ class SecurityGroup(TaggedEC2Object):
return 'SecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
retval = super(SecurityGroup, self).startElement(name, attrs,
connection)
if retval is not None:
return retval
if name == 'ipPermissions':

View File

@ -26,12 +26,12 @@ Represents an EC2 Elastic Block Store Snapshot
from boto.ec2.ec2object import TaggedEC2Object
from boto.ec2.zone import Zone
class Snapshot(TaggedEC2Object):
class Snapshot(TaggedEC2Object):
AttrName = 'createVolumePermission'
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
super(Snapshot, self).__init__(connection)
self.id = None
self.volume_id = None
self.status = None
@ -156,8 +156,7 @@ class Snapshot(TaggedEC2Object):
)
class SnapshotAttribute:
class SnapshotAttribute(object):
def __init__(self, parent=None):
self.snapshot_id = None
self.attrs = {}

View File

@ -29,7 +29,7 @@ class SpotDatafeedSubscription(EC2Object):
def __init__(self, connection=None, owner_id=None,
bucket=None, prefix=None, state=None,fault=None):
EC2Object.__init__(self, connection)
super(SpotDatafeedSubscription, self).__init__(connection)
self.owner_id = owner_id
self.bucket = bucket
self.prefix = prefix

View File

@ -120,7 +120,7 @@ class SpotInstanceRequest(TaggedEC2Object):
"""
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
super(SpotInstanceRequest, self).__init__(connection)
self.id = None
self.price = None
self.type = None
@ -141,7 +141,8 @@ class SpotInstanceRequest(TaggedEC2Object):
return 'SpotInstanceRequest:%s' % self.id
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
retval = super(SpotInstanceRequest, self).startElement(name, attrs,
connection)
if retval is not None:
return retval
if name == 'launchSpecification':

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -26,9 +26,9 @@ Represents an EC2 Spot Instance Request
from boto.ec2.ec2object import EC2Object
class SpotPriceHistory(EC2Object):
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
super(SpotPriceHistory, self).__init__(connection)
self.price = 0.0
self.instance_type = None
self.product_description = None

View File

@ -35,7 +35,7 @@ class VmType(EC2Object):
def __init__(self, connection=None, name=None, cores=None,
memory=None, disk=None):
EC2Object.__init__(self, connection)
super(VmType, self).__init__(connection)
self.connection = connection
self.name = name
self.cores = cores

View File

@ -47,7 +47,7 @@ class Volume(TaggedEC2Object):
"""
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
super(Volume, self).__init__(connection)
self.id = None
self.create_time = None
self.status = None
@ -62,7 +62,7 @@ class Volume(TaggedEC2Object):
return 'Volume:%s' % self.id
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
retval = super(Volume, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'attachmentSet':
@ -260,7 +260,6 @@ class AttachmentSet(object):
:ivar attach_time: Attached since
:ivar device: The device the instance has mapped
"""
def __init__(self):
self.id = None
self.instance_id = None
@ -289,8 +288,7 @@ class AttachmentSet(object):
setattr(self, name, value)
class VolumeAttribute:
class VolumeAttribute(object):
def __init__(self, parent=None):
self.id = None
self._key_name = None

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -38,7 +38,7 @@ class MessageSet(list):
self.append(value)
else:
setattr(self, name, value)
class Zone(EC2Object):
"""
Represents an Availability Zone.
@ -48,9 +48,9 @@ class Zone(EC2Object):
:ivar region_name: The name of the region the zone is associated with.
:ivar messages: A list of messages related to the zone.
"""
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
super(Zone, self).__init__(connection)
self.name = None
self.state = None
self.region_name = None
@ -64,7 +64,7 @@ class Zone(EC2Object):
self.messages = MessageSet()
return self.messages
return None
def endElement(self, name, value, connection):
if name == 'zoneName':
self.name = value

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -42,7 +42,7 @@ class ECSConnection(AWSQueryConnection):
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com',
debug=0, https_connection_factory=None, path='/'):
AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, path)
@ -77,13 +77,13 @@ class ECSConnection(AWSQueryConnection):
#
# Group methods
#
def item_search(self, search_index, **params):
"""
Returns items that satisfy the search criteria, including one or more search
Returns items that satisfy the search criteria, including one or more search
indices.
For a full list of search terms,
For a full list of search terms,
:see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html
"""
params['SearchIndex'] = search_index

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -26,7 +26,7 @@ from StringIO import StringIO
class ResponseGroup(xml.sax.ContentHandler):
"""A Generic "Response Group", which can
be anything from the entire list of Items to
be anything from the entire list of Items to
specific response elements within an item"""
def __init__(self, connection=None, nodename=None):
@ -45,7 +45,7 @@ class ResponseGroup(xml.sax.ContentHandler):
#
def get(self, name):
return self.__dict__.get(name)
def set(self, name, value):
self.__dict__[name] = value
@ -90,14 +90,14 @@ class Item(ResponseGroup):
def __init__(self, connection=None):
"""Initialize this Item"""
ResponseGroup.__init__(self, connection, "Item")
super(Item, self).__init__(connection, "Item")
class ItemSet(ResponseGroup):
"""A special ResponseGroup that has built-in paging, and
only creates new Items on the "Item" tag"""
def __init__(self, connection, action, params, page=0):
ResponseGroup.__init__(self, connection, "Items")
super(ItemSet, self).__init__(connection, "Items")
self.objs = []
self.iter = None
self.page = page
@ -150,4 +150,4 @@ class ItemSet(ResponseGroup):
"""Override to first fetch everything"""
for item in self:
pass
return ResponseGroup.to_xml(self)
return super(ItemSet, self).to_xml()

View File

@ -55,6 +55,9 @@ def regions():
RegionInfo(name='sa-east-1',
endpoint='elasticache.sa-east-1.amazonaws.com',
connection_cls=ElastiCacheConnection),
RegionInfo(name='cn-north-1',
endpoint='elasticache.cn-north-1.amazonaws.com.cn',
connection_cls=ElastiCacheConnection),
]

View File

@ -55,12 +55,12 @@ class ElastiCacheConnection(AWSQueryConnection):
else:
del kwargs['region']
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
super(ElastiCacheConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['sign-v2']
return ['hmac-v4']
def authorize_cache_security_group_ingress(self,
cache_security_group_name,
@ -99,8 +99,8 @@ class ElastiCacheConnection(AWSQueryConnection):
verb='POST',
path='/', params=params)
def create_cache_cluster(self, cache_cluster_id, num_cache_nodes,
cache_node_type, engine,
def create_cache_cluster(self, cache_cluster_id, num_cache_nodes=None,
cache_node_type=None, engine=None,
replication_group_id=None, engine_version=None,
cache_parameter_group_name=None,
cache_subnet_group_name=None,
@ -244,10 +244,13 @@ class ElastiCacheConnection(AWSQueryConnection):
"""
params = {
'CacheClusterId': cache_cluster_id,
'NumCacheNodes': num_cache_nodes,
'CacheNodeType': cache_node_type,
'Engine': engine,
}
if num_cache_nodes is not None:
params['NumCacheNodes'] = num_cache_nodes
if cache_node_type is not None:
params['CacheNodeType'] = cache_node_type
if engine is not None:
params['Engine'] = engine
if replication_group_id is not None:
params['ReplicationGroupId'] = replication_group_id
if engine_version is not None:

View File

@ -55,7 +55,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
else:
del kwargs['region']
kwargs['host'] = region.endpoint
AWSAuthConnection.__init__(self, **kwargs)
super(ElasticTranscoderConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
@ -523,26 +523,56 @@ class ElasticTranscoderConnection(AWSAuthConnection):
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_pipelines(self):
def list_pipelines(self, ascending=None, page_token=None):
"""
The ListPipelines operation gets a list of the pipelines
associated with the current AWS account.
"""
uri = '/2012-09-25/pipelines'
return self.make_request('GET', uri, expected_status=200)
:type ascending: string
:param ascending: To list pipelines in chronological order by the date
and time that they were created, enter `True`. To list pipelines in
reverse chronological order, enter `False`.
def list_presets(self):
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/pipelines'.format()
params = {}
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_presets(self, ascending=None, page_token=None):
"""
The ListPresets operation gets a list of the default presets
included with Elastic Transcoder and the presets that you've
added in an AWS region.
:type ascending: string
:param ascending: To list presets in chronological order by the date
and time that they were created, enter `True`. To list presets in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/presets'
return self.make_request('GET', uri, expected_status=200)
uri = '/2012-09-25/presets'.format()
params = {}
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def read_job(self, id=None):
"""
@ -891,8 +921,8 @@ class ElasticTranscoderConnection(AWSAuthConnection):
expected_status=None, params=None):
if headers is None:
headers = {}
response = AWSAuthConnection.make_request(
self, verb, resource, headers=headers, data=data)
response = super(ElasticTranscoderConnection, self).make_request(
verb, resource, headers=headers, data=data)
body = json.load(response)
if response.status == expected_status:
return body

View File

@ -63,6 +63,9 @@ def regions():
RegionInfo(name='sa-east-1',
endpoint='sa-east-1.elasticmapreduce.amazonaws.com',
connection_cls=EmrConnection),
RegionInfo(name='cn-north-1',
endpoint='elasticmapreduce.cn-north-1.amazonaws.com.cn',
connection_cls=EmrConnection),
]

View File

@ -60,7 +60,7 @@ class EmrConnection(AWSQueryConnection):
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
super(EmrConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
@ -267,6 +267,42 @@ class EmrConnection(AWSQueryConnection):
self.get_object('ListSteps', params, StepSummaryList)
def add_tags(self, resource_id, tags):
"""
Create new metadata tags for the specified resource id.
:type resource_id: str
:param resource_id: The cluster id
:type tags: dict
:param tags: A dictionary containing the name/value pairs.
If you want to create only a tag name, the
value for that tag should be the empty string
(e.g. '') or None.
"""
assert isinstance(resource_id, basestring)
params = {
'ResourceId': resource_id,
}
params.update(self._build_tag_list(tags))
return self.get_status('AddTags', params, verb='POST')
def remove_tags(self, resource_id, tags):
"""
Remove metadata tags for the specified resource id.
:type resource_id: str
:param resource_id: The cluster id
:type tags: list
:param tags: A list of tag names to remove.
"""
params = {
'ResourceId': resource_id,
}
params.update(self._build_string_list('TagKeys', tags))
return self.get_status('RemoveTags', params, verb='POST')
def terminate_jobflow(self, jobflow_id):
"""
Terminate an Elastic MapReduce job flow
@ -623,6 +659,27 @@ class EmrConnection(AWSQueryConnection):
params['Steps.member.%s.%s' % (i+1, key)] = value
return params
def _build_string_list(self, field, items):
if not isinstance(items, types.ListType):
items = [items]
params = {}
for i, item in enumerate(items):
params['%s.member.%s' % (field, i + 1)] = item
return params
def _build_tag_list(self, tags):
assert isinstance(tags, dict)
params = {}
for i, key_value in enumerate(sorted(tags.iteritems()), start=1):
key, value = key_value
current_prefix = 'Tags.member.%s' % i
params['%s.Key' % current_prefix] = key
if value:
params['%s.Value' % current_prefix] = value
return params
def _build_instance_common_args(self, ec2_keyname, availability_zone,
keep_alive, hadoop_version):
"""

View File

@ -256,6 +256,7 @@ class Cluster(EmrObject):
self.status = None
self.ec2instanceattributes = None
self.applications = None
self.tags = None
def startElement(self, name, attrs, connection):
if name == 'Status':
@ -266,6 +267,9 @@ class Cluster(EmrObject):
return self.ec2instanceattributes
elif name == 'Applications':
self.applications = ResultSet([('member', Application)])
elif name == 'Tags':
self.tags = ResultSet([('member', KeyValue)])
return self.tags
else:
return None

View File

@ -204,7 +204,7 @@ class ScriptRunnerStep(JarStep):
ScriptRunnerJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
def __init__(self, name, **kw):
JarStep.__init__(self, name, self.ScriptRunnerJar, **kw)
super(ScriptRunnerStep, self).__init__(name, self.ScriptRunnerJar, **kw)
class PigBase(ScriptRunnerStep):
@ -225,7 +225,7 @@ class InstallPigStep(PigBase):
step_args.extend(self.BaseArgs)
step_args.extend(['--install-pig'])
step_args.extend(['--pig-versions', pig_versions])
ScriptRunnerStep.__init__(self, self.InstallPigName, step_args=step_args)
super(InstallPigStep, self).__init__(self.InstallPigName, step_args=step_args)
class PigStep(PigBase):
@ -239,7 +239,7 @@ class PigStep(PigBase):
step_args.extend(['--pig-versions', pig_versions])
step_args.extend(['--run-pig-script', '--args', '-f', pig_file])
step_args.extend(pig_args)
ScriptRunnerStep.__init__(self, name, step_args=step_args)
super(PigStep, self).__init__(name, step_args=step_args)
class HiveBase(ScriptRunnerStep):
@ -261,7 +261,7 @@ class InstallHiveStep(HiveBase):
step_args.extend(['--hive-versions', hive_versions])
if hive_site is not None:
step_args.extend(['--hive-site=%s' % hive_site])
ScriptRunnerStep.__init__(self, self.InstallHiveName,
super(InstallHiveStep, self).__init__(self.InstallHiveName,
step_args=step_args)
@ -278,4 +278,4 @@ class HiveStep(HiveBase):
step_args.extend(['--run-hive-script', '--args', '-f', hive_file])
if hive_args is not None:
step_args.extend(hive_args)
ScriptRunnerStep.__init__(self, name, step_args=step_args)
super(HiveStep, self).__init__(name, step_args=step_args)

View File

@ -34,9 +34,8 @@ class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
StandardError.__init__(self, reason, *args)
super(BotoClientError, self).__init__(reason, *args)
self.reason = reason
def __repr__(self):
@ -45,32 +44,35 @@ class BotoClientError(StandardError):
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
StandardError.__init__(self, status, reason, body, *args)
super(BotoServerError, self).__init__(status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
@ -134,8 +136,8 @@ class BotoServerError(StandardError):
self.message = None
self.box_usage = None
class ConsoleOutput:
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
@ -154,19 +156,20 @@ class ConsoleOutput:
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
BotoServerError.__init__(self, status, reason, body)
super(StorageCreateError, self).__init__(status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return BotoServerError.endElement(self, name, value, connection)
return super(StorageCreateError, self).endElement(name, value, connection)
class S3CreateError(StorageCreateError):
"""
@ -174,30 +177,35 @@ class S3CreateError(StorageCreateError):
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
@ -205,10 +213,10 @@ class SQSError(BotoServerError):
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
BotoServerError.__init__(self, status, reason, body)
super(SQSError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
return super(SQSError, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
@ -216,19 +224,20 @@ class SQSError(BotoServerError):
elif name == 'Type':
self.type = value
else:
return BotoServerError.endElement(self, name, value, connection)
return super(SQSError, self).endElement(name, value, connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
super(SQSError, self)._cleanupParsedProperties()
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
BotoClientError.__init__(self, reason, message)
super(SQSDecodeError, self).__init__(reason, message)
self.message = message
def __repr__(self):
@ -237,49 +246,54 @@ class SQSDecodeError(BotoClientError):
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
BotoServerError.__init__(self, status, reason, body)
super(StorageResponseError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
return super(StorageResponseError, self).startElement(name, attrs,
connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return BotoServerError.endElement(self, name, value, connection)
return super(StorageResponseError, self).endElement(name, value,
connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
super(StorageResponseError, self)._cleanupParsedProperties()
for p in ('resource'):
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
BotoServerError.__init__(self, status, reason, body)
super(EC2ResponseError, self).__init__(status, reason, body)
self.errors = [ (e.error_code, e.error_message) \
for e in self._errorResultSet ]
if len(self.errors):
@ -299,11 +313,12 @@ class EC2ResponseError(BotoServerError):
return None # don't call subclass here
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
super(EC2ResponseError, self)._cleanupParsedProperties()
self._errorResultSet = []
for p in ('errors'):
setattr(self, p, None)
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
@ -342,8 +357,8 @@ class EmrResponseError(BotoServerError):
"""
pass
class _EC2Error:
class _EC2Error(object):
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
@ -360,6 +375,7 @@ class _EC2Error:
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
@ -394,21 +410,21 @@ class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
super(InvalidUriError, self).__init__(message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
super(InvalidAclError, self).__init__(message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
super(InvalidCorsError, self).__init__(message)
self.message = message
class NoAuthHandlerFound(Exception):
@ -419,7 +435,7 @@ class InvalidLifecycleConfigError(Exception):
"""Exception raised when GCS lifecycle configuration XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
super(InvalidLifecycleConfigError, self).__init__(message)
self.message = message
# Enum class for resumable upload failure disposition.
@ -454,7 +470,7 @@ class ResumableUploadException(Exception):
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
super(ResumableUploadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
@ -470,7 +486,7 @@ class ResumableDownloadException(Exception):
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
super(ResumableDownloadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
@ -485,7 +501,7 @@ class TooManyRecordsException(Exception):
"""
def __init__(self, message):
Exception.__init__(self, message)
super(TooManyRecordsException, self).__init__(message)
self.message = message

View File

@ -109,7 +109,7 @@ class FPSConnection(AWSQueryConnection):
def __init__(self, *args, **kw):
self.currencycode = kw.pop('CurrencyCode', self.currencycode)
kw.setdefault('host', 'fps.sandbox.amazonaws.com')
AWSQueryConnection.__init__(self, *args, **kw)
super(FPSConnection, self).__init__(*args, **kw)
def _required_auth_capability(self):
return ['fps']

View File

@ -9,7 +9,7 @@ def ResponseFactory(action):
# due to nodes receiving their closing tags
def endElement(self, name, value, connection):
if name != action + 'Response':
Response.endElement(self, name, value, connection)
super(FPSResponse, self).endElement(name, value, connection)
return FPSResponse
@ -48,7 +48,7 @@ class Response(ResponseElement):
elif name == self._action + 'Result':
setattr(self, name, self._Result(name=name))
else:
return ResponseElement.startElement(self, name, attrs, connection)
return super(Response, self).startElement(name, attrs, connection)
return getattr(self, name)
@ -66,12 +66,12 @@ class ComplexAmount(ResponseElement):
if name not in ('CurrencyCode', 'Value'):
message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
raise AssertionError(message)
return ResponseElement.startElement(self, name, attrs, connection)
return super(ComplexAmount, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Value':
value = Decimal(value)
ResponseElement.endElement(self, name, value, connection)
super(ComplexAmount, self).endElement(name, value, connection)
class AmountCollection(ResponseElement):
@ -85,7 +85,7 @@ class AccountBalance(AmountCollection):
if name == 'AvailableBalances':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return AmountCollection.startElement(self, name, attrs, connection)
return super(AccountBalance, self).startElement(name, attrs, connection)
class GetAccountBalanceResult(ResponseElement):
@ -93,7 +93,8 @@ class GetAccountBalanceResult(ResponseElement):
if name == 'AccountBalance':
setattr(self, name, AccountBalance(name=name))
return getattr(self, name)
return Response.startElement(self, name, attrs, connection)
return super(GetAccountBalanceResult, self).startElement(name, attrs,
connection)
class GetTotalPrepaidLiabilityResult(ResponseElement):
@ -101,7 +102,8 @@ class GetTotalPrepaidLiabilityResult(ResponseElement):
if name == 'OutstandingPrepaidLiability':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return Response.startElement(self, name, attrs, connection)
return super(GetTotalPrepaidLiabilityResult, self).startElement(name,
attrs, connection)
class GetPrepaidBalanceResult(ResponseElement):
@ -109,7 +111,8 @@ class GetPrepaidBalanceResult(ResponseElement):
if name == 'PrepaidBalance':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return Response.startElement(self, name, attrs, connection)
return super(GetPrepaidBalanceResult, self).startElement(name, attrs,
connection)
class GetOutstandingDebtBalanceResult(ResponseElement):
@ -117,7 +120,8 @@ class GetOutstandingDebtBalanceResult(ResponseElement):
if name == 'OutstandingDebt':
setattr(self, name, AmountCollection(name=name))
return getattr(self, name)
return Response.startElement(self, name, attrs, connection)
return super(GetOutstandingDebtBalanceResult, self).startElement(name,
attrs, connection)
class TransactionPart(ResponseElement):
@ -125,13 +129,14 @@ class TransactionPart(ResponseElement):
if name == 'FeesPaid':
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return ResponseElement.startElement(self, name, attrs, connection)
return super(TransactionPart, self).startElement(name, attrs,
connection)
class Transaction(ResponseElement):
def __init__(self, *args, **kw):
self.TransactionPart = []
ResponseElement.__init__(self, *args, **kw)
super(Transaction, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'TransactionPart':
@ -140,19 +145,20 @@ class Transaction(ResponseElement):
if name in ('TransactionAmount', 'FPSFees', 'Balance'):
setattr(self, name, ComplexAmount(name=name))
return getattr(self, name)
return ResponseElement.startElement(self, name, attrs, connection)
return super(Transaction, self).startElement(name, attrs, connection)
class GetAccountActivityResult(ResponseElement):
def __init__(self, *args, **kw):
self.Transaction = []
ResponseElement.__init__(self, *args, **kw)
super(GetAccountActivityResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Transaction':
getattr(self, name).append(Transaction(name=name))
return getattr(self, name)[-1]
return ResponseElement.startElement(self, name, attrs, connection)
return super(GetAccountActivityResult, self).startElement(name, attrs,
connection)
class GetTransactionResult(ResponseElement):
@ -160,16 +166,18 @@ class GetTransactionResult(ResponseElement):
if name == 'Transaction':
setattr(self, name, Transaction(name=name))
return getattr(self, name)
return ResponseElement.startElement(self, name, attrs, connection)
return super(GetTransactionResult, self).startElement(name, attrs,
connection)
class GetTokensResult(ResponseElement):
def __init__(self, *args, **kw):
self.Token = []
ResponseElement.__init__(self, *args, **kw)
super(GetTokensResult, self).__init__(*args, **kw)
def startElement(self, name, attrs, connection):
if name == 'Token':
getattr(self, name).append(ResponseElement(name=name))
return getattr(self, name)[-1]
return ResponseElement.startElement(self, name, attrs, connection)
return super(GetTokensResult, self).startElement(name, attrs,
connection)

View File

@ -50,6 +50,9 @@ def regions():
RegionInfo(name='ap-southeast-2',
endpoint='glacier.ap-southeast-2.amazonaws.com',
connection_cls=Layer2),
RegionInfo(name='cn-north-1',
endpoint='glacier.cn-north-1.amazonaws.com.cn',
connection_cls=Layer2),
]

View File

@ -54,7 +54,7 @@ class Layer1(AWSAuthConnection):
self.region = region
self.account_id = account_id
AWSAuthConnection.__init__(self, region.endpoint,
super(Layer1, self).__init__(region.endpoint,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
@ -72,7 +72,7 @@ class Layer1(AWSAuthConnection):
headers = {}
headers['x-amz-glacier-version'] = self.Version
uri = '/%s/%s' % (self.account_id, resource)
response = AWSAuthConnection.make_request(self, verb, uri,
response = super(Layer1, self).make_request(verb, uri,
params=params,
headers=headers,
sender=sender,

View File

@ -221,6 +221,14 @@ class Bucket(S3Bucket):
marker, generation_marker,
headers)
def validate_get_all_versions_params(self, params):
"""
See documentation in boto/s3/bucket.py.
"""
self.validate_kwarg_names(params,
['version_id_marker', 'delimiter', 'marker',
'generation_marker', 'prefix', 'max_keys'])
def delete_key(self, key_name, headers=None, version_id=None,
mfa_token=None, generation=None):
"""

View File

@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.gs.bucket import Bucket
from boto.gs.bucket import Bucket
from boto.s3.connection import S3Connection
from boto.s3.connection import SubdomainCallingFormat
from boto.s3.connection import check_lowercase_bucketname
@ -40,7 +40,7 @@ class GSConnection(S3Connection):
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/',
suppress_consec_slashes=True):
S3Connection.__init__(self, gs_access_key_id, gs_secret_access_key,
super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, calling_format, path,
"google", Bucket,
@ -52,12 +52,12 @@ class GSConnection(S3Connection):
"""
Creates a new bucket. By default it's located in the USA. You can
pass Location.EU to create bucket in the EU. You can also pass
a LocationConstraint for where the bucket should be located, and
a LocationConstraint for where the bucket should be located, and
a StorageClass describing how the data should be stored.
:type bucket_name: string
:param bucket_name: The name of the new bucket.
:type headers: dict
:param headers: Additional headers to pass along with the request to GCS.
@ -70,7 +70,7 @@ class GSConnection(S3Connection):
:type storage_class: string
:param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'.
"""
check_lowercase_bucketname(bucket_name)

View File

@ -55,6 +55,9 @@ def regions():
connection_cls=IAMConnection),
IAMRegionInfo(name='us-gov-west-1',
endpoint='iam.us-gov.amazonaws.com',
connection_cls=IAMConnection),
IAMRegionInfo(name='cn-north-1',
endpoint='iam.cn-north-1.amazonaws.com.cn',
connection_cls=IAMConnection)
]

View File

@ -42,7 +42,7 @@ class IAMConnection(AWSQueryConnection):
proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
debug=0, https_connection_factory=None,
path='/', security_token=None, validate_certs=True):
AWSQueryConnection.__init__(self, aws_access_key_id,
super(IAMConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
@ -51,7 +51,6 @@ class IAMConnection(AWSQueryConnection):
validate_certs=validate_certs)
def _required_auth_capability(self):
#return ['iam']
return ['hmac-v4']
def get_response(self, action, params, path='/', parent=None,
@ -65,11 +64,16 @@ class IAMConnection(AWSQueryConnection):
body = response.read()
boto.log.debug(body)
if response.status == 200:
e = boto.jsonresponse.Element(list_marker=list_marker,
pythonize_name=True)
h = boto.jsonresponse.XmlHandler(e, parent)
h.parse(body)
return e
if body:
e = boto.jsonresponse.Element(list_marker=list_marker,
pythonize_name=True)
h = boto.jsonresponse.XmlHandler(e, parent)
h.parse(body)
return e
else:
# Support empty responses, e.g. deleting a SAML provider
# according to the official documentation.
return {}
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
@ -1318,3 +1322,113 @@ class IAMConnection(AWSQueryConnection):
return self.get_response('UpdateAssumeRolePolicy',
{'RoleName': role_name,
'PolicyDocument': policy_document})
def create_saml_provider(self, saml_metadata_document, name):
"""
Creates an IAM entity to describe an identity provider (IdP)
that supports SAML 2.0.
The SAML provider that you create with this operation can be
used as a principal in a role's trust policy to establish a
trust relationship between AWS and a SAML identity provider.
You can create an IAM role that supports Web-based single
sign-on (SSO) to the AWS Management Console or one that
supports API access to AWS.
When you create the SAML provider, you upload an a SAML
metadata document that you get from your IdP and that includes
the issuer's name, expiration information, and keys that can
be used to validate the SAML authentication response
(assertions) that are received from the IdP. You must generate
the metadata document using the identity management software
that is used as your organization's IdP.
This operation requires `Signature Version 4`_.
For more information, see `Giving Console Access Using SAML`_
and `Creating Temporary Security Credentials for SAML
Federation`_ in the Using Temporary Credentials guide.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
For more information, see `Creating Temporary Security Credentials for
SAML Federation`_ in the Using Temporary Security Credentials
guide.
:type name: string
:param name: The name of the provider to create.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'Name': name,
}
return self.get_response('CreateSAMLProvider', params)
def list_saml_providers(self):
"""
Lists the SAML providers in the account.
This operation requires `Signature Version 4`_.
"""
return self.get_response('ListSAMLProviders', {})
def get_saml_provider(self, saml_provider_arn):
"""
Returns the SAML provider metadocument that was uploaded when
the provider was created or updated.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to get information about.
"""
params = {'SAMLProviderArn': saml_provider_arn }
return self.get_response('GetSAMLProvider', params)
def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
"""
Updates the metadata document for an existing SAML provider.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to update.
:type saml_metadata_document: string
:param saml_metadata_document: An XML document generated by an identity
provider (IdP) that supports SAML 2.0. The document includes the
issuer's name, expiration information, and keys that can be used to
validate the SAML authentication response (assertions) that are
received from the IdP. You must generate the metadata document
using the identity management software that is used as your
organization's IdP.
"""
params = {
'SAMLMetadataDocument': saml_metadata_document,
'SAMLProviderArn': saml_provider_arn,
}
return self.get_response('UpdateSAMLProvider', params)
def delete_saml_provider(self, saml_provider_arn):
"""
Deletes a SAML provider.
Deleting the provider does not update any roles that reference
the SAML provider as a principal in their trust policies. Any
attempt to assume a role that references a SAML provider that
has been deleted will fail.
This operation requires `Signature Version 4`_.
:type saml_provider_arn: string
:param saml_provider_arn: The Amazon Resource Name (ARN) of the SAML
provider to delete.
"""
params = {'SAMLProviderArn': saml_provider_arn }
return self.get_response('DeleteSAMLProvider', params)

View File

@ -0,0 +1,45 @@
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
def regions():
"""
Get all available regions for the Amazon Kinesis service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.kinesis.layer1 import KinesisConnection
return [RegionInfo(name='us-east-1',
endpoint='kinesis.us-east-1.amazonaws.com',
connection_cls=KinesisConnection),
]
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None

View File

@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class ProvisionedThroughputExceededException(BotoServerError):
pass
class LimitExceededException(BotoServerError):
pass
class ExpiredIteratorException(BotoServerError):
pass
class ResourceInUseException(BotoServerError):
pass
class ResourceNotFoundException(BotoServerError):
pass
class InvalidArgumentException(BotoServerError):
pass
class SubscriptionRequiredException(BotoServerError):
pass

View File

@ -0,0 +1,707 @@
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import json
except ImportError:
import simplejson as json
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
Amazon Kinesis is a managed service that scales elastically for
real time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(KinesisConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_stream(self, stream_name, shard_count):
"""
This operation adds a new Amazon Kinesis stream to your AWS
account. A stream captures and transports data records that
are continuously emitted from different data sources or
producers . Scale-out within an Amazon Kinesis stream is
explicitly supported by means of shards, which are uniquely
identified groups of data records in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
composed of. Each shard can support up to 5 read transactions
per second up to a maximum total of 2 MB of data read per
second. Each shard can support up to 1000 write transactions
per second up to a maximum total of 1 MB data written per
second. You can add shards to a stream if the amount of data
input increases and you can remove shards if the amount of
data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
region. That is, two streams in two different accounts can
have the same name, and two streams in the same account, but
in two different regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
sets the stream status to CREATING. After the stream is
created, Amazon Kinesis sets the stream status to ACTIVE. You
should perform read and write operations only on an ACTIVE
stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
+ Have more than five streams in the CREATING state at any
point in time.
+ Create more shards than are authorized for your account.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
You can use the `DescribeStream` operation to check the stream
status, which is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: A name to identify the stream. The stream name is
scoped to the AWS account used by the application that creates the
stream. It is also scoped by region. That is, two streams in two
different AWS accounts can have the same name, and two streams in
the same AWS account, but in two different regions, can have the
same name.
:type shard_count: integer
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
**Note:** The default limit for an AWS account is two shards per
stream. If you need to create a stream with more than two shards,
contact AWS Support to increase the limit on your account.
"""
params = {
'StreamName': stream_name,
'ShardCount': shard_count,
}
return self.make_request(action='CreateStream',
body=json.dumps(params))
def delete_stream(self, stream_name):
"""
This operation deletes a stream and all of its shards and
data. You must shut down any applications that are operating
on the stream before you delete the stream. If an application
attempts to operate on a deleted stream, it will receive the
exception `ResourceNotFoundException`.
If the stream is in the ACTIVE state, you can delete it. After
a `DeleteStream` request, the specified stream is in the
DELETING state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
and write operations, such as PutRecord and GetRecords, on a
stream in the DELETING state until the stream deletion is
complete.
When you delete a stream, any shards in that stream are also
deleted.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to delete.
"""
params = {'StreamName': stream_name, }
return self.make_request(action='DeleteStream',
body=json.dumps(params))
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
This operation returns the following information about the
stream: the current status of the stream, the stream Amazon
Resource Name (ARN), and an array of shard objects that
comprise the stream. For each shard object there is
information about the hash key and sequence number ranges that
the shard spans, and the IDs of any earlier shards that played
in a role in a MergeShards or SplitShard operation that
created the shard. A sequence number is the identifier
associated with every record ingested in the Amazon Kinesis
stream. The sequence number is assigned by the Amazon Kinesis
service when a record is put into the stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
to return from a single call to `DescribeStream`. You can
detect this by using the `HasMoreShards` flag in the returned
output. `HasMoreShards` is set to `True` when there is more
data available.
If there are more shards available, you can request more
shards by using the shard ID of the last shard returned by the
`DescribeStream` request, in the `ExclusiveStartShardId`
parameter in a subsequent request to `DescribeStream`.
`DescribeStream` is a paginated operation.
`DescribeStream` has a limit of 10 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to describe.
:type limit: integer
:param limit: The maximum number of shards to return.
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
with for the stream description.
"""
params = {'StreamName': stream_name, }
if limit is not None:
params['Limit'] = limit
if exclusive_start_shard_id is not None:
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream',
body=json.dumps(params))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
This operation returns one or more data records from a shard.
A `GetRecords` operation request can retrieve up to 10 MB of
data.
You specify a shard iterator for the shard that you want to
read data from in the `ShardIterator` parameter. The shard
iterator specifies the position in the shard from which you
want to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in the shard. For more information about the
shard iterator, see GetShardIterator.
`GetRecords` may return a partial result if the response size
limit is exceeded. You will get an error, but not a partial
result if the shard's provisioned throughput is exceeded, the
shard iterator has expired, or an internal processing failure
has occurred. Clients can request a smaller amount of data by
specifying a maximum number of returned records using the
`Limit` parameter. The `Limit` parameter can be set to an
integer value of up to 10,000. If you set the value to an
integer greater than 10,000, you will receive
`InvalidArgumentException`.
A new shard iterator is returned by every `GetRecords` request
in `NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request. When you
repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned in `NextShardIterator` for subsequent reads.
`GetRecords` can return `null` for the `NextShardIterator` to
reflect that the shard has been closed and that the requested
shard iterator would never have returned more data.
If no items can be processed because of insufficient
provisioned throughput on the shard involved in the request,
`GetRecords` throws `ProvisionedThroughputExceededException`.
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
start sequentially reading data records.
:type limit: integer
:param limit: The maximum number of records to return, which can be set
to a value of up to 10,000.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
"""
params = {'ShardIterator': shard_iterator, }
if limit is not None:
params['Limit'] = limit
response = self.make_request(action='GetRecords',
body=json.dumps(params))
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(record['Data'])
return response
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
This operation returns a shard iterator in `ShardIterator`.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. A
shard iterator specifies this position using the sequence
number of a data record in a shard. A sequence number is the
identifier associated with every record ingested in the Amazon
Kinesis stream. The sequence number is assigned by the Amazon
Kinesis service when a record is put into the stream.
You must specify the shard iterator type in the
`GetShardIterator` request. For example, you can set the
`ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
AT_SEQUENCE_NUMBER shard iterator type, or right after the
sequence number by using the AFTER_SEQUENCE_NUMBER shard
iterator type, using sequence numbers returned by earlier
PutRecord, GetRecords or DescribeStream requests. You can
specify the shard iterator type TRIM_HORIZON in the request to
cause `ShardIterator` to point to the last untrimmed record in
the shard in the system, which is the oldest data record in
the shard. Or you can point to just after the most recent
record in the shard, by using the shard iterator type LATEST,
so that you always read the most recent data in the shard.
**Note:** Each shard iterator expires five minutes after it is
returned to the requester.
When you repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to to
use in your first `GetRecords` request and then use the shard
iterator returned by the `GetRecords` request in
`NextShardIterator` for subsequent reads. A new shard iterator
is returned by every `GetRecords` request in
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you will
receive a `ProvisionedThroughputExceededException`. For more
information about throughput limits, see the `Amazon Kinesis
Developer Guide`_.
`GetShardIterator` can return `null` for its `ShardIterator`
to indicate that the shard has been closed and that the
requested iterator will return no more data. A shard can be
closed by a SplitShard or MergeShards operation.
`GetShardIterator` has a limit of 5 transactions per second
per account per shard.
:type stream_name: string
:param stream_name: The name of the stream.
:type shard_id: string
:param shard_id: The shard ID of the shard to get the iterator for.
:type shard_iterator_type: string
:param shard_iterator_type:
Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid shard iterator types:
+ AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
by a specific sequence number.
+ AFTER_SEQUENCE_NUMBER - Start reading right after the position
denoted by a specific sequence number.
+ TRIM_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
+ LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
:type starting_sequence_number: string
:param starting_sequence_number: The sequence number of the data record
in the shard from which to start reading from.
"""
params = {
'StreamName': stream_name,
'ShardId': shard_id,
'ShardIteratorType': shard_iterator_type,
}
if starting_sequence_number is not None:
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator',
body=json.dumps(params))
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
This operation returns an array of the names of all the
streams that are associated with the AWS account making the
`ListStreams` request. A given AWS account can have many
streams active at one time.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if there are more streams available to list by
using the `HasMoreStreams` flag from the returned output. If
there are more streams available, you can request more streams
by using the name of the last stream returned by the
`ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group
of stream names returned by the subsequent request is then
added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of 5 transactions per second per
account.
:type limit: integer
:param limit: The maximum number of streams to list.
:type exclusive_start_stream_name: string
:param exclusive_start_stream_name: The name of the stream to start the
list with.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if exclusive_start_stream_name is not None:
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams',
body=json.dumps(params))
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
This operation merges two adjacent shards in a stream and
combines them into a single shard to reduce the stream's
capacity to ingest and transport data. Two shards are
considered adjacent if the union of the hash key ranges for
the two shards form a contiguous set with no gaps. For
example, if you have two shards, one with a hash key range of
276...381 and the other with a hash key range of 382...454,
then you could merge these two shards into a single shard that
would have a hash key range of 276...454. After the merge, the
single child shard receives data for all hash key values
covered by the two parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
is not being used. The operation requires that you specify the
shard to be merged and the adjacent shard for a given stream.
For more information about merging shards, see the `Amazon
Kinesis Developer Guide`_.
If the stream is in the ACTIVE state, you can call
`MergeShards`. If a stream is in CREATING or UPDATING or
DELETING states, then Amazon Kinesis returns a
`ResourceInUseException`. If the specified stream does not
exist, Amazon Kinesis returns a `ResourceNotFoundException`.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
response and sets the `StreamStatus` to UPDATING. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You use the DescribeStream operation to determine the shard
IDs that are specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
will receive a `LimitExceededException`.
`MergeShards` has limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the merge.
:type shard_to_merge: string
:param shard_to_merge: The shard ID of the shard to combine with the
adjacent shard for the merge.
:type adjacent_shard_to_merge: string
:param adjacent_shard_to_merge: The shard ID of the adjacent shard for
the merge.
"""
params = {
'StreamName': stream_name,
'ShardToMerge': shard_to_merge,
'AdjacentShardToMerge': adjacent_shard_to_merge,
}
return self.make_request(action='MergeShards',
body=json.dumps(params))
def put_record(self, stream_name, data, partition_key,
explicit_hash_key=None,
sequence_number_for_ordering=None,
exclusive_minimum_sequence_number=None,
b64_encode=True):
"""
This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports the data; a partition key; and the data
blob itself. The data blob could be a segment from a log file,
geographic/location data, website clickstream data, or any
other data type.
The partition key is used to distribute data across shards.
Amazon Kinesis segregates the data records that belong to a
data stream into multiple shards, using the partition key
associated with each data record to determine which shard a
given data record belongs to.
Partition keys are Unicode strings, with a maximum length
limit of 256 bytes. An MD5 hash function is used to map
partition keys to 128-bit integer values and to map associated
data records to shards using the hash key ranges of the
shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
`PutRecord` returns the shard ID of where the data record was
placed and the sequence number that was assigned to the data
record.
The `SequenceNumberForOrdering` sets the initial sequence
number for the partition key. Later `PutRecord` requests to
the same partition key (from the same client) will
automatically increase from `SequenceNumberForOrdering`,
ensuring strict sequential ordering.
If a `PutRecord` request cannot be processed because of
insufficient provisioned throughput on the shard involved in
the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream to put the data record into.
:type data: blob
:param data: The data blob to put into the record, which will be Base64
encoded. The maximum size of the data blob is 50 kilobytes (KB).
Set `b64_encode` to disable automatic Base64 encoding.
:type partition_key: string
:param partition_key: Determines which shard in the stream the data
record is assigned to. Partition keys are Unicode strings with a
maximum length limit of 256 bytes. Amazon Kinesis uses the
partition key as input to a hash function that maps the partition
key and associated data to a specific shard. Specifically, an MD5
hash function is used to map partition keys to 128-bit integer
values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition
key will map to the same shard within the stream.
:type explicit_hash_key: string
:param explicit_hash_key: The hash value used to explicitly determine
the shard the data record is assigned to by overriding the
partition key hash.
:type sequence_number_for_ordering: string
:param sequence_number_for_ordering: The sequence number to use as the
initial number for the partition key. Subsequent calls to
`PutRecord` from the same client and for the same partition key
will increase from the `SequenceNumberForOrdering` value.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {
'StreamName': stream_name,
'Data': data,
'PartitionKey': partition_key,
}
if explicit_hash_key is not None:
params['ExplicitHashKey'] = explicit_hash_key
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
params['Data'] = base64.b64encode(params['Data'])
return self.make_request(action='PutRecord',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
This operation splits a shard into two new shards in the
stream, to increase the stream's capacity to ingest and
transport data. `SplitShard` is called when there is a need to
increase the overall capacity of stream because of an expected
increase in the volume of data records being ingested.
`SplitShard` can also be used when a given shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
the `SplitShard` operation to increase stream capacity, so
that more Amazon Kinesis applications can simultaneously read
data from the stream for real-time processing.
The `SplitShard` operation requires that you specify the shard
to be split and the new hash key, which is the position in the
shard where the shard gets split in two. In many cases, the
new hash key might simply be the average of the beginning and
ending hash key, but it can be any hash key value in the range
being mapped into the shard. For more information about
splitting shards, see the `Amazon Kinesis Developer Guide`_.
You can use the DescribeStream operation to determine the
shard ID and hash key values for the `ShardToSplit` and
`NewStartingHashKey` parameters that are specified in the
`SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to UPDATING. After the
operation is completed, Amazon Kinesis sets the stream status
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the ACTIVE state, you can call `SplitShard`. If a stream is
in CREATING or UPDATING or DELETING states, then Amazon
Kinesis returns a `ResourceInUseException`.
If the specified stream does not exist, Amazon Kinesis returns
a `ResourceNotFoundException`. If you try to create more
shards than are authorized for your account, you receive a
`LimitExceededException`.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
will receive a `LimitExceededException`.
`SplitShard` has limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read()
boto.log.debug(response.getheaders())
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)

View File

@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -134,7 +134,7 @@ class CommandLineGetter(object):
if ami.location.find('pyami') >= 0:
my_amis.append((ami.location, ami))
return my_amis
def get_region(self, params):
region = params.get('region', None)
if isinstance(region, str) or isinstance(region, unicode):
@ -171,7 +171,7 @@ class CommandLineGetter(object):
prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
choices=self.ec2.get_all_zones)
params['zone'] = propget.get(prop)
def get_ami_id(self, params):
valid = False
while not valid:
@ -271,20 +271,20 @@ class Server(Model):
"""
Create a new instance based on the specified configuration file or the specified
configuration and the passed in parameters.
If the config_file argument is not None, the configuration is read from there.
If the config_file argument is not None, the configuration is read from there.
Otherwise, the cfg argument is used.
The config file may include other config files with a #import reference. The included
config files must reside in the same directory as the specified file.
The logical_volume argument, if supplied, will be used to get the current physical
volume ID and use that as an override of the value specified in the config file. This
may be useful for debugging purposes when you want to debug with a production config
file but a test Volume.
The dictionary argument may be used to override any EC2 configuration values in the
config file.
config files must reside in the same directory as the specified file.
The logical_volume argument, if supplied, will be used to get the current physical
volume ID and use that as an override of the value specified in the config file. This
may be useful for debugging purposes when you want to debug with a production config
file but a test Volume.
The dictionary argument may be used to override any EC2 configuration values in the
config file.
"""
if config_file:
cfg = Config(path=config_file)
@ -304,7 +304,7 @@ class Server(Model):
zone = params.get('zone')
# deal with possibly passed in logical volume:
if logical_volume != None:
cfg.set('EBS', 'logical_volume_name', logical_volume.name)
cfg.set('EBS', 'logical_volume_name', logical_volume.name)
cfg_fp = StringIO.StringIO()
cfg.write(cfg_fp)
# deal with the possibility that zone and/or keypair are strings read from the config file:
@ -328,7 +328,7 @@ class Server(Model):
print 'Waiting for instance to start so we can set its elastic IP address...'
# Sometimes we get a message from ec2 that says that the instance does not exist.
# Hopefully the following delay will giv eec2 enough time to get to a stable state:
time.sleep(5)
time.sleep(5)
while instance.update() != 'running':
time.sleep(1)
instance.use_ip(elastic_ip)
@ -346,7 +346,7 @@ class Server(Model):
l.append(s)
i += 1
return l
@classmethod
def create_from_instance_id(cls, instance_id, name, description=''):
regions = boto.ec2.regions()
@ -393,9 +393,9 @@ class Server(Model):
s.put()
servers.append(s)
return servers
def __init__(self, id=None, **kw):
Model.__init__(self, id, **kw)
super(Server, self).__init__(id, **kw)
self.ssh_key_file = None
self.ec2 = None
self._cmdshell = None
@ -421,7 +421,7 @@ class Server(Model):
self._instance = instance
except EC2ResponseError:
pass
def _status(self):
status = ''
if self._instance:
@ -484,14 +484,14 @@ class Server(Model):
return kn
def put(self):
Model.put(self)
super(Server, self).put()
self._setup_ec2()
def delete(self):
if self.production:
raise ValueError("Can't delete a production server")
#self.stop()
Model.delete(self)
super(Server, self).delete()
def stop(self):
if self.production:
@ -553,4 +553,4 @@ class Server(Model):
return self.run('apt-get -y install %s' % pkg)

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -30,7 +30,7 @@ def check_hour(val):
return
if int(val) < 0 or int(val) > 23:
raise ValueError
class Task(Model):
"""
@ -40,10 +40,10 @@ class Task(Model):
To keep the operation reasonably efficient and not cause excessive polling,
the minimum granularity of a Task is hourly. Some examples:
hour='*' - the task would be executed each hour
hour='3' - the task would be executed at 3AM GMT each day.
"""
name = StringProperty()
hour = StringProperty(required=True, validator=check_hour, default='*')
@ -57,13 +57,13 @@ class Task(Model):
def start_all(cls, queue_name):
for task in cls.all():
task.start(queue_name)
def __init__(self, id=None, **kw):
Model.__init__(self, id, **kw)
super(Task, self).__init__(id, **kw)
self.hourly = self.hour == '*'
self.daily = self.hour != '*'
self.now = datetime.datetime.utcnow()
def check(self):
"""
Determine how long until the next scheduled time for a Task.
@ -76,7 +76,7 @@ class Task(Model):
if self.hourly and not self.last_executed:
return 0
if self.daily and not self.last_executed:
if int(self.hour) == self.now.hour:
return 0
@ -97,7 +97,7 @@ class Task(Model):
return 82800 # 23 hours, just to be safe
else:
return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60
def _run(self, msg, vtimeout):
boto.log.info('Task[%s] - running:%s' % (self.name, self.command))
log_fp = StringIO.StringIO()
@ -170,6 +170,6 @@ class TaskPoller(object):

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -33,7 +33,7 @@ import datetime
class CommandLineGetter(object):
def get_region(self, params):
if not params.get('region', None):
prop = self.cls.find_property('region_name')
@ -44,7 +44,7 @@ class CommandLineGetter(object):
prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
choices=self.ec2.get_all_zones)
params['zone'] = propget.get(prop)
def get_name(self, params):
if not params.get('name', None):
prop = self.cls.find_property('name')
@ -151,7 +151,7 @@ class Volume(Model):
v.zone_name = self.zone_name
v.put()
return v
def get_ec2_connection(self):
if self.server:
return self.server.ec2
@ -396,7 +396,7 @@ class Volume(Model):
boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name))
snap.delete()
return snaps
def grow(self, size):
pass
@ -411,10 +411,10 @@ class Volume(Model):
self.detach()
ec2 = self.get_ec2_connection()
ec2.delete_volume(self.volume_id)
Model.delete(self)
super(Volume, self).delete()
def archive(self):
# snapshot volume, trim snaps, delete volume-id
pass

View File

@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@ -87,7 +87,7 @@ class Server(Model):
return s
def __init__(self, id=None, **kw):
Model.__init__(self, id, **kw)
super(Server, self).__init__(id, **kw)
self._reservation = None
self._instance = None
self._ssh_client = None
@ -123,13 +123,13 @@ class Server(Model):
return self._instance
instance = property(getInstance, setReadOnly, None, 'The Instance for the server')
def getAMI(self):
if self.instance:
return self.instance.image_id
ami = property(getAMI, setReadOnly, None, 'The AMI for the server')
def getStatus(self):
if self.instance:
self.instance.update()
@ -137,7 +137,7 @@ class Server(Model):
status = property(getStatus, setReadOnly, None,
'The status of the server')
def getHostname(self):
if self.instance:
return self.instance.public_dns_name

View File

@ -54,7 +54,7 @@ class MTurkConnection(AWSQueryConnection):
host = 'mechanicalturk.amazonaws.com'
self.debug = debug
AWSQueryConnection.__init__(self, aws_access_key_id,
super(MTurkConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, host, debug,
@ -875,7 +875,7 @@ class MTurkConnection(AWSQueryConnection):
return duration
class BaseAutoResultElement:
class BaseAutoResultElement(object):
"""
Base class to automatically add attributes when parsing XML
"""
@ -955,7 +955,7 @@ class QualificationRequest(BaseAutoResultElement):
"""
def __init__(self, connection):
BaseAutoResultElement.__init__(self, connection)
super(QualificationRequest, self).__init__(connection)
self.answers = []
def endElement(self, name, value, connection):
@ -967,7 +967,7 @@ class QualificationRequest(BaseAutoResultElement):
xml.sax.parseString(value, h)
self.answers.append(answer_rs)
else:
BaseAutoResultElement.endElement(self, name, value, connection)
super(QualificationRequest, self).endElement(name, value, connection)
class Assignment(BaseAutoResultElement):
@ -980,7 +980,7 @@ class Assignment(BaseAutoResultElement):
"""
def __init__(self, connection):
BaseAutoResultElement.__init__(self, connection)
super(Assignment, self).__init__(connection)
self.answers = []
def endElement(self, name, value, connection):
@ -992,7 +992,7 @@ class Assignment(BaseAutoResultElement):
xml.sax.parseString(value, h)
self.answers.append(answer_rs)
else:
BaseAutoResultElement.endElement(self, name, value, connection)
super(Assignment, self).endElement(name, value, connection)
class QuestionFormAnswer(BaseAutoResultElement):
@ -1016,7 +1016,7 @@ class QuestionFormAnswer(BaseAutoResultElement):
"""
def __init__(self, connection):
BaseAutoResultElement.__init__(self, connection)
super(QuestionFormAnswer, self).__init__(connection)
self.fields = []
self.qid = None

View File

@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class LayoutParameters:
class LayoutParameters(object):
def __init__(self, layoutParameters=None):
if layoutParameters == None:
@ -46,7 +46,7 @@ class LayoutParameter(object):
def __init__(self, name, value):
self.name = name
self.value = value
def get_as_params(self):
params = {
"Name": self.name,

View File

@ -32,7 +32,7 @@ except ImportError:
import base64
import re
class NotificationMessage:
class NotificationMessage(object):
NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl"
NOTIFICATION_VERSION = '2006-05-05'
@ -88,7 +88,7 @@ class NotificationMessage:
signature_calc = base64.b64encode(h.digest())
return self.signature == signature_calc
class Event:
class Event(object):
def __init__(self, d):
self.event_type = d['EventType']
self.event_time_str = d['EventTime']

View File

@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Price:
class Price(object):
def __init__(self, amount=0.0, currency_code='USD'):
self.amount = amount

View File

@ -14,12 +14,12 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Qualifications:
class Qualifications(object):
def __init__(self, requirements=None):
if requirements == None:
@ -49,7 +49,7 @@ class Requirement(object):
self.comparator = comparator
self.integer_value = integer_value
self.required_to_preview = required_to_preview
def get_as_params(self):
params = {
"QualificationTypeId": self.qualification_type_id,
@ -67,7 +67,7 @@ class PercentAssignmentsSubmittedRequirement(Requirement):
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
super(PercentAssignmentsSubmittedRequirement, self).__init__(qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsAbandonedRequirement(Requirement):
"""
@ -75,7 +75,7 @@ class PercentAssignmentsAbandonedRequirement(Requirement):
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
super(PercentAssignmentsAbandonedRequirement, self).__init__(qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsReturnedRequirement(Requirement):
"""
@ -83,7 +83,7 @@ class PercentAssignmentsReturnedRequirement(Requirement):
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
super(PercentAssignmentsReturnedRequirement, self).__init__(qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsApprovedRequirement(Requirement):
"""
@ -91,7 +91,7 @@ class PercentAssignmentsApprovedRequirement(Requirement):
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
super(PercentAssignmentsApprovedRequirement, self).__init__(qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class PercentAssignmentsRejectedRequirement(Requirement):
"""
@ -99,15 +99,15 @@ class PercentAssignmentsRejectedRequirement(Requirement):
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
super(PercentAssignmentsRejectedRequirement, self).__init__(qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class NumberHitsApprovedRequirement(Requirement):
"""
Specifies the total number of HITs submitted by a Worker that have been approved. The value is an integer greater than or equal to 0.
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
super(NumberHitsApprovedRequirement, self).__init__(qualification_type_id="00000000000000000040", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
class LocaleRequirement(Requirement):
"""
@ -115,7 +115,7 @@ class LocaleRequirement(Requirement):
"""
def __init__(self, comparator, locale, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview)
super(LocaleRequirement, self).__init__(qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview)
self.locale = locale
def get_as_params(self):
@ -132,6 +132,6 @@ class AdultRequirement(Requirement):
"""
Requires workers to acknowledge that they are over 18 and that they agree to work on potentially offensive content. The value type is boolean, 1 (required), 0 (not required, the default).
"""
def __init__(self, comparator, integer_value, required_to_preview=False):
Requirement.__init__(self, qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)
super(AdultRequirement, self).__init__(qualification_type_id="00000000000000000060", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview)

View File

@ -82,12 +82,12 @@ class ExternalQuestion(ValidatingXML):
return self.template % vars(self)
class XMLTemplate:
class XMLTemplate(object):
def get_as_xml(self):
return self.template % vars(self)
class SimpleField(object, XMLTemplate):
class SimpleField(XMLTemplate):
"""
A Simple name/value pair that can be easily rendered as XML.
@ -101,7 +101,7 @@ class SimpleField(object, XMLTemplate):
self.value = value
class Binary(object, XMLTemplate):
class Binary(XMLTemplate):
template = """<Binary><MimeType><Type>%(type)s</Type><SubType>%(subtype)s</SubType></MimeType><DataURL>%(url)s</DataURL><AltText>%(alt_text)s</AltText></Binary>"""
def __init__(self, type, subtype, url, alt_text):
@ -179,7 +179,7 @@ class Flash(Application):
super(Flash, self).get_inner_content(content)
class FormattedContent(object, XMLTemplate):
class FormattedContent(XMLTemplate):
schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/FormattedContentXHTMLSubset.xsd'
template = '<FormattedContent><![CDATA[%(content)s]]></FormattedContent>'

View File

@ -251,7 +251,7 @@ class MWSConnection(AWSQueryConnection):
kw.setdefault('host', 'mws.amazonservices.com')
self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId')
self.SellerId = kw.pop('SellerId', None) or self.Merchant
AWSQueryConnection.__init__(self, *args, **kw)
super(MWSConnection, self).__init__(*args, **kw)
def _required_auth_capability(self):
return ['mws']

View File

@ -82,7 +82,7 @@ class Element(DeclarativeType):
class SimpleList(DeclarativeType):
def __init__(self, *args, **kw):
DeclarativeType.__init__(self, *args, **kw)
super(SimpleList, self).__init__(*args, **kw)
self._value = []
def start(self, *args, **kw):
@ -108,16 +108,16 @@ class MemberList(Element):
assert 'member' not in kw, message
if _member is None:
if _hint is None:
Element.__init__(self, *args, member=ElementList(**kw))
super(MemberList, self).__init__(*args, member=ElementList(**kw))
else:
Element.__init__(self, _hint=_hint)
super(MemberList, self).__init__(_hint=_hint)
else:
if _hint is None:
if issubclass(_member, DeclarativeType):
member = _member(**kw)
else:
member = ElementList(_member, **kw)
Element.__init__(self, *args, member=member)
super(MemberList, self).__init__(*args, member=member)
else:
message = 'Nonsensical {0} hint {1!r}'.format(self.__class__.__name__,
_hint)
@ -130,7 +130,7 @@ class MemberList(Element):
if isinstance(self._value.member, DeclarativeType):
self._value.member = []
self._value = self._value.member
Element.teardown(self, *args, **kw)
super(MemberList, self).teardown(*args, **kw)
def ResponseFactory(action, force=None):
@ -231,7 +231,7 @@ class Response(ResponseElement):
if name == self._name:
self.update(attrs)
else:
return ResponseElement.startElement(self, name, attrs, connection)
return super(Response, self).startElement(name, attrs, connection)
@property
def _result(self):
@ -247,7 +247,7 @@ class ResponseResultList(Response):
def __init__(self, *args, **kw):
setattr(self, self._action + 'Result', ElementList(self._ResultClass))
Response.__init__(self, *args, **kw)
super(ResponseResultList, self).__init__(*args, **kw)
class FeedSubmissionInfo(ResponseElement):
@ -374,13 +374,13 @@ class ComplexAmount(ResponseElement):
if name not in ('CurrencyCode', self._amount):
message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
raise AssertionError(message)
return ResponseElement.startElement(self, name, attrs, connection)
return super(ComplexAmount, self).startElement(name, attrs, connection)
@strip_namespace
def endElement(self, name, value, connection):
if name == self._amount:
value = Decimal(value)
ResponseElement.endElement(self, name, value, connection)
super(ComplexAmount, self).endElement(name, value, connection)
class ComplexMoney(ComplexAmount):
@ -402,13 +402,13 @@ class ComplexWeight(ResponseElement):
if name not in ('Unit', 'Value'):
message = 'Unrecognized tag {0} in ComplexWeight'.format(name)
raise AssertionError(message)
return ResponseElement.startElement(self, name, attrs, connection)
return super(ComplexWeight, self).startElement(name, attrs, connection)
@strip_namespace
def endElement(self, name, value, connection):
if name == 'Value':
value = Decimal(value)
ResponseElement.endElement(self, name, value, connection)
super(ComplexWeight, self).endElement(name, value, connection)
class Dimension(ComplexType):
@ -501,7 +501,7 @@ class ItemAttributes(AttributeSet):
'MediaType', 'OperatingSystem', 'Platform')
for name in names:
setattr(self, name, SimpleList())
AttributeSet.__init__(self, *args, **kw)
super(ItemAttributes, self).__init__(*args, **kw)
class VariationRelationship(ResponseElement):
@ -605,7 +605,7 @@ class ProductCategory(ResponseElement):
def __init__(self, *args, **kw):
setattr(self, 'Parent', Element(ProductCategory))
ResponseElement.__init__(self, *args, **kw)
super(ProductCategory, self).__init__(*args, **kw)
class GetProductCategoriesResult(ResponseElement):

View File

@ -20,7 +20,11 @@
# IN THE SOFTWARE.
#
import json
try:
import json
except ImportError:
import simplejson as json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
@ -41,6 +45,23 @@ class OpsWorksConnection(AWSQueryConnection):
lifecycle. For information about this product, go to the `AWS
OpsWorks`_ details page.
**SDKs and CLI**
The most common way to use the AWS OpsWorks API is by using the
AWS Command Line Interface (CLI) or by using one of the AWS SDKs
to implement applications in your preferred language. For more
information, see:
+ `AWS CLI`_
+ `AWS SDK for Java`_
+ `AWS SDK for .NET`_
+ `AWS SDK for PHP 2`_
+ `AWS SDK for Ruby`_
+ `AWS SDK for Node.js`_
+ `AWS SDK for Python(Boto)`_
**Endpoints**
AWS OpsWorks supports only one endpoint, opsworks.us-
@ -53,7 +74,8 @@ class OpsWorksConnection(AWSQueryConnection):
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
version, 0.9 or 11.4. The default value is currently 0.9. However,
we expect to change the default value to 11.4 in September 2013.
we expect to change the default value to 11.4 in October 2013. For
more information, see `Using AWS OpsWorks with Chef 11`_.
"""
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
@ -74,7 +96,7 @@ class OpsWorksConnection(AWSQueryConnection):
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
super(OpsWorksConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
@ -85,7 +107,13 @@ class OpsWorksConnection(AWSQueryConnection):
Assigns one of the stack's registered Amazon EBS volumes to a
specified instance. The volume must first be registered with
the stack by calling RegisterVolume. For more information, see
``_.
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
@ -105,7 +133,13 @@ class OpsWorksConnection(AWSQueryConnection):
Associates one of the stack's registered Elastic IP addresses
with a specified instance. The address must first be
registered with the stack by calling RegisterElasticIp. For
more information, see ``_.
more information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@ -131,6 +165,12 @@ class OpsWorksConnection(AWSQueryConnection):
or CLI. For more information, see ` Elastic Load Balancing
Developer Guide`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
@ -160,6 +200,11 @@ class OpsWorksConnection(AWSQueryConnection):
Creates a clone of a specified stack. For more information,
see `Clone a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type source_stack_id: string
:param source_stack_id: The source stack ID.
@ -233,20 +278,20 @@ class OpsWorksConnection(AWSQueryConnection):
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
Layer_Dependent, which creates host names by appending integers to
the layer's short name. The other themes are:
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ Baked_Goods
+ Clouds
+ European_Cities
+ Fruits
+ Greek_Deities
+ Legendary_Creatures_from_Japan
+ Planets_and_Moons
+ Roman_Deities
+ Scottish_Islands
+ US_Cities
+ Wild_Cats
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
@ -359,6 +404,12 @@ class OpsWorksConnection(AWSQueryConnection):
Creates an app for a specified stack. For more information,
see `Creating Apps`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -430,6 +481,12 @@ class OpsWorksConnection(AWSQueryConnection):
For more information, see `Deploying Apps`_ and `Run Stack
Commands`_.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -479,6 +536,12 @@ class OpsWorksConnection(AWSQueryConnection):
Creates an instance in a specified stack. For more
information, see `Adding an Instance to a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -614,6 +677,12 @@ class OpsWorksConnection(AWSQueryConnection):
number of custom layers, so you can call **CreateLayer** as
many times as you like for that layer type.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The layer stack ID.
@ -736,6 +805,11 @@ class OpsWorksConnection(AWSQueryConnection):
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
@ -798,20 +872,20 @@ class OpsWorksConnection(AWSQueryConnection):
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
Layer_Dependent, which creates host names by appending integers to
the layer's short name. The other themes are:
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ Baked_Goods
+ Clouds
+ European_Cities
+ Fruits
+ Greek_Deities
+ Legendary_Creatures_from_Japan
+ Planets_and_Moons
+ Roman_Deities
+ Scottish_Islands
+ US_Cities
+ Wild_Cats
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
@ -902,10 +976,15 @@ class OpsWorksConnection(AWSQueryConnection):
body=json.dumps(params))
def create_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None):
ssh_public_key=None, allow_self_management=None):
"""
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
@ -915,12 +994,19 @@ class OpsWorksConnection(AWSQueryConnection):
:type ssh_public_key: string
:param ssh_public_key: The user's public SSH key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
``_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='CreateUserProfile',
body=json.dumps(params))
@ -928,6 +1014,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
@ -943,6 +1035,12 @@ class OpsWorksConnection(AWSQueryConnection):
you can delete it. For more information, see `Deleting
Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
@ -969,6 +1067,12 @@ class OpsWorksConnection(AWSQueryConnection):
all associated instances. For more information, see `How to
Delete a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
@ -983,6 +1087,12 @@ class OpsWorksConnection(AWSQueryConnection):
instances, layers, and apps. For more information, see `Shut
Down a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -995,6 +1105,11 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
@ -1007,7 +1122,13 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Deregisters a specified Elastic IP address. The address can
then be registered by another stack. For more information, see
``_.
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@ -1020,7 +1141,14 @@ class OpsWorksConnection(AWSQueryConnection):
def deregister_volume(self, volume_id):
"""
Deregisters an Amazon EBS volume. The volume can then be
registered by another stack. For more information, see ``_.
registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
@ -1036,6 +1164,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The app stack ID. If you use this parameter,
`DescribeApps` returns a description of the apps in the specified
@ -1062,6 +1196,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type deployment_id: string
:param deployment_id: The deployment ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
@ -1096,6 +1236,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
@ -1129,6 +1275,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
@ -1162,6 +1314,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's Elastic
Load Balancing instances.
@ -1186,6 +1344,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
@ -1220,6 +1384,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -1244,6 +1414,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type layer_ids: list
:param layer_ids: An array of layer IDs.
@ -1252,10 +1428,31 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribeLoadBasedAutoScaling',
body=json.dumps(params))
def describe_permissions(self, iam_user_arn, stack_id):
def describe_my_user_profile(self):
"""
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
"""
params = {}
return self.make_request(action='DescribeMyUserProfile',
body=json.dumps(params))
def describe_permissions(self, iam_user_arn=None, stack_id=None):
"""
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN. For more information about IAM
ARNs, see `Using Identifiers`_.
@ -1264,7 +1461,11 @@ class OpsWorksConnection(AWSQueryConnection):
:param stack_id: The stack ID.
"""
params = {'IamUserArn': iam_user_arn, 'StackId': stack_id, }
params = {}
if iam_user_arn is not None:
params['IamUserArn'] = iam_user_arn
if stack_id is not None:
params['StackId'] = stack_id
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
@ -1274,6 +1475,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeRaidArrays` returns descriptions of the RAID arrays
@ -1299,6 +1506,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Describes AWS OpsWorks service errors.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
@ -1326,10 +1539,36 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
def describe_stack_summary(self, stack_id):
"""
Describes the number of layers and apps in a specified stack,
and the number of instances in each state, such as
`running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackSummary',
body=json.dumps(params))
def describe_stacks(self, stack_ids=None):
"""
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_ids: list
:param stack_ids: An array of stack IDs that specify the stacks to be
described. If you omit this parameter, `DescribeStacks` returns a
@ -1349,6 +1588,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_ids: list
:param instance_ids: An array of instance IDs.
@ -1357,16 +1602,23 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='DescribeTimeBasedAutoScaling',
body=json.dumps(params))
def describe_user_profiles(self, iam_user_arns):
def describe_user_profiles(self, iam_user_arns=None):
"""
Describe specified users.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arns: list
:param iam_user_arns: An array of IAM user ARNs that identify the users
to be described.
"""
params = {'IamUserArns': iam_user_arns, }
params = {}
if iam_user_arns is not None:
params['IamUserArns'] = iam_user_arns
return self.make_request(action='DescribeUserProfiles',
body=json.dumps(params))
@ -1377,6 +1629,12 @@ class OpsWorksConnection(AWSQueryConnection):
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
@ -1415,6 +1673,12 @@ class OpsWorksConnection(AWSQueryConnection):
Detaches a specified Elastic Load Balancing instance from its
layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
@ -1435,7 +1699,13 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Disassociates an Elastic IP address from its instance. The
address remains registered with the stack. For more
information, see ``_.
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@ -1450,6 +1720,12 @@ class OpsWorksConnection(AWSQueryConnection):
Gets a generated host name for the specified layer, based on
the current host name theme.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
@ -1463,6 +1739,12 @@ class OpsWorksConnection(AWSQueryConnection):
Reboots a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
@ -1477,7 +1759,13 @@ class OpsWorksConnection(AWSQueryConnection):
address can be registered with only one stack at a time. If
the address is already registered, you must first deregister
it by calling DeregisterElasticIp. For more information, see
``_.
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
@ -1495,7 +1783,14 @@ class OpsWorksConnection(AWSQueryConnection):
Registers an Amazon EBS volume with a specified stack. A
volume can be registered with only one stack at a time. If the
volume is already registered, you must first deregister it by
calling DeregisterVolume. For more information, see ``_.
calling DeregisterVolume. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type ec_2_volume_id: string
:param ec_2_volume_id: The Amazon EBS volume ID.
@ -1523,6 +1818,12 @@ class OpsWorksConnection(AWSQueryConnection):
you have created enough instances to handle the maximum
anticipated load.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
@ -1553,11 +1854,17 @@ class OpsWorksConnection(AWSQueryConnection):
body=json.dumps(params))
def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
allow_sudo=None):
allow_sudo=None, level=None):
"""
Specifies a stack's permissions. For more information, see
`Security and Permissions`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@@ -1572,12 +1879,28 @@ class OpsWorksConnection(AWSQueryConnection):
:param allow_sudo: The user is allowed to use **sudo** to elevate
privileges.
:type level: string
:param level: The user's permission level, which must be set to one of
the following strings. You cannot set your own permissions level.
+ `deny`
+ `show`
+ `deploy`
+ `manage`
+ `iam_only`
For more information on the permissions associated with these levels,
see `Managing User Permissions`_
"""
params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
if allow_ssh is not None:
params['AllowSsh'] = allow_ssh
if allow_sudo is not None:
params['AllowSudo'] = allow_sudo
if level is not None:
params['Level'] = level
return self.make_request(action='SetPermission',
body=json.dumps(params))
@ -1588,6 +1911,12 @@ class OpsWorksConnection(AWSQueryConnection):
specified instance. For more information, see `Managing Load
with Time-based and Load-based Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
@ -1607,6 +1936,12 @@ class OpsWorksConnection(AWSQueryConnection):
Starts a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
@ -1619,6 +1954,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Starts stack's instances.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -1635,6 +1976,12 @@ class OpsWorksConnection(AWSQueryConnection):
without losing data. For more information, see `Starting,
Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
@ -1647,6 +1994,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Stops a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -1658,7 +2011,14 @@ class OpsWorksConnection(AWSQueryConnection):
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
registered with the stack. For more information, see ``_.
registered with the stack. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
@ -1674,6 +2034,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
@ -1728,7 +2094,13 @@ class OpsWorksConnection(AWSQueryConnection):
def update_elastic_ip(self, elastic_ip, name=None):
"""
Updates a registered Elastic IP address's name. For more
information, see ``_.
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The address.
@ -1751,6 +2123,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
@ -1854,6 +2232,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
@ -1947,6 +2331,25 @@ class OpsWorksConnection(AWSQueryConnection):
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
def update_my_user_profile(self, ssh_public_key=None):
    """
    Updates a user's SSH public key.

    **Required Permissions**: To use this action, an IAM user must
    have self-management enabled or an attached policy that
    explicitly grants permissions. For more information on user
    permissions, see `Managing User Permissions`_.

    :type ssh_public_key: string
    :param ssh_public_key: The user's SSH public key.
    """
    # Build the request payload, including the key only when one was given.
    request_body = {}
    if ssh_public_key is not None:
        request_body['SshPublicKey'] = ssh_public_key
    return self.make_request(action='UpdateMyUserProfile',
                             body=json.dumps(request_body))
def update_stack(self, stack_id, name=None, attributes=None,
service_role_arn=None,
default_instance_profile_arn=None, default_os=None,
@ -1958,6 +2361,12 @@ class OpsWorksConnection(AWSQueryConnection):
"""
Updates a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
@ -1995,20 +2404,20 @@ class OpsWorksConnection(AWSQueryConnection):
:param hostname_theme: The stack's new host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
Layer_Dependent, which creates host names by appending integers to
the layer's short name. The other themes are:
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ Baked_Goods
+ Clouds
+ European_Cities
+ Fruits
+ Greek_Deities
+ Legendary_Creatures_from_Japan
+ Planets_and_Moons
+ Roman_Deities
+ Scottish_Islands
+ US_Cities
+ Wild_Cats
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
@ -2096,10 +2505,15 @@ class OpsWorksConnection(AWSQueryConnection):
body=json.dumps(params))
def update_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None):
ssh_public_key=None, allow_self_management=None):
"""
Updates a specified user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user IAM ARN.
@@ -2109,19 +2523,32 @@ class OpsWorksConnection(AWSQueryConnection):
:type ssh_public_key: string
:param ssh_public_key: The user's new SSH public key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Managing User Permissions`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='UpdateUserProfile',
body=json.dumps(params))
def update_volume(self, volume_id, name=None, mount_point=None):
"""
Updates an Amazon EBS volume's name or mount point. For more
information, see ``_.
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.

View File

@@ -290,7 +290,7 @@ class Provider(object):
# clear to users.
metadata = get_instance_metadata(
timeout=timeout, num_retries=attempts,
data='meta-data/iam/security-credentials')
data='meta-data/iam/security-credentials/')
if metadata:
# I'm assuming there's only one role on the instance profile.
security = metadata.values()[0]

Some files were not shown because too many files have changed in this diff Show More