mirror of https://github.com/ansible/awx.git
synced 2026-01-14 19:30:39 -03:30

Upgrade boto to 2.32.1

This commit is contained in:
parent 6f6f8675f9
commit 7899463ac8
@@ -7,7 +7,7 @@ anyjson==0.3.3 (anyjson/*)
 argparse==1.2.1 (argparse.py, needed for Python 2.6 support)
 Babel==1.3 (babel/*, excluded bin/pybabel)
 billiard==3.3.0.16 (billiard/*, funtests/*, excluded _billiard.so)
-boto==2.27.0 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
+boto==2.32.1 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
 bin/cq, bin/cwutil, bin/dynamodb_dump, bin/dynamodb_load, bin/elbadmin,
 bin/fetch_file, bin/glacier, bin/instance_events, bin/kill_instance,
 bin/launch_instance, bin/list_instances, bin/lss3, bin/mturk,
@@ -34,10 +34,11 @@ import re
 import sys
 import logging
 import logging.config
-import urlparse
+
+from boto.compat import urlparse
 from boto.exception import InvalidUriError

-__version__ = '2.27.0'
+__version__ = '2.32.1'
 Version = __version__ # for backware compatibility

 # http://bugs.python.org/issue7980
@@ -492,7 +493,7 @@ def connect_ec2_endpoint(url, aws_access_key_id=None,
     """
     from boto.ec2.regioninfo import RegionInfo

-    purl = urlparse.urlparse(url)
+    purl = urlparse(url)
     kwargs['port'] = purl.port
     kwargs['host'] = purl.hostname
     kwargs['path'] = purl.path
@@ -653,7 +654,7 @@ def connect_cloudsearch(aws_access_key_id=None,
     :type aws_secret_access_key: string
     :param aws_secret_access_key: Your AWS Secret Access Key

-    :rtype: :class:`boto.ec2.autoscale.CloudSearchConnection`
+    :rtype: :class:`boto.cloudsearch.layer2.Layer2`
     :return: A connection to Amazon's CloudSearch service
     """
     from boto.cloudsearch.layer2 import Layer2
@@ -661,6 +662,24 @@ def connect_cloudsearch(aws_access_key_id=None,
                   **kwargs)


+def connect_cloudsearch2(aws_access_key_id=None,
+                         aws_secret_access_key=None,
+                         **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cloudsearch2.layer2.Layer2`
+    :return: A connection to Amazon's CloudSearch2 service
+    """
+    from boto.cloudsearch2.layer2 import Layer2
+    return Layer2(aws_access_key_id, aws_secret_access_key,
+                  **kwargs)
+
+
 def connect_beanstalk(aws_access_key_id=None,
                       aws_secret_access_key=None,
                       **kwargs):
@@ -817,6 +836,28 @@ def connect_kinesis(aws_access_key_id=None,
         **kwargs
     )

+def connect_logs(aws_access_key_id=None,
+                 aws_secret_access_key=None,
+                 **kwargs):
+    """
+    Connect to Amazon CloudWatch Logs
+
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    rtype: :class:`boto.kinesis.layer1.CloudWatchLogsConnection`
+    :return: A connection to the Amazon CloudWatch Logs service
+    """
+    from boto.logs.layer1 import CloudWatchLogsConnection
+    return CloudWatchLogsConnection(
+        aws_access_key_id=aws_access_key_id,
+        aws_secret_access_key=aws_secret_access_key,
+        **kwargs
+    )
+
 def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
                 bucket_storage_uri_class=BucketStorageUri,
                 suppress_consec_slashes=True, is_latest=False):
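Note: the connect_logs helper added above follows the same factory pattern as boto's other connect_* functions. A minimal usage sketch (credential values are illustrative, not from this commit):

    import boto

    # Returns a boto.logs.layer1.CloudWatchLogsConnection; credentials fall
    # back to boto config/environment when not passed explicitly.
    logs = boto.connect_logs(aws_access_key_id='AKIA...',
                             aws_secret_access_key='...')
    groups = logs.describe_log_groups()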
@@ -861,7 +902,7 @@ def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
     version_id = None
     generation = None

-    # Manually parse URI components instead of using urlparse.urlparse because
+    # Manually parse URI components instead of using urlparse because
     # what we're calling URIs don't really fit the standard syntax for URIs
     # (the latter includes an optional host/net location part).
     end_scheme_idx = uri_str.find('://')
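Note: several hunks in this commit replace "import urlparse" with "from boto.compat import urlparse". A rough sketch of the kind of shim boto.compat provides for this name (simplified; the real module covers many more names):

    # Python 2/3 compatibility shim (a sketch, not the actual boto.compat source)
    try:
        # Python 3: the function lives under urllib.parse
        from urllib.parse import urlparse
    except ImportError:
        # Python 2: the function lives in the top-level urlparse module
        from urlparse import urlparse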
@@ -39,10 +39,9 @@ import hmac
 import os
 import sys
 import time
-import urllib
-import urlparse
 import posixpath

+from boto.compat import urllib, encodebytes
 from boto.auth_handler import AuthHandler
 from boto.exception import BotoClientError

@@ -65,9 +64,10 @@ class HmacKeys(object):

     def update_provider(self, provider):
         self._provider = provider
-        self._hmac = hmac.new(self._provider.secret_key, digestmod=sha)
+        self._hmac = hmac.new(self._provider.secret_key.encode('utf-8'),
+                              digestmod=sha)
         if sha256:
-            self._hmac_256 = hmac.new(self._provider.secret_key,
+            self._hmac_256 = hmac.new(self._provider.secret_key.encode('utf-8'),
                                       digestmod=sha256)
         else:
             self._hmac_256 = None
@@ -83,13 +83,13 @@ class HmacKeys(object):
             digestmod = sha256
         else:
             digestmod = sha
-        return hmac.new(self._provider.secret_key,
+        return hmac.new(self._provider.secret_key.encode('utf-8'),
                         digestmod=digestmod)

     def sign_string(self, string_to_sign):
         new_hmac = self._get_hmac()
-        new_hmac.update(string_to_sign)
-        return base64.encodestring(new_hmac.digest()).strip()
+        new_hmac.update(string_to_sign.encode('utf-8'))
+        return encodebytes(new_hmac.digest()).decode('utf-8').strip()

     def __getstate__(self):
         pickled_dict = copy.copy(self.__dict__)
@@ -271,7 +271,7 @@ class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
             req.headers['X-Amz-Security-Token'] = self._provider.security_token
         string_to_sign, headers_to_sign = self.string_to_sign(req)
         boto.log.debug('StringToSign:\n%s' % string_to_sign)
-        hash_value = sha256(string_to_sign).digest()
+        hash_value = sha256(string_to_sign.encode('utf-8')).digest()
         b64_hmac = self.sign_string(hash_value)
         s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
         s += "Algorithm=%s," % self.algorithm()
@@ -298,6 +298,9 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
         self.region_name = region_name

     def _sign(self, key, msg, hex=False):
+        if not isinstance(key, bytes):
+            key = key.encode('utf-8')
+
         if hex:
             sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
         else:
@@ -310,7 +313,6 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
         in the StringToSign.
         """
         host_header_value = self.host_header(self.host, http_request)
-        headers_to_sign = {}
         headers_to_sign = {'Host': host_header_value}
         for name, value in http_request.headers.items():
             lname = name.lower()
@@ -330,8 +332,8 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
         pairs = []
         for pname in parameter_names:
             pval = boto.utils.get_utf8_value(http_request.params[pname])
-            pairs.append(urllib.quote(pname, safe='') + '=' +
-                         urllib.quote(pval, safe='-_~'))
+            pairs.append(urllib.parse.quote(pname, safe='') + '=' +
+                         urllib.parse.quote(pval, safe='-_~'))
         return '&'.join(pairs)

     def canonical_query_string(self, http_request):
@@ -342,8 +344,8 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
         l = []
         for param in sorted(http_request.params):
             value = boto.utils.get_utf8_value(http_request.params[param])
-            l.append('%s=%s' % (urllib.quote(param, safe='-_.~'),
-                                urllib.quote(value, safe='-_.~')))
+            l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'),
+                                urllib.parse.quote(value.decode('utf-8'), safe='-_.~')))
         return '&'.join(l)

     def canonical_headers(self, headers_to_sign):
@@ -376,7 +378,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
         # in windows normpath('/') will be '\\' so we chane it back to '/'
         normalized = posixpath.normpath(path).replace('\\','/')
         # Then urlencode whatever's left.
-        encoded = urllib.quote(normalized)
+        encoded = urllib.parse.quote(normalized)
         if len(path) > 1 and path.endswith('/'):
             encoded += '/'
         return encoded
@@ -388,7 +390,9 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
         # the entire body into memory.
         if hasattr(body, 'seek') and hasattr(body, 'read'):
             return boto.utils.compute_hash(body, hash_algorithm=sha256)[0]
-        return sha256(http_request.body).hexdigest()
+        elif not isinstance(body, bytes):
+            body = body.encode('utf-8')
+        return sha256(body).hexdigest()

     def canonical_request(self, http_request):
         cr = [http_request.method.upper()]
@@ -462,7 +466,7 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys):
         sts = ['AWS4-HMAC-SHA256']
         sts.append(http_request.headers['X-Amz-Date'])
         sts.append(self.credential_scope(http_request))
-        sts.append(sha256(canonical_request).hexdigest())
+        sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
         return '\n'.join(sts)

     def signature(self, http_request, string_to_sign):
@@ -538,11 +542,11 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
     def canonical_uri(self, http_request):
         # S3 does **NOT** do path normalization that SigV4 typically does.
         # Urlencode the path, **NOT** ``auth_path`` (because vhosting).
-        path = urlparse.urlparse(http_request.path)
+        path = urllib.parse.urlparse(http_request.path)
         # Because some quoting may have already been applied, let's back it out.
-        unquoted = urllib.unquote(path.path)
+        unquoted = urllib.parse.unquote(path.path)
         # Requote, this time addressing all characters.
-        encoded = urllib.quote(unquoted)
+        encoded = urllib.parse.quote(unquoted)
         return encoded

     def host_header(self, host, http_request):
@@ -558,7 +562,6 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
         in the StringToSign.
         """
         host_header_value = self.host_header(self.host, http_request)
-        headers_to_sign = {}
         headers_to_sign = {'Host': host_header_value}
         for name, value in http_request.headers.items():
             lname = name.lower()
@@ -602,6 +605,11 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
             if part == 's3':
                 # If it's by itself, the region is the previous part.
                 region_name = parts[-offset]
+
+                # Unless it's Vhosted classic
+                if region_name == 'amazonaws':
+                    region_name = 'us-east-1'
+
                 break
             elif part.startswith('s3-'):
                 region_name = self.clean_region_name(part)
@@ -628,14 +636,14 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
         # **ON** the ``path/auth_path``.
         # Rip them apart, so the ``auth_path/params`` can be signed
         # appropriately.
-        parsed_path = urlparse.urlparse(modified_req.auth_path)
+        parsed_path = urllib.parse.urlparse(modified_req.auth_path)
         modified_req.auth_path = parsed_path.path

         if modified_req.params is None:
             modified_req.params = {}

         raw_qs = parsed_path.query
-        existing_qs = urlparse.parse_qs(
+        existing_qs = urllib.parse.parse_qs(
             raw_qs,
             keep_blank_values=True
         )
@@ -666,6 +674,54 @@ class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
         req = self.mangle_path_and_params(req)
         return super(S3HmacAuthV4Handler, self).add_auth(req, **kwargs)

+    def presign(self, req, expires, iso_date=None):
+        """
+        Presign a request using SigV4 query params. Takes in an HTTP request
+        and an expiration time in seconds and returns a URL.
+
+        http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+        """
+        if iso_date is None:
+            iso_date = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
+
+        region = self.determine_region_name(req.host)
+        service = self.determine_service_name(req.host)
+
+        params = {
+            'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
+            'X-Amz-Credential': '%s/%s/%s/%s/aws4_request' % (
+                self._provider.access_key,
+                iso_date[:8],
+                region,
+                service
+            ),
+            'X-Amz-Date': iso_date,
+            'X-Amz-Expires': expires,
+            'X-Amz-SignedHeaders': 'host'
+        }
+
+        if self._provider.security_token:
+            params['X-Amz-Security-Token'] = self._provider.security_token
+
+        req.params.update(params)
+
+        cr = self.canonical_request(req)
+
+        # We need to replace the payload SHA with a constant
+        cr = '\n'.join(cr.split('\n')[:-1]) + '\nUNSIGNED-PAYLOAD'
+
+        # Date header is expected for string_to_sign, but unused otherwise
+        req.headers['X-Amz-Date'] = iso_date
+
+        sts = self.string_to_sign(req, cr)
+        signature = self.signature(req, sts)
+
+        # Add signature to params now that we have it
+        req.params['X-Amz-Signature'] = signature
+
+        return 'https://%s%s?%s' % (req.host, req.path,
+                                    urllib.parse.urlencode(req.params))
+

 class QueryAuthHandler(AuthHandler):
     """
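Note: the presign method added above builds a SigV4 query-string-authenticated URL as described in the linked AWS document. A hypothetical usage sketch (request construction elided; names illustrative):

    # Given an already-built boto HTTP request object for an S3 GET:
    handler = S3HmacAuthV4Handler(host, config, provider)
    url = handler.presign(req, expires=3600)   # URL valid for one hour
    # url now carries X-Amz-Algorithm/-Credential/-Date/-Expires/-Signature
    # query parameters and can be fetched without further authentication.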
@@ -679,16 +735,16 @@ class QueryAuthHandler(AuthHandler):
     capability = ['pure-query']

     def _escape_value(self, value):
-        # Would normally be ``return urllib.quote(value)``.
+        # Would normally be ``return urllib.parse.quote(value)``.
         return value

     def _build_query_string(self, params):
-        keys = params.keys()
-        keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
+        keys = list(params.keys())
+        keys.sort(key=lambda x: x.lower())
         pairs = []
         for key in keys:
             val = boto.utils.get_utf8_value(params[key])
-            pairs.append(key + '=' + self._escape_value(val))
+            pairs.append(key + '=' + self._escape_value(val.decode('utf-8')))
         return '&'.join(pairs)

     def add_auth(self, http_request, **kwargs):
@@ -725,7 +781,7 @@ class QuerySignatureHelper(HmacKeys):
         boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
         if http_request.method == 'POST':
             headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
-            http_request.body = qs + '&Signature=' + urllib.quote_plus(signature)
+            http_request.body = qs + '&Signature=' + urllib.parse.quote_plus(signature)
             http_request.headers['Content-Length'] = str(len(http_request.body))
         else:
             http_request.body = ''
@@ -733,7 +789,7 @@ class QuerySignatureHelper(HmacKeys):
         # already be there, we need to get rid of that and rebuild it
         http_request.path = http_request.path.split('?')[0]
         http_request.path = (http_request.path + '?' + qs +
-                             '&Signature=' + urllib.quote_plus(signature))
+                             '&Signature=' + urllib.parse.quote_plus(signature))


 class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
@@ -746,13 +802,13 @@ class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
         boto.log.debug('using _calc_signature_0')
         hmac = self._get_hmac()
         s = params['Action'] + params['Timestamp']
-        hmac.update(s)
+        hmac.update(s.encode('utf-8'))
         keys = params.keys()
         keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
         pairs = []
         for key in keys:
             val = boto.utils.get_utf8_value(params[key])
-            pairs.append(key + '=' + urllib.quote(val))
+            pairs.append(key + '=' + urllib.parse.quote(val))
         qs = '&'.join(pairs)
         return (qs, base64.b64encode(hmac.digest()))

@@ -777,10 +833,10 @@ class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
         keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
         pairs = []
         for key in keys:
-            hmac.update(key)
+            hmac.update(key.encode('utf-8'))
             val = boto.utils.get_utf8_value(params[key])
             hmac.update(val)
-            pairs.append(key + '=' + urllib.quote(val))
+            pairs.append(key + '=' + urllib.parse.quote(val))
         qs = '&'.join(pairs)
         return (qs, base64.b64encode(hmac.digest()))

@@ -803,13 +859,13 @@ class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
         pairs = []
         for key in keys:
             val = boto.utils.get_utf8_value(params[key])
-            pairs.append(urllib.quote(key, safe='') + '=' +
-                         urllib.quote(val, safe='-_~'))
+            pairs.append(urllib.parse.quote(key, safe='') + '=' +
+                         urllib.parse.quote(val, safe='-_~'))
         qs = '&'.join(pairs)
         boto.log.debug('query string: %s' % qs)
         string_to_sign += qs
         boto.log.debug('string_to_sign: %s' % string_to_sign)
-        hmac.update(string_to_sign)
+        hmac.update(string_to_sign.encode('utf-8'))
         b64 = base64.b64encode(hmac.digest())
         boto.log.debug('len(b64)=%d' % len(b64))
         boto.log.debug('base64 encoded digest: %s' % b64)
@@ -841,7 +897,7 @@ class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler):
         # already be there, we need to get rid of that and rebuild it
         req.path = req.path.split('?')[0]
         req.path = (req.path + '?' + qs +
-                    '&Signature=' + urllib.quote_plus(signature))
+                    '&Signature=' + urllib.parse.quote_plus(signature))


 def get_auth_handler(host, config, provider, requested_capability=None):
@@ -904,6 +960,9 @@ def detect_potential_sigv4(func):
            return ['hmac-v4']

        if hasattr(self, 'region'):
+           # If you're making changes here, you should also check
+           # ``boto/iam/connection.py``, as several things there are also
+           # endpoint-related.
            if getattr(self.region, 'endpoint', ''):
                if '.cn-' in self.region.endpoint:
                    return ['hmac-v4']
@@ -921,6 +980,9 @@ def detect_potential_s3sigv4(func):
            return ['hmac-v4-s3']

        if hasattr(self, 'host'):
+           # If you're making changes here, you should also check
+           # ``boto/iam/connection.py``, as several things there are also
+           # endpoint-related.
            if '.cn-' in self.host:
                return ['hmac-v4-s3']

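Note: the recurring ".encode('utf-8')" additions throughout the auth hunks above exist because Python 3's hmac and hashlib only accept bytes, while Python 2 also accepted str. A self-contained illustration of the pattern (key and message values are made up):

    import hmac
    from hashlib import sha256

    secret_key = 'example-secret'          # str under Python 3
    string_to_sign = 'GET\n/\nhost:s3.amazonaws.com'

    # hmac.new(secret_key, ...) raises TypeError on Python 3;
    # encoding both inputs first works on both interpreters.
    digest = hmac.new(secret_key.encode('utf-8'),
                      string_to_sign.encode('utf-8'),
                      sha256).hexdigest()
    print(digest)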
@@ -23,7 +23,7 @@
 Defines an interface which all Auth handlers need to implement.
 """

-from plugin import Plugin
+from boto.plugin import Plugin

 class NotReadyToAuthenticate(Exception):
     pass

@@ -4,12 +4,14 @@ from boto.exception import BotoServerError


 def simple(e):
-    err = json.loads(e.error_message)
-    code = err['Error']['Code']
+    code = e.code

     if code.endswith('Exception'):
         code = code.rstrip('Exception')

     try:
         # Dynamically get the error class.
-        simple_e = getattr(sys.modules[__name__], code)(e, err)
+        simple_e = getattr(sys.modules[__name__], code)(e)
     except AttributeError:
         # Return original exception on failure.
         return e
@@ -18,12 +20,9 @@ def simple(e):


 class SimpleException(BotoServerError):
-    def __init__(self, e, err):
+    def __init__(self, e):
         super(SimpleException, self).__init__(e.status, e.reason, e.body)
         self.body = e.error_message
-        self.request_id = err['RequestId']
-        self.error_code = err['Error']['Code']
-        self.error_message = err['Error']['Message']
+        self.error_message = self.message

     def __repr__(self):
         return self.__class__.__name__ + ': ' + self.error_message

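Note: simple() above maps a server error code to an exception class defined in the same module via getattr(sys.modules[__name__], code). A minimal standalone illustration of that dispatch pattern (the classes here are invented for the example):

    import sys

    class Throttling(Exception):
        pass

    def simple(code):
        try:
            # Look the class up by name on this module, then instantiate it.
            return getattr(sys.modules[__name__], code)()
        except AttributeError:
            return Exception(code)   # fall back when no class matches

    print(type(simple('Throttling')).__name__)   # -> Throttling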
@@ -63,7 +63,7 @@ class Layer1(AWSQueryConnection):
     def _get_response(self, action, params, path='/', verb='GET'):
         params['ContentType'] = 'JSON'
         response = self.make_request(action, params, path, verb)
-        body = response.read()
+        body = response.read().decode('utf-8')
         boto.log.debug(body)
         if response.status == 200:
             return json.loads(body)
@@ -351,9 +351,9 @@ class Layer1(AWSQueryConnection):
             self.build_list_params(params, options_to_remove,
                                    'OptionsToRemove.member')
         if tier_name and tier_type and tier_version:
-            params['Tier.member.Name'] = tier_name
-            params['Tier.member.Type'] = tier_type
-            params['Tier.member.Version'] = tier_version
+            params['Tier.Name'] = tier_name
+            params['Tier.Type'] = tier_type
+            params['Tier.Version'] = tier_version
         return self._get_response('CreateEnvironment', params)

     def create_storage_location(self):
@@ -1138,9 +1138,9 @@ class Layer1(AWSQueryConnection):
             self.build_list_params(params, options_to_remove,
                                    'OptionsToRemove.member')
         if tier_name and tier_type and tier_version:
-            params['Tier.member.Name'] = tier_name
-            params['Tier.member.Type'] = tier_type
-            params['Tier.member.Version'] = tier_version
+            params['Tier.Name'] = tier_name
+            params['Tier.Type'] = tier_type
+            params['Tier.Version'] = tier_version
         return self._get_response('UpdateEnvironment', params)

     def validate_configuration_settings(self, application_name,

@@ -1,5 +1,6 @@
 """Classify responses from layer1 and strict type values."""
 from datetime import datetime
+from boto.compat import six


 class BaseObject(object):
@@ -7,7 +8,7 @@ class BaseObject(object):
     def __repr__(self):
         result = self.__class__.__name__ + '{ '
         counter = 0
-        for key, value in self.__dict__.iteritems():
+        for key, value in six.iteritems(self.__dict__):
             # first iteration no comma
             counter += 1
             if counter > 1:

@@ -9,7 +9,7 @@ def beanstalk_wrapper(func, name):
     def _wrapped_low_level_api(*args, **kwargs):
         try:
             response = func(*args, **kwargs)
-        except BotoServerError, e:
+        except BotoServerError as e:
             raise exception.simple(e)
         # Turn 'this_is_a_function_name' into 'ThisIsAFunctionNameResponse'.
         cls_name = ''.join([part.capitalize() for part in name.split('_')]) + 'Response'

@@ -20,7 +20,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 # IN THE SOFTWARE.

-from connection import CloudFormationConnection
+from boto.cloudformation.connection import CloudFormationConnection
 from boto.regioninfo import RegionInfo, get_regions, load_regions

 RegionData = load_regions().get('cloudformation')

@@ -271,7 +271,7 @@ class CloudFormationConnection(AWSQueryConnection):
         :return: Parsed JSON response data
         """
         response = self.make_request(call, params, path, method)
-        body = response.read()
+        body = response.read().decode('utf-8')
         if response.status == 200:
             body = json.loads(body)
             return body

@@ -22,7 +22,7 @@
 import uuid
 import base64
 import time
-from boto.compat import json
+from boto.compat import six, json
 from boto.cloudfront.identity import OriginAccessIdentity
 from boto.cloudfront.object import Object, StreamingObject
 from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
@@ -665,7 +665,7 @@ class Distribution(object):
             raise ValueError("You must specify one of private_key_file or private_key_string")
         # If private_key_file is a file name, open it and read it
         if private_key_string is None:
-            if isinstance(private_key_file, basestring):
+            if isinstance(private_key_file, six.string_types):
                 with open(private_key_file, 'r') as file_handle:
                     private_key_string = file_handle.read()
             # Otherwise, treat it like a file

@@ -20,8 +20,8 @@
 # IN THE SOFTWARE.

 import uuid
-import urllib

+from boto.compat import urllib
 from boto.resultset import ResultSet


@@ -71,7 +71,7 @@ class InvalidationBatch(object):
         """Escape a path, make sure it begins with a slash and contains no invalid characters"""
         if not p[0] == "/":
             p = "/%s" % p
-        return urllib.quote(p)
+        return urllib.parse.quote(p)

     def to_xml(self):
         """Get this batch as XML"""

@@ -20,7 +20,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 # IN THE SOFTWARE.

-from identity import OriginAccessIdentity
+from boto.cloudfront.identity import OriginAccessIdentity

 def get_oai_value(origin_access_identity):
     if isinstance(origin_access_identity, OriginAccessIdentity):

@@ -221,13 +221,15 @@ class CommitResponse(object):
         self.doc_service = doc_service
         self.sdf = sdf

+        _body = response.content.decode('utf-8')
+
         try:
-            self.content = json.loads(response.content)
+            self.content = json.loads(_body)
         except:
             boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n'
-                           'SDF:\n{1}'.format(response.content, self.sdf))
+                           'SDF:\n{1}'.format(_body, self.sdf))
             raise boto.exception.BotoServerError(self.response.status_code, '',
-                                                 body=response.content)
+                                                 body=_body)

         self.status = self.content['status']
         if self.status == 'error':
@@ -238,6 +240,9 @@ class CommitResponse(object):
                     raise EncodingError("Illegal Unicode character in document")
                 elif e == "The Content-Length is too long":
                     raise ContentTooLongError("Content was too long")
+            if 'adds' not in self.content or 'deletes' not in self.content:
+                raise SearchServiceException("Error indexing documents"
+                                             " => %s" % self.content.get('message', ''))
         else:
             self.errors = []

@@ -24,19 +24,19 @@

 import boto
 from boto.compat import json
-from .optionstatus import OptionStatus
-from .optionstatus import IndexFieldStatus
-from .optionstatus import ServicePoliciesStatus
-from .optionstatus import RankExpressionStatus
-from .document import DocumentServiceConnection
-from .search import SearchConnection
+from boto.cloudsearch.optionstatus import OptionStatus
+from boto.cloudsearch.optionstatus import IndexFieldStatus
+from boto.cloudsearch.optionstatus import ServicePoliciesStatus
+from boto.cloudsearch.optionstatus import RankExpressionStatus
+from boto.cloudsearch.document import DocumentServiceConnection
+from boto.cloudsearch.search import SearchConnection


 def handle_bool(value):
     if value in [True, 'true', 'True', 'TRUE', 1]:
         return True
     return False


 class Domain(object):
     """
     A Cloudsearch domain.
@@ -118,7 +118,7 @@ class Domain(object):
     @created.setter
     def created(self, value):
         self._created = handle_bool(value)
-
+
     @property
     def deleted(self):
         return self._deleted
@@ -126,7 +126,7 @@ class Domain(object):
     @deleted.setter
     def deleted(self, value):
         self._deleted = handle_bool(value)
-
+
     @property
     def processing(self):
         return self._processing
@@ -134,7 +134,7 @@ class Domain(object):
     @processing.setter
     def processing(self, value):
         self._processing = handle_bool(value)
-
+
     @property
     def requires_index_documents(self):
         return self._requires_index_documents
@@ -142,7 +142,7 @@ class Domain(object):
     @requires_index_documents.setter
     def requires_index_documents(self, value):
         self._requires_index_documents = handle_bool(value)
-
+
     @property
     def search_partition_count(self):
         return self._search_partition_count
@@ -150,7 +150,7 @@ class Domain(object):
     @search_partition_count.setter
     def search_partition_count(self, value):
         self._search_partition_count = int(value)
-
+
     @property
     def search_instance_count(self):
         return self._search_instance_count
@@ -158,7 +158,7 @@ class Domain(object):
     @search_instance_count.setter
     def search_instance_count(self, value):
         self._search_instance_count = int(value)
-
+
     @property
     def num_searchable_docs(self):
         return self._num_searchable_docs
@@ -166,7 +166,7 @@ class Domain(object):
     @num_searchable_docs.setter
     def num_searchable_docs(self, value):
         self._num_searchable_docs = int(value)
-
+
     @property
     def name(self):
         return self.domain_name

@@ -680,7 +680,7 @@ class Layer1(AWSQueryConnection):
                                   'update_stemming_options_result',
                                   'stems')
         params = {'DomainName': domain_name,
-                  'Stems': stems}
+                  'Stems': stems}
         return self.get_response(doc_path, 'UpdateStemmingOptions',
                                  params, verb='POST')

@@ -22,8 +22,8 @@
 # IN THE SOFTWARE.
 #

-from .layer1 import Layer1
-from .domain import Domain
+from boto.cloudsearch.layer1 import Layer1
+from boto.cloudsearch.domain import Domain


 class Layer2(object):

@@ -22,9 +22,7 @@
 # IN THE SOFTWARE.
 #
-from math import ceil
-import time
 import boto
-from boto.compat import json
+from boto.compat import json, map, six
 import requests


@@ -52,7 +50,7 @@ class SearchResults(object):

         self.facets = {}
         if 'facets' in attrs:
-            for (facet, values) in attrs['facets'].iteritems():
+            for (facet, values) in attrs['facets'].items():
                 if 'constraints' in values:
                     self.facets[facet] = dict((k, v) for (k, v) in map(lambda x: (x['value'], x['count']), values['constraints']))

@@ -129,19 +127,19 @@ class Query(object):
             params['facet'] = ','.join(self.facet)

         if self.facet_constraints:
-            for k, v in self.facet_constraints.iteritems():
+            for k, v in six.iteritems(self.facet_constraints):
                 params['facet-%s-constraints' % k] = v

         if self.facet_sort:
-            for k, v in self.facet_sort.iteritems():
+            for k, v in six.iteritems(self.facet_sort):
                 params['facet-%s-sort' % k] = v

         if self.facet_top_n:
-            for k, v in self.facet_top_n.iteritems():
+            for k, v in six.iteritems(self.facet_top_n):
                 params['facet-%s-top-n' % k] = v

         if self.t:
-            for k, v in self.t.iteritems():
+            for k, v in six.iteritems(self.t):
                 params['t-%s' % k] = v
         return params

@@ -288,19 +286,20 @@ class SearchConnection(object):
         params = query.to_params()

         r = requests.get(url, params=params)
+        body = r.content.decode('utf-8')
         try:
-            data = json.loads(r.content)
-        except ValueError, e:
+            data = json.loads(body)
+        except ValueError as e:
             if r.status_code == 403:
                 msg = ''
                 import re
-                g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', r.content)
+                g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', body)
                 try:
                     msg = ': %s' % (g.groups()[0].strip())
                 except AttributeError:
                     pass
                 raise SearchServiceException('Authentication error from Amazon%s' % msg)
-            raise SearchServiceException("Got non-json response from Amazon. %s" % r.content, query)
+            raise SearchServiceException("Got non-json response from Amazon. %s" % body, query)

         if 'messages' in data and 'error' in data:
             for m in data['messages']:
@@ -72,4 +72,3 @@ class SourceAttribute(object):
             valid = '|'.join(self.ValidDataFunctions)
             raise ValueError('data_function must be one of: %s' % valid)
         self._data_function = value
-
awx/lib/site-packages/boto/cloudsearch2/__init__.py (new file, 42 lines)
@@ -0,0 +1,42 @@
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.regioninfo import get_regions


def regions():
    """
    Get all available regions for the Amazon CloudSearch service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo`
    """
    import boto.cloudsearch2.layer1
    return get_regions(
        'cloudsearch',
        connection_cls=boto.cloudsearch2.layer1.CloudSearchConnection
    )


def connect_to_region(region_name, **kw_params):
    for region in regions():
        if region.name == region_name:
            return region.connect(**kw_params)
    return None
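Note: the new module above exposes the usual boto region helpers. A hypothetical usage sketch (region name and credentials are illustrative):

    import boto.cloudsearch2

    conn = boto.cloudsearch2.connect_to_region(
        'us-east-1',
        aws_access_key_id='AKIA...',
        aws_secret_access_key='...')
    # conn is a cloudsearch2 layer1 CloudSearchConnection, or None if the
    # region name is unknown.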
awx/lib/site-packages/boto/cloudsearch2/document.py (new file, 275 lines)
@@ -0,0 +1,275 @@
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

import boto.exception
from boto.compat import json
import requests
import boto


class SearchServiceException(Exception):
    pass


class CommitMismatchError(Exception):
    # Let's do some extra work and let the user handle errors on his/her own.

    errors = None


class EncodingError(Exception):
    """
    Content sent for Cloud Search indexing was incorrectly encoded.

    This usually happens when a document is marked as unicode but non-unicode
    characters are present.
    """
    pass


class ContentTooLongError(Exception):
    """
    Content sent for Cloud Search indexing was too long

    This will usually happen when documents queued for indexing add up to more
    than the limit allowed per upload batch (5MB)

    """
    pass


class DocumentServiceConnection(object):
    """
    A CloudSearch document service.

    The DocumentServiceConection is used to add, remove and update documents in
    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document
    Format).

    To generate an appropriate SDF, use :func:`add` to add or update documents,
    as well as :func:`delete` to remove documents.

    Once the set of documents is ready to be index, use :func:`commit` to send
    the commands to CloudSearch.

    If there are a lot of documents to index, it may be preferable to split the
    generation of SDF data and the actual uploading into CloudSearch. Retrieve
    the current SDF with :func:`get_sdf`. If this file is the uploaded into S3,
    it can be retrieved back afterwards for upload into CloudSearch using
    :func:`add_sdf_from_s3`.

    The SDF is not cleared after a :func:`commit`. If you wish to continue
    using the DocumentServiceConnection for another batch upload of commands,
    you will need to :func:`clear_sdf` first to stop the previous batch of
    commands from being uploaded again.

    """

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            self.endpoint = domain.doc_service_endpoint
        self.documents_batch = []
        self._sdf = None

    def add(self, _id, fields):
        """
        Add a document to be processed by the DocumentService

        The document will not actually be added until :func:`commit` is called

        :type _id: string
        :param _id: A unique ID used to refer to this document.

        :type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded .
        """

        d = {'type': 'add', 'id': _id, 'fields': fields}
        self.documents_batch.append(d)

    def delete(self, _id):
        """
        Schedule a document to be removed from the CloudSearch service

        The document will not actually be scheduled for removal until
        :func:`commit` is called

        :type _id: string
        :param _id: The unique ID of this document.
        """

        d = {'type': 'delete', 'id': _id}
        self.documents_batch.append(d)

    def get_sdf(self):
        """
        Generate the working set of documents in Search Data Format (SDF)

        :rtype: string
        :returns: JSON-formatted string of the documents in SDF
        """

        return self._sdf if self._sdf else json.dumps(self.documents_batch)

    def clear_sdf(self):
        """
        Clear the working documents from this DocumentServiceConnection

        This should be used after :func:`commit` if the connection will be
        reused for another set of documents.
        """

        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """
        Load an SDF from S3

        Using this method will result in documents added through
        :func:`add` and :func:`delete` being ignored.

        :type key_obj: :class:`boto.s3.key.Key`
        :param key_obj: An S3 key which contains an SDF
        """
        #@todo:: (lucas) would be nice if this could just take an s3://uri..."

        self._sdf = key_obj.get_contents_as_string()

    def commit(self):
        """
        Actually send an SDF to CloudSearch for processing

        If an SDF file has been explicitly loaded it will be used. Otherwise,
        documents added through :func:`add` and :func:`delete` will be used.

        :rtype: :class:`CommitResponse`
        :returns: A summary of documents added and deleted
        """

        sdf = self.get_sdf()

        if ': null' in sdf:
            boto.log.error('null value in sdf detected. This will probably '
                           'raise 500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])

        api_version = '2013-01-01'
        if self.domain:
            api_version = self.domain.layer1.APIVersion
        url = "http://%s/%s/documents/batch" % (self.endpoint, api_version)

        # Keep-alive is automatic in a post-1.0 requests world.
        session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=20,
            pool_maxsize=50,
            max_retries=5
        )
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        r = session.post(url, data=sdf,
                         headers={'Content-Type': 'application/json'})

        return CommitResponse(r, self, sdf)


class CommitResponse(object):
    """Wrapper for response to Cloudsearch document batch commit.

    :type response: :class:`requests.models.Response`
    :param response: Response from Cloudsearch /documents/batch API

    :type doc_service: :class:`boto.cloudsearch2.document.DocumentServiceConnection`
    :param doc_service: Object containing the documents posted and methods to
        retry

    :raises: :class:`boto.exception.BotoServerError`
    :raises: :class:`boto.cloudsearch2.document.SearchServiceException`
    :raises: :class:`boto.cloudsearch2.document.EncodingError`
    :raises: :class:`boto.cloudsearch2.document.ContentTooLongError`
    """
    def __init__(self, response, doc_service, sdf):
        self.response = response
        self.doc_service = doc_service
        self.sdf = sdf

        _body = response.content.decode('utf-8')

        try:
            self.content = json.loads(_body)
        except:
            boto.log.error('Error indexing documents.\nResponse Content:\n{0}'
                           '\n\nSDF:\n{1}'.format(_body, self.sdf))
            raise boto.exception.BotoServerError(self.response.status_code, '',
                                                 body=_body)

        self.status = self.content['status']
        if self.status == 'error':
            self.errors = [e.get('message') for e in self.content.get('errors',
                                                                      [])]
            for e in self.errors:
                if "Illegal Unicode character" in e:
                    raise EncodingError("Illegal Unicode character in document")
                elif e == "The Content-Length is too long":
                    raise ContentTooLongError("Content was too long")
        else:
            self.errors = []

        self.adds = self.content['adds']
        self.deletes = self.content['deletes']
        self._check_num_ops('add', self.adds)
        self._check_num_ops('delete', self.deletes)

    def _check_num_ops(self, type_, response_num):
        """Raise exception if number of ops in response doesn't match commit

        :type type_: str
        :param type_: Type of commit operation: 'add' or 'delete'

        :type response_num: int
        :param response_num: Number of adds or deletes in the response.

        :raises: :class:`boto.cloudsearch2.document.CommitMismatchError`
        """
        commit_num = len([d for d in self.doc_service.documents_batch
                          if d['type'] == type_])

        if response_num != commit_num:
            boto.log.debug(self.response.content)
            # There will always be a commit mismatch error if there is any
            # errors on cloudsearch. self.errors gets lost when this
            # CommitMismatchError is raised. Whoever is using boto has no idea
            # why their commit failed. They can't even notify the user of the
            # cause by parsing the error messages from amazon. So let's
            # attach the self.errors to the exceptions if we already spent
            # time and effort collecting them out of the response.
            exc = CommitMismatchError(
                'Incorrect number of {0}s returned. Commit: {1} Response: {2}'
                .format(type_, commit_num, response_num)
            )
            exc.errors = self.errors
            raise exc
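Note: the DocumentServiceConnection docstring above describes the add/delete -> commit -> clear_sdf batching workflow. A short sketch of that flow (domain setup elided; field values are made up):

    # doc_service would normally come from a cloudsearch2 Domain object, e.g.
    # DocumentServiceConnection(endpoint='doc-...cloudsearch.amazonaws.com')
    doc_service.add('doc-1', {'title': 'Hello', 'year': 2014})
    doc_service.delete('doc-0')
    response = doc_service.commit()      # uploads the batch as SDF JSON
    print(response.adds, response.deletes)
    doc_service.clear_sdf()              # required before reusing the batch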
awx/lib/site-packages/boto/cloudsearch2/domain.py (new file, 542 lines; listing truncated below)
@@ -0,0 +1,542 @@
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish, dis-
|
||||
# tribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
# persons to whom the Software is furnished to do so, subject to the fol-
|
||||
# lowing conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included
|
||||
# in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
|
||||
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
# IN THE SOFTWARE.
|
||||
#
|
||||
|
||||
from boto.cloudsearch2.optionstatus import IndexFieldStatus
|
||||
from boto.cloudsearch2.optionstatus import ServicePoliciesStatus
|
||||
from boto.cloudsearch2.optionstatus import ExpressionStatus
|
||||
from boto.cloudsearch2.optionstatus import AvailabilityOptionsStatus
|
||||
from boto.cloudsearch2.optionstatus import ScalingParametersStatus
|
||||
from boto.cloudsearch2.document import DocumentServiceConnection
|
||||
from boto.cloudsearch2.search import SearchConnection
|
||||
|
||||
|
||||
def handle_bool(value):
|
||||
if value in [True, 'true', 'True', 'TRUE', 1]:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class Domain(object):
|
||||
"""
|
||||
A Cloudsearch domain.
|
||||
|
||||
:ivar name: The name of the domain.
|
||||
|
||||
:ivar id: The internally generated unique identifier for the domain.
|
||||
|
||||
:ivar created: A boolean which is True if the domain is
|
||||
created. It can take several minutes to initialize a domain
|
||||
when CreateDomain is called. Newly created search domains are
|
||||
returned with a False value for Created until domain creation
|
||||
is complete
|
||||
|
||||
:ivar deleted: A boolean which is True if the search domain has
|
||||
been deleted. The system must clean up resources dedicated to
|
||||
the search domain when delete is called. Newly deleted
|
||||
search domains are returned from list_domains with a True
|
||||
value for deleted for several minutes until resource cleanup
|
||||
is complete.
|
||||
|
||||
:ivar processing: True if processing is being done to activate the
|
||||
current domain configuration.
|
||||
|
||||
:ivar num_searchable_docs: The number of documents that have been
|
||||
submittted to the domain and indexed.
|
||||
|
||||
:ivar requires_index_document: True if index_documents needs to be
|
||||
called to activate the current domain configuration.
|
||||
|
||||
:ivar search_instance_count: The number of search instances that are
|
||||
available to process search requests.
|
||||
|
||||
:ivar search_instance_type: The instance type that is being used to
|
||||
process search requests.
|
||||
|
||||
:ivar search_partition_count: The number of partitions across which
|
||||
the search index is spread.
|
||||
"""
|
||||
|
||||
def __init__(self, layer1, data):
|
||||
"""
|
||||
Constructor - Create a domain object from a layer1 and data params
|
||||
|
||||
:type layer1: :class:`boto.cloudsearch2.layer1.Layer1` object
|
||||
:param layer1: A :class:`boto.cloudsearch2.layer1.Layer1` object
|
||||
which is used to perform operations on the domain.
|
||||
"""
|
||||
self.layer1 = layer1
|
||||
self.update_from_data(data)
|
||||
|
||||
def update_from_data(self, data):
|
||||
self.created = data['Created']
|
||||
self.deleted = data['Deleted']
|
||||
self.processing = data['Processing']
|
||||
self.requires_index_documents = data['RequiresIndexDocuments']
|
||||
self.domain_id = data['DomainId']
|
||||
self.domain_name = data['DomainName']
|
||||
self.search_instance_count = data['SearchInstanceCount']
|
||||
self.search_instance_type = data.get('SearchInstanceType', None)
|
||||
self.search_partition_count = data['SearchPartitionCount']
|
||||
self._doc_service = data['DocService']
|
||||
self._service_arn = data['ARN']
|
||||
self._search_service = data['SearchService']
|
||||
|
||||
@property
|
||||
def service_arn(self):
|
||||
return self._service_arn
|
||||
|
||||
@property
|
||||
def doc_service_endpoint(self):
|
||||
return self._doc_service['Endpoint']
|
||||
|
||||
@property
|
||||
def search_service_endpoint(self):
|
||||
return self._search_service['Endpoint']
|
||||
|
||||
@property
|
||||
def created(self):
|
||||
return self._created
|
||||
|
||||
@created.setter
|
||||
def created(self, value):
|
||||
self._created = handle_bool(value)
|
||||
|
||||
@property
|
||||
def deleted(self):
|
||||
return self._deleted
|
||||
|
||||
@deleted.setter
|
||||
def deleted(self, value):
|
||||
self._deleted = handle_bool(value)
|
||||
|
||||
@property
|
||||
def processing(self):
|
||||
return self._processing
|
||||
|
||||
@processing.setter
|
||||
def processing(self, value):
|
||||
self._processing = handle_bool(value)
|
||||
|
||||
@property
|
||||
def requires_index_documents(self):
|
||||
return self._requires_index_documents
|
||||
|
||||
@requires_index_documents.setter
|
||||
def requires_index_documents(self, value):
|
||||
self._requires_index_documents = handle_bool(value)
|
||||
|
||||
@property
|
||||
def search_partition_count(self):
|
||||
return self._search_partition_count
|
||||
|
||||
@search_partition_count.setter
|
||||
def search_partition_count(self, value):
|
||||
self._search_partition_count = int(value)
|
||||
|
||||
@property
|
||||
def search_instance_count(self):
|
||||
return self._search_instance_count
|
||||
|
||||
@search_instance_count.setter
|
||||
def search_instance_count(self, value):
|
||||
self._search_instance_count = int(value)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.domain_name
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.domain_id
|
||||
|
||||
def delete(self):
|
||||
"""
|
||||
Delete this domain and all index data associated with it.
|
||||
"""
|
||||
return self.layer1.delete_domain(self.name)
|
||||
|
||||
def get_analysis_schemes(self):
|
||||
"""
|
||||
Return a list of Analysis Scheme objects.
|
||||
"""
|
||||
return self.layer1.describe_analysis_schemes(self.name)
|
||||
|
||||
def get_availability_options(self):
|
||||
"""
|
||||
Return a :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus`
|
||||
object representing the currently defined availability options for
|
||||
the domain.
|
||||
:return: OptionsStatus object
|
||||
:rtype: :class:`boto.cloudsearch2.option.AvailabilityOptionsStatus`
|
||||
object
|
||||
"""
|
||||
return AvailabilityOptionsStatus(
|
||||
self, refresh_fn=self.layer1.describe_availability_options,
|
||||
refresh_key=['DescribeAvailabilityOptionsResponse',
|
||||
'DescribeAvailabilityOptionsResult',
|
||||
'AvailabilityOptions'],
|
||||
save_fn=self.layer1.update_availability_options)
|
||||
|
||||
def get_scaling_options(self):
|
||||
"""
|
||||
Return a :class:`boto.cloudsearch2.option.ScalingParametersStatus`
|
||||
object representing the currently defined scaling options for the
|
||||
domain.
|
||||
:return: ScalingParametersStatus object
|
||||
:rtype: :class:`boto.cloudsearch2.option.ScalingParametersStatus`
|
||||
object
|
||||
"""
|
||||
return ScalingParametersStatus(
|
||||
self, refresh_fn=self.layer1.describe_scaling_parameters,
|
||||
refresh_key=['DescribeScalingParametersResponse',
|
||||
'DescribeScalingParametersResult',
|
||||
'ScalingParameters'],
|
||||
save_fn=self.layer1.update_scaling_parameters)
|
||||
|
||||
def get_access_policies(self):
|
||||
"""
|
||||
Return a :class:`boto.cloudsearch2.option.ServicePoliciesStatus`
|
||||
object representing the currently defined access policies for the
|
||||
domain.
|
||||
:return: ServicePoliciesStatus object
|
||||
:rtype: :class:`boto.cloudsearch2.option.ServicePoliciesStatus` object
|
||||
"""
|
||||
return ServicePoliciesStatus(
|
||||
self, refresh_fn=self.layer1.describe_service_access_policies,
|
||||
refresh_key=['DescribeServiceAccessPoliciesResponse',
|
||||
'DescribeServiceAccessPoliciesResult',
|
||||
'AccessPolicies'],
|
||||
save_fn=self.layer1.update_service_access_policies)
|
||||
|
||||
def index_documents(self):
|
||||
"""
|
||||
Tells the search domain to start indexing its documents using
|
||||
the latest text processing options and IndexFields. This
|
||||
operation must be invoked to make options whose OptionStatus
|
||||
has OptionState of RequiresIndexDocuments visible in search
|
||||
results.
|
||||
"""
|
||||
self.layer1.index_documents(self.name)
|
||||
|
||||
def get_index_fields(self, field_names=None):
|
||||
"""
|
||||
Return a list of index fields defined for this domain.
|
||||
:return: list of IndexFieldStatus objects
|
||||
:rtype: list of :class:`boto.cloudsearch2.option.IndexFieldStatus`
|
||||
object
|
||||
"""
|
||||
data = self.layer1.describe_index_fields(self.name, field_names)
|
||||
|
||||
data = (data['DescribeIndexFieldsResponse']
|
||||
['DescribeIndexFieldsResult']
|
||||
['IndexFields'])
|
||||
|
||||
return [IndexFieldStatus(self, d) for d in data]
|
||||
|
||||
    def create_index_field(self, field_name, field_type,
                           default='', facet=False, returnable=False,
                           searchable=False, sortable=False,
                           highlight=False, source_field=None,
                           analysis_scheme=None):
        """
        Defines an ``IndexField``, either replacing an existing
        definition or creating a new one.

        :type field_name: string
        :param field_name: The name of a field in the search index.

        :type field_type: string
        :param field_type: The type of field. Valid values are
            int | double | literal | text | date | latlon |
            int-array | double-array | literal-array | text-array | date-array

        :type default: string or int
        :param default: The default value for the field. If the
            field is of type ``int`` this should be an integer value.
            Otherwise, it's a string.

        :type facet: bool
        :param facet: A boolean to indicate whether facets
            are enabled for this field or not. Does not apply to
            fields of type ``int, int-array, text, text-array``.

        :type returnable: bool
        :param returnable: A boolean to indicate whether values
            of this field can be returned in search results or
            used in ranking.

        :type searchable: bool
        :param searchable: A boolean to indicate whether search
            is enabled for this field or not.

        :type sortable: bool
        :param sortable: A boolean to indicate whether sorting
            is enabled for this field or not. Does not apply to
            fields of array types.

        :type highlight: bool
        :param highlight: A boolean to indicate whether highlighting
            is enabled for this field or not. Does not apply to
            fields of type ``double, int, date, latlon``.

        :type source_field: list of strings or string
        :param source_field: For array types, this is the list of fields
            to treat as the source. For singular types, pass a string only.

        :type analysis_scheme: string
        :param analysis_scheme: The analysis scheme to use for this field.
            Only applies to ``text | text-array`` field types.

        :return: IndexFieldStatus object
        :rtype: :class:`boto.cloudsearch2.optionstatus.IndexFieldStatus`

        :raises: BaseException, InternalException, LimitExceededException,
            InvalidTypeException, ResourceNotFoundException
        """
        index = {
            'IndexFieldName': field_name,
            'IndexFieldType': field_type
        }
        if field_type == 'literal':
            index['LiteralOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable,
                'SortEnabled': sortable
            }
            if default:
                index['LiteralOptions']['DefaultValue'] = default
            if source_field:
                index['LiteralOptions']['SourceField'] = source_field
        elif field_type == 'literal-array':
            index['LiteralArrayOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable
            }
            if default:
                index['LiteralArrayOptions']['DefaultValue'] = default
            if source_field:
                index['LiteralArrayOptions']['SourceFields'] = \
                    ','.join(source_field)
        elif field_type == 'int':
            index['IntOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable,
                'SortEnabled': sortable
            }
            # Like the other field types, only send a default when the
            # caller actually provided one.
            if default:
                index['IntOptions']['DefaultValue'] = default
            if source_field:
                index['IntOptions']['SourceField'] = source_field
        elif field_type == 'int-array':
            index['IntArrayOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable
            }
            if default:
                index['IntArrayOptions']['DefaultValue'] = default
            if source_field:
                index['IntArrayOptions']['SourceFields'] = \
                    ','.join(source_field)
        elif field_type == 'date':
            index['DateOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable,
                'SortEnabled': sortable
            }
            if default:
                index['DateOptions']['DefaultValue'] = default
            if source_field:
                index['DateOptions']['SourceField'] = source_field
        elif field_type == 'date-array':
            index['DateArrayOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable
            }
            if default:
                index['DateArrayOptions']['DefaultValue'] = default
            if source_field:
                index['DateArrayOptions']['SourceFields'] = \
                    ','.join(source_field)
        elif field_type == 'double':
            index['DoubleOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable,
                'SortEnabled': sortable
            }
            if default:
                index['DoubleOptions']['DefaultValue'] = default
            if source_field:
                index['DoubleOptions']['SourceField'] = source_field
        elif field_type == 'double-array':
            index['DoubleArrayOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable
            }
            if default:
                index['DoubleArrayOptions']['DefaultValue'] = default
            if source_field:
                index['DoubleArrayOptions']['SourceFields'] = \
                    ','.join(source_field)
        elif field_type == 'text':
            index['TextOptions'] = {
                'ReturnEnabled': returnable,
                'HighlightEnabled': highlight,
                'SortEnabled': sortable
            }
            if default:
                index['TextOptions']['DefaultValue'] = default
            if source_field:
                index['TextOptions']['SourceField'] = source_field
            if analysis_scheme:
                index['TextOptions']['AnalysisScheme'] = analysis_scheme
        elif field_type == 'text-array':
            index['TextArrayOptions'] = {
                'ReturnEnabled': returnable,
                'HighlightEnabled': highlight
            }
            if default:
                index['TextArrayOptions']['DefaultValue'] = default
            if source_field:
                index['TextArrayOptions']['SourceFields'] = \
                    ','.join(source_field)
            if analysis_scheme:
                index['TextArrayOptions']['AnalysisScheme'] = analysis_scheme
        elif field_type == 'latlon':
            index['LatLonOptions'] = {
                'FacetEnabled': facet,
                'ReturnEnabled': returnable,
                'SearchEnabled': searchable,
                'SortEnabled': sortable
            }
            if default:
                index['LatLonOptions']['DefaultValue'] = default
            if source_field:
                index['LatLonOptions']['SourceField'] = source_field

        data = self.layer1.define_index_field(self.name, index)

        data = (data['DefineIndexFieldResponse']
                ['DefineIndexFieldResult']
                ['IndexField'])

        return IndexFieldStatus(self, data,
                                self.layer1.describe_index_fields)

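# A minimal usage sketch (the domain and field names are hypothetical, and
# boto credentials are assumed to be configured): Domain.create_index_field
# builds the IndexField structure shown above and submits it through layer1.
import boto

conn = boto.connect_cloudsearch2()
domain = conn.lookup('mydomain')
# A sortable integer field with an explicit default value.
domain.create_index_field('year', 'int', default=2014, sortable=True)
# A highlightable text field processed with a built-in analysis scheme.
domain.create_index_field('headline', 'text', returnable=True,
                          highlight=True, analysis_scheme='_en_default_')
# Fields in state RequiresIndexDocuments only become searchable after
# re-indexing.
domain.index_documents()
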
    def get_expressions(self, names=None):
        """
        Return a list of expressions defined for this domain.

        :return: list of ExpressionStatus objects
        :rtype: list of :class:`boto.cloudsearch2.optionstatus.ExpressionStatus`
        """
        fn = self.layer1.describe_expressions
        data = fn(self.name, names)

        data = (data['DescribeExpressionsResponse']
                ['DescribeExpressionsResult']
                ['Expressions'])

        return [ExpressionStatus(self, d, fn) for d in data]

    def create_expression(self, name, value):
        """
        Create a new expression.

        :type name: string
        :param name: The name of an expression for processing
            during a search request.

        :type value: string
        :param value: The expression to evaluate for ranking
            or thresholding while processing a search request. The
            expression syntax is based on JavaScript expressions
            and supports:

            * Single-value, sort-enabled numeric fields (int, double, date)
            * Other expressions
            * The _score variable, which references a document's relevance
              score
            * The _time variable, which references the current epoch time
            * Integer, floating point, hex, and octal literals
            * Arithmetic operators: + - * / %
            * Bitwise operators: | & ^ ~ << >> >>>
            * Boolean operators (including the ternary operator): && || ! ?:
            * Comparison operators: < <= == >= >
            * Mathematical functions: abs ceil exp floor ln log2 log10 logn
              max min pow sqrt
            * Trigonometric functions: acos acosh asin asinh atan atan2 atanh
              cos cosh sin sinh tan tanh
            * The haversin distance function

            Expressions always return an integer value from 0 to the maximum
            64-bit signed integer value (2^63 - 1). Intermediate results are
            calculated as double-precision floating point values and the
            return value is rounded to the nearest integer. If the expression
            is invalid or evaluates to a negative value, it returns 0. If the
            expression evaluates to a value greater than the maximum, it
            returns the maximum value.

            The source data for an Expression can be the name of an
            IndexField of type int or double, another Expression or the
            reserved name _score. The _score source is defined to return
            a double from 0 to 10.0 (inclusive) to indicate how relevant
            a document is to the search request, taking into account
            repetition of search terms in the document and proximity of
            search terms to each other in each matching IndexField in
            the document.

            For more information about using expressions to customize
            ranking, see the Amazon CloudSearch Developer Guide.

        :return: ExpressionStatus object
        :rtype: :class:`boto.cloudsearch2.optionstatus.ExpressionStatus`

        :raises: BaseException, InternalException, LimitExceededException,
            InvalidTypeException, ResourceNotFoundException
        """
        data = self.layer1.define_expression(self.name, name, value)

        data = (data['DefineExpressionResponse']
                ['DefineExpressionResult']
                ['Expression'])

        return ExpressionStatus(self, data,
                                self.layer1.describe_expressions)

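# A minimal usage sketch, assuming the Domain object from the
# create_index_field example above: expressions combine index fields,
# numeric literals and the _score relevance value into a custom sort key.
expr = domain.create_expression(
    'popularity_hits', '(0.3 * popularity) + (0.7 * _score)')
print(expr.status)  # e.g. 'RequiresIndexDocuments' while re-indexing
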
    def get_document_service(self):
        return DocumentServiceConnection(domain=self)

    def get_search_service(self):
        return SearchConnection(domain=self)

    def __repr__(self):
        return '<Domain: %s>' % self.domain_name
46
awx/lib/site-packages/boto/cloudsearch2/exceptions.py
Normal file
@ -0,0 +1,46 @@
"""
Exceptions that are specific to the cloudsearch2 module.
"""
from boto.exception import BotoServerError


class InvalidTypeException(BotoServerError):
    """
    Raised when an invalid record type is passed to CloudSearch.
    """
    pass


class LimitExceededException(BotoServerError):
    """
    Raised when a limit has been exceeded.
    """
    pass


class InternalException(BotoServerError):
    """
    A generic server-side error.
    """
    pass


class DisabledOperationException(BotoServerError):
    """
    Raised when an operation has been disabled.
    """
    pass


class ResourceNotFoundException(BotoServerError):
    """
    Raised when a requested resource does not exist.
    """
    pass


class BaseException(BotoServerError):
    """
    A generic server-side error.
    """
    pass
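# A minimal sketch of catching these faults (the domain name is
# hypothetical): layer1 maps the JSON error code returned by the service
# onto the classes above, falling back to JSONResponseError for unknown
# codes.
import boto
from boto.cloudsearch2.exceptions import ResourceNotFoundException

conn = boto.connect_cloudsearch2()
try:
    conn.layer1.describe_expressions('no-such-domain')
except ResourceNotFoundException as e:
    print('domain is gone: %s' % e)
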
779
awx/lib/site-packages/boto/cloudsearch2/layer1.py
Normal file
@ -0,0 +1,779 @@
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudsearch2 import exceptions
from boto.compat import json


class CloudSearchConnection(AWSQueryConnection):
    """
    Amazon CloudSearch Configuration Service
    You use the Amazon CloudSearch configuration service to create,
    configure, and manage search domains. Configuration service
    requests are submitted using the AWS Query protocol. AWS Query
    requests are HTTP or HTTPS requests submitted via HTTP GET or POST
    with a query parameter named Action.

    The endpoint for configuration service requests is region-
    specific: cloudsearch.<region>.amazonaws.com. For example,
    cloudsearch.us-east-1.amazonaws.com. For a current list of
    supported regions and endpoints, see `Regions and Endpoints`_.
    """
    APIVersion = "2013-01-01"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com"
    ResponseError = JSONResponseError

    _faults = {
        "InvalidTypeException": exceptions.InvalidTypeException,
        "LimitExceededException": exceptions.LimitExceededException,
        "InternalException": exceptions.InternalException,
        "DisabledOperationException": exceptions.DisabledOperationException,
        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
        "BaseException": exceptions.BaseException,
    }

    def __init__(self, **kwargs):
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)

        if 'host' not in kwargs or kwargs['host'] is None:
            kwargs['host'] = region.endpoint

        super(CloudSearchConnection, self).__init__(**kwargs)
        self.region = region

    def _required_auth_capability(self):
        return ['hmac-v4']

    def build_suggesters(self, domain_name):
        """
        Indexes the search suggestions.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        """
        params = {'DomainName': domain_name, }
        return self._make_request(
            action='BuildSuggesters',
            verb='POST',
            path='/', params=params)

    def create_domain(self, domain_name):
        """
        Creates a new search domain. For more information, see
        `Creating a Search Domain`_ in the Amazon CloudSearch
        Developer Guide.

        :type domain_name: string
        :param domain_name: A name for the domain you are creating. Allowed
            characters are a-z (lower-case letters), 0-9, and hyphen (-).
            Domain names must start with a letter or number and be at least 3
            and no more than 28 characters long.

        """
        params = {'DomainName': domain_name, }
        return self._make_request(
            action='CreateDomain',
            verb='POST',
            path='/', params=params)

    def define_analysis_scheme(self, domain_name, analysis_scheme):
        """
        Configures an analysis scheme for a domain. An analysis scheme
        defines language-specific text processing options for a `text`
        field. For more information, see `Configuring Analysis
        Schemes`_ in the Amazon CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type analysis_scheme: dict
        :param analysis_scheme: Configuration information for an analysis
            scheme. Each analysis scheme has a unique name and specifies the
            language of the text to be processed. The following options can be
            configured for an analysis scheme: `Synonyms`, `Stopwords`,
            `StemmingDictionary`, and `AlgorithmicStemming`.

        """
        params = {'DomainName': domain_name, }
        self.build_complex_param(params, 'AnalysisScheme',
                                 analysis_scheme)
        return self._make_request(
            action='DefineAnalysisScheme',
            verb='POST',
            path='/', params=params)

    def define_expression(self, domain_name, expression):
        """
        Configures an `Expression` for the search domain. Used to
        create new expressions and modify existing ones. If the
        expression exists, the new configuration replaces the old one.
        For more information, see `Configuring Expressions`_ in the
        Amazon CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type expression: dict
        :param expression: A named expression that can be evaluated at search
            time. Can be used for sorting and filtering search results and
            constructing other expressions.

        """
        params = {'DomainName': domain_name, }
        self.build_complex_param(params, 'Expression',
                                 expression)
        return self._make_request(
            action='DefineExpression',
            verb='POST',
            path='/', params=params)

    def define_index_field(self, domain_name, index_field):
        """
        Configures an `IndexField` for the search domain. Used to
        create new fields and modify existing ones. You must specify
        the name of the domain you are configuring and an index field
        configuration. The index field configuration specifies a
        unique name, the index field type, and the options you want to
        configure for the field. The options you can specify depend on
        the `IndexFieldType`. If the field exists, the new
        configuration replaces the old one. For more information, see
        `Configuring Index Fields`_ in the Amazon CloudSearch
        Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type index_field: dict
        :param index_field: The index field and field options you want to
            configure.

        """
        params = {'DomainName': domain_name, }
        self.build_complex_param(params, 'IndexField',
                                 index_field)
        return self._make_request(
            action='DefineIndexField',
            verb='POST',
            path='/', params=params)

    def define_suggester(self, domain_name, suggester):
        """
        Configures a suggester for a domain. A suggester enables you
        to display possible matches before users finish typing their
        queries. When you configure a suggester, you must specify the
        name of the text field you want to search for possible matches
        and a unique name for the suggester. For more information, see
        `Getting Search Suggestions`_ in the Amazon CloudSearch
        Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type suggester: dict
        :param suggester: Configuration information for a search suggester.
            Each suggester has a unique name and specifies the text field you
            want to use for suggestions. The following options can be
            configured for a suggester: `FuzzyMatching`, `SortExpression`.

        """
        params = {'DomainName': domain_name, }
        self.build_complex_param(params, 'Suggester',
                                 suggester)
        return self._make_request(
            action='DefineSuggester',
            verb='POST',
            path='/', params=params)

    def delete_analysis_scheme(self, domain_name, analysis_scheme_name):
        """
        Deletes an analysis scheme. For more information, see
        `Configuring Analysis Schemes`_ in the Amazon CloudSearch
        Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type analysis_scheme_name: string
        :param analysis_scheme_name: The name of the analysis scheme you want
            to delete.

        """
        params = {
            'DomainName': domain_name,
            'AnalysisSchemeName': analysis_scheme_name,
        }
        return self._make_request(
            action='DeleteAnalysisScheme',
            verb='POST',
            path='/', params=params)

    def delete_domain(self, domain_name):
        """
        Permanently deletes a search domain and all of its data. Once
        a domain has been deleted, it cannot be recovered. For more
        information, see `Deleting a Search Domain`_ in the Amazon
        CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: The name of the domain you want to permanently
            delete.

        """
        params = {'DomainName': domain_name, }
        return self._make_request(
            action='DeleteDomain',
            verb='POST',
            path='/', params=params)

    def delete_expression(self, domain_name, expression_name):
        """
        Removes an `Expression` from the search domain. For more
        information, see `Configuring Expressions`_ in the Amazon
        CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type expression_name: string
        :param expression_name: The name of the `Expression` to delete.

        """
        params = {
            'DomainName': domain_name,
            'ExpressionName': expression_name,
        }
        return self._make_request(
            action='DeleteExpression',
            verb='POST',
            path='/', params=params)

    def delete_index_field(self, domain_name, index_field_name):
        """
        Removes an `IndexField` from the search domain. For more
        information, see `Configuring Index Fields`_ in the Amazon
        CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type index_field_name: string
        :param index_field_name: The name of the index field you want to
            remove from the domain's indexing options.

        """
        params = {
            'DomainName': domain_name,
            'IndexFieldName': index_field_name,
        }
        return self._make_request(
            action='DeleteIndexField',
            verb='POST',
            path='/', params=params)

    def delete_suggester(self, domain_name, suggester_name):
        """
        Deletes a suggester. For more information, see `Getting Search
        Suggestions`_ in the Amazon CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type suggester_name: string
        :param suggester_name: Specifies the name of the suggester you want to
            delete.

        """
        params = {
            'DomainName': domain_name,
            'SuggesterName': suggester_name,
        }
        return self._make_request(
            action='DeleteSuggester',
            verb='POST',
            path='/', params=params)

    def describe_analysis_schemes(self, domain_name,
                                  analysis_scheme_names=None, deployed=None):
        """
        Gets the analysis schemes configured for a domain. An analysis
        scheme defines language-specific text processing options for a
        `text` field. Can be limited to specific analysis schemes by
        name. By default, shows all analysis schemes and includes any
        pending changes to the configuration. Set the `Deployed`
        option to `True` to show the active configuration and exclude
        pending changes. For more information, see `Configuring
        Analysis Schemes`_ in the Amazon CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: The name of the domain you want to describe.

        :type analysis_scheme_names: list
        :param analysis_scheme_names: The analysis schemes you want to
            describe.

        :type deployed: boolean
        :param deployed: Whether to display the deployed configuration
            (`True`) or include any pending changes (`False`). Defaults to
            `False`.

        """
        params = {'DomainName': domain_name, }
        if analysis_scheme_names is not None:
            self.build_list_params(params,
                                   analysis_scheme_names,
                                   'AnalysisSchemeNames.member')
        if deployed is not None:
            params['Deployed'] = str(deployed).lower()
        return self._make_request(
            action='DescribeAnalysisSchemes',
            verb='POST',
            path='/', params=params)

    def describe_availability_options(self, domain_name, deployed=None):
        """
        Gets the availability options configured for a domain. By
        default, shows the configuration with any pending changes. Set
        the `Deployed` option to `True` to show the active
        configuration and exclude pending changes. For more
        information, see `Configuring Availability Options`_ in the
        Amazon CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: The name of the domain you want to describe.

        :type deployed: boolean
        :param deployed: Whether to display the deployed configuration
            (`True`) or include any pending changes (`False`). Defaults to
            `False`.

        """
        params = {'DomainName': domain_name, }
        if deployed is not None:
            params['Deployed'] = str(deployed).lower()
        return self._make_request(
            action='DescribeAvailabilityOptions',
            verb='POST',
            path='/', params=params)

    def describe_domains(self, domain_names=None):
        """
        Gets information about the search domains owned by this
        account. Can be limited to specific domains. Shows all domains
        by default. For more information, see `Getting Information
        about a Search Domain`_ in the Amazon CloudSearch Developer
        Guide.

        :type domain_names: list
        :param domain_names: The names of the domains you want to include in
            the response.

        """
        params = {}
        if domain_names is not None:
            self.build_list_params(params,
                                   domain_names,
                                   'DomainNames.member')
        return self._make_request(
            action='DescribeDomains',
            verb='POST',
            path='/', params=params)

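# A minimal sketch of unwrapping a layer1 response (the domain name is
# hypothetical, and configured credentials are assumed): every call returns
# the parsed JSON envelope, so the payload sits under
# <Action>Response -> <Action>Result.
conn = CloudSearchConnection()
resp = conn.describe_domains(['mydomain'])
status_list = (resp['DescribeDomainsResponse']
               ['DescribeDomainsResult']
               ['DomainStatusList'])
for status in status_list:
    print('%s requires indexing: %s'
          % (status['DomainName'], status['RequiresIndexDocuments']))
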
    def describe_expressions(self, domain_name, expression_names=None,
                             deployed=None):
        """
        Gets the expressions configured for the search domain. Can be
        limited to specific expressions by name. By default, shows all
        expressions and includes any pending changes to the
        configuration. Set the `Deployed` option to `True` to show the
        active configuration and exclude pending changes. For more
        information, see `Configuring Expressions`_ in the Amazon
        CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: The name of the domain you want to describe.

        :type expression_names: list
        :param expression_names: Limits the `DescribeExpressions` response to
            the specified expressions. If not specified, all expressions are
            shown.

        :type deployed: boolean
        :param deployed: Whether to display the deployed configuration
            (`True`) or include any pending changes (`False`). Defaults to
            `False`.

        """
        params = {'DomainName': domain_name, }
        if expression_names is not None:
            self.build_list_params(params,
                                   expression_names,
                                   'ExpressionNames.member')
        if deployed is not None:
            params['Deployed'] = str(deployed).lower()
        return self._make_request(
            action='DescribeExpressions',
            verb='POST',
            path='/', params=params)

    def describe_index_fields(self, domain_name, field_names=None,
                              deployed=None):
        """
        Gets information about the index fields configured for the
        search domain. Can be limited to specific fields by name. By
        default, shows all fields and includes any pending changes to
        the configuration. Set the `Deployed` option to `True` to show
        the active configuration and exclude pending changes. For more
        information, see `Getting Domain Information`_ in the Amazon
        CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: The name of the domain you want to describe.

        :type field_names: list
        :param field_names: A list of the index fields you want to describe.
            If not specified, information is returned for all configured
            index fields.

        :type deployed: boolean
        :param deployed: Whether to display the deployed configuration
            (`True`) or include any pending changes (`False`). Defaults to
            `False`.

        """
        params = {'DomainName': domain_name, }
        if field_names is not None:
            self.build_list_params(params,
                                   field_names,
                                   'FieldNames.member')
        if deployed is not None:
            params['Deployed'] = str(deployed).lower()
        return self._make_request(
            action='DescribeIndexFields',
            verb='POST',
            path='/', params=params)

    def describe_scaling_parameters(self, domain_name):
        """
        Gets the scaling parameters configured for a domain. A
        domain's scaling parameters specify the desired search
        instance type and replication count. For more information, see
        `Configuring Scaling Options`_ in the Amazon CloudSearch
        Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        """
        params = {'DomainName': domain_name, }
        return self._make_request(
            action='DescribeScalingParameters',
            verb='POST',
            path='/', params=params)

    def describe_service_access_policies(self, domain_name, deployed=None):
        """
        Gets information about the access policies that control access
        to the domain's document and search endpoints. By default,
        shows the configuration with any pending changes. Set the
        `Deployed` option to `True` to show the active configuration
        and exclude pending changes. For more information, see
        `Configuring Access for a Search Domain`_ in the Amazon
        CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: The name of the domain you want to describe.

        :type deployed: boolean
        :param deployed: Whether to display the deployed configuration
            (`True`) or include any pending changes (`False`). Defaults to
            `False`.

        """
        params = {'DomainName': domain_name, }
        if deployed is not None:
            params['Deployed'] = str(deployed).lower()
        return self._make_request(
            action='DescribeServiceAccessPolicies',
            verb='POST',
            path='/', params=params)

    def describe_suggesters(self, domain_name, suggester_names=None,
                            deployed=None):
        """
        Gets the suggesters configured for a domain. A suggester
        enables you to display possible matches before users finish
        typing their queries. Can be limited to specific suggesters by
        name. By default, shows all suggesters and includes any
        pending changes to the configuration. Set the `Deployed`
        option to `True` to show the active configuration and exclude
        pending changes. For more information, see `Getting Search
        Suggestions`_ in the Amazon CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: The name of the domain you want to describe.

        :type suggester_names: list
        :param suggester_names: The suggesters you want to describe.

        :type deployed: boolean
        :param deployed: Whether to display the deployed configuration
            (`True`) or include any pending changes (`False`). Defaults to
            `False`.

        """
        params = {'DomainName': domain_name, }
        if suggester_names is not None:
            self.build_list_params(params,
                                   suggester_names,
                                   'SuggesterNames.member')
        if deployed is not None:
            params['Deployed'] = str(deployed).lower()
        return self._make_request(
            action='DescribeSuggesters',
            verb='POST',
            path='/', params=params)

    def index_documents(self, domain_name):
        """
        Tells the search domain to start indexing its documents using
        the latest indexing options. This operation must be invoked to
        activate options whose OptionStatus is
        `RequiresIndexDocuments`.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        """
        params = {'DomainName': domain_name, }
        return self._make_request(
            action='IndexDocuments',
            verb='POST',
            path='/', params=params)

    def list_domain_names(self):
        """
        Lists all search domains owned by an account.
        """
        params = {}
        return self._make_request(
            action='ListDomainNames',
            verb='POST',
            path='/', params=params)

    def update_availability_options(self, domain_name, multi_az):
        """
        Configures the availability options for a domain. Enabling the
        Multi-AZ option expands an Amazon CloudSearch domain to an
        additional Availability Zone in the same Region to increase
        fault tolerance in the event of a service disruption. Changes
        to the Multi-AZ option can take about half an hour to become
        active. For more information, see `Configuring Availability
        Options`_ in the Amazon CloudSearch Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type multi_az: boolean
        :param multi_az: You expand an existing search domain to a second
            Availability Zone by setting the Multi-AZ option to `True`.
            Similarly, you can turn off the Multi-AZ option to downgrade the
            domain to a single Availability Zone by setting the Multi-AZ
            option to `False`.

        """
        params = {'DomainName': domain_name, 'MultiAZ': multi_az, }
        return self._make_request(
            action='UpdateAvailabilityOptions',
            verb='POST',
            path='/', params=params)

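# A one-line sketch, reusing the connection from the example above:
# MultiAZ rides along as an ordinary query parameter.
conn.update_availability_options('mydomain', multi_az=True)
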
    def update_scaling_parameters(self, domain_name, scaling_parameters):
        """
        Configures scaling parameters for a domain. A domain's scaling
        parameters specify the desired search instance type and
        replication count. Amazon CloudSearch will still automatically
        scale your domain based on the volume of data and traffic, but
        not below the desired instance type and replication count. If
        the Multi-AZ option is enabled, these values control the
        resources used per Availability Zone. For more information,
        see `Configuring Scaling Options`_ in the Amazon CloudSearch
        Developer Guide.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type scaling_parameters: dict
        :param scaling_parameters: The desired instance type and desired
            number of replicas of each index partition.

        """
        params = {'DomainName': domain_name, }
        self.build_complex_param(params, 'ScalingParameters',
                                 scaling_parameters)
        return self._make_request(
            action='UpdateScalingParameters',
            verb='POST',
            path='/', params=params)

    def update_service_access_policies(self, domain_name, access_policies):
        """
        Configures the access rules that control access to the
        domain's document and search endpoints. For more information,
        see `Configuring Access for an Amazon CloudSearch Domain`_.

        :type domain_name: string
        :param domain_name: A string that represents the name of a domain.
            Domain names are unique across the domains owned by an account
            within an AWS region. Domain names start with a letter or number
            and can contain the following characters: a-z (lowercase), 0-9,
            and - (hyphen).

        :type access_policies: string
        :param access_policies: The access rules you want to configure. These
            rules replace any existing rules.

        """
        params = {
            'DomainName': domain_name,
            'AccessPolicies': access_policies,
        }
        return self._make_request(
            action='UpdateServiceAccessPolicies',
            verb='POST',
            path='/', params=params)

    def build_complex_param(self, params, label, value):
        """Serialize a structure.

        For example::

            param_type = 'structure'
            label = 'IndexField'
            value = {'IndexFieldName': 'a', 'IntOptions': {'DefaultValue': 5}}

        would result in the params dict being updated with these params::

            IndexField.IndexFieldName = a
            IndexField.IntOptions.DefaultValue = 5

        :type params: dict
        :param params: The params dict. The complex list params
            will be added to this dict.

        :type label: str
        :param label: String label for param key

        :type value: any
        :param value: The value to serialize
        """
        for k, v in value.items():
            if isinstance(v, dict):
                # Flatten nested structures into dotted keys such as
                # IndexField.IntOptions.DefaultValue.
                self.build_complex_param(params, label + '.' + k, v)
            elif isinstance(v, bool):
                params['%s.%s' % (label, k)] = v and 'true' or 'false'
            else:
                params['%s.%s' % (label, k)] = v

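# A short worked example of the flattening performed by build_complex_param
# (standalone sketch with hypothetical values):
conn = CloudSearchConnection()
params = {}
conn.build_complex_param(params, 'IndexField',
                         {'IndexFieldName': 'year',
                          'IntOptions': {'DefaultValue': 5,
                                         'FacetEnabled': True}})
# params now holds:
#   {'IndexField.IndexFieldName': 'year',
#    'IndexField.IntOptions.DefaultValue': 5,
#    'IndexField.IntOptions.FacetEnabled': 'true'}
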
    def _make_request(self, action, verb, path, params):
        params['ContentType'] = 'JSON'
        response = self.make_request(action=action, verb='POST',
                                     path='/', params=params)
        body = response.read().decode('utf-8')
        boto.log.debug(body)
        if response.status == 200:
            return json.loads(body)
        else:
            json_body = json.loads(body)
            fault_name = json_body.get('Error', {}).get('Code', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
93
awx/lib/site-packages/boto/cloudsearch2/layer2.py
Normal file
@ -0,0 +1,93 @@
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

from boto.cloudsearch2.layer1 import CloudSearchConnection
from boto.cloudsearch2.domain import Domain
from boto.compat import six


class Layer2(object):

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 host=None, debug=0, session_token=None, region=None,
                 validate_certs=True):

        if isinstance(region, six.string_types):
            import boto.cloudsearch2
            for region_info in boto.cloudsearch2.regions():
                if region_info.name == region:
                    region = region_info
                    break

        self.layer1 = CloudSearchConnection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            is_secure=is_secure,
            port=port,
            proxy=proxy,
            proxy_port=proxy_port,
            host=host,
            debug=debug,
            security_token=session_token,
            region=region,
            validate_certs=validate_certs)

    def list_domains(self, domain_names=None):
        """
        Return a list of objects for each domain defined in the
        current account.

        :rtype: list of :class:`boto.cloudsearch2.domain.Domain`
        """
        domain_data = self.layer1.describe_domains(domain_names)

        domain_data = (domain_data['DescribeDomainsResponse']
                       ['DescribeDomainsResult']
                       ['DomainStatusList'])

        return [Domain(self.layer1, data) for data in domain_data]

    def create_domain(self, domain_name):
        """
        Create a new CloudSearch domain and return the corresponding object.

        :return: Domain object for the newly created domain
        :rtype: :class:`boto.cloudsearch2.domain.Domain`
        """
        data = self.layer1.create_domain(domain_name)
        return Domain(self.layer1, data['CreateDomainResponse']
                                       ['CreateDomainResult']
                                       ['DomainStatus'])

    def lookup(self, domain_name):
        """
        Look up a single domain.

        :param domain_name: The name of the domain to look up
        :type domain_name: str

        :return: Domain object, or None if the domain isn't found
        :rtype: :class:`boto.cloudsearch2.domain.Domain`
        """
        domains = self.list_domains(domain_names=[domain_name])
        if len(domains) > 0:
            return domains[0]
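# A minimal end-to-end sketch (hypothetical domain name, credentials
# assumed to be configured): connect_cloudsearch2 hands back a Layer2,
# which wraps the layer1 connection above.
import boto

conn = boto.connect_cloudsearch2(region='us-east-1')
domain = conn.lookup('mydomain')
if domain is None:
    domain = conn.create_domain('mydomain')
print(domain)  # <Domain: mydomain>
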
233
awx/lib/site-packages/boto/cloudsearch2/optionstatus.py
Normal file
@ -0,0 +1,233 @@
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

from boto.compat import json


class OptionStatus(dict):
    """
    Presents a combination of status fields (defined below), which are
    accessed as attributes, and option values, which are stored in the
    native Python dictionary. In this class, the option values are
    merged from a JSON object that is stored as the Option part of
    the object.

    :ivar domain_name: The name of the domain this option is associated with.
    :ivar creation_date: A timestamp for when this option was created.
    :ivar status: The state of processing a change to an option.
        Possible values:

        * RequiresIndexDocuments: the option's latest value will not
          be visible in searches until IndexDocuments has been called
          and indexing is complete.
        * Processing: the option's latest value is not yet visible in
          all searches but is in the process of being activated.
        * Active: the option's latest value is completely visible.

    :ivar update_date: A timestamp for when this option was updated.
    :ivar update_version: A unique integer that indicates when this
        option was last updated.
    """

    def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None,
                 save_fn=None):
        self.domain = domain
        self.refresh_fn = refresh_fn
        self.refresh_key = refresh_key
        self.save_fn = save_fn
        self.refresh(data)

    def _update_status(self, status):
        self.creation_date = status['CreationDate']
        self.status = status['State']
        self.update_date = status['UpdateDate']
        self.update_version = int(status['UpdateVersion'])

    def _update_options(self, options):
        if options:
            self.update(options)

    def refresh(self, data=None):
        """
        Refresh the local state of the object. You can either pass
        new state data in as the parameter ``data`` or, if that parameter
        is omitted, the state data will be retrieved from CloudSearch.
        """
        if not data:
            if self.refresh_fn:
                data = self.refresh_fn(self.domain.name)

        if data and self.refresh_key:
            # Attempt to pull out the right nested bag of data
            for key in self.refresh_key:
                data = data[key]
        if data:
            self._update_status(data['Status'])
            self._update_options(data['Options'])

    def to_json(self):
        """
        Return the JSON representation of the options as a string.
        """
        return json.dumps(self)

    def save(self):
        """
        Write the current state of the local object back to the
        CloudSearch service.
        """
        if self.save_fn:
            data = self.save_fn(self.domain.name, self.to_json())
            self.refresh(data)


class IndexFieldStatus(OptionStatus):
    def save(self):
        pass


class AvailabilityOptionsStatus(OptionStatus):
    def save(self):
        pass


class ScalingParametersStatus(IndexFieldStatus):
    pass


class ExpressionStatus(IndexFieldStatus):
    pass


class ServicePoliciesStatus(OptionStatus):

    def new_statement(self, arn, ip):
        """
        Returns a new policy statement that will allow
        access to the service described by ``arn`` by the
        ip specified in ``ip``.

        :type arn: string
        :param arn: The Amazon Resource Name (ARN) of the
            service you wish to provide access to. This would be
            either the search service or the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        return {
            "Effect": "Allow",
            "Action": "*",  # Docs say use GET, but denies unless *
            "Resource": arn,
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": [ip]
                }
            }
        }

    def _allow_ip(self, arn, ip):
        if 'Statement' not in self:
            s = self.new_statement(arn, ip)
            self['Statement'] = [s]
            self.save()
        else:
            add_statement = True
            for statement in self['Statement']:
                if statement['Resource'] == arn:
                    for condition_name in statement['Condition']:
                        if condition_name == 'IpAddress':
                            add_statement = False
                            condition = statement['Condition'][condition_name]
                            if ip not in condition['aws:SourceIp']:
                                condition['aws:SourceIp'].append(ip)

            if add_statement:
                s = self.new_statement(arn, ip)
                self['Statement'].append(s)
            self.save()

    def allow_search_ip(self, ip):
        """
        Add the provided IP address or CIDR block to the list of
        allowable addresses for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def allow_doc_ip(self, ip):
        """
        Add the provided IP address or CIDR block to the list of
        allowable addresses for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def _disallow_ip(self, arn, ip):
        if 'Statement' not in self:
            return
        need_update = False
        for statement in self['Statement']:
            if statement['Resource'] == arn:
                for condition_name in statement['Condition']:
                    if condition_name == 'IpAddress':
                        condition = statement['Condition'][condition_name]
                        if ip in condition['aws:SourceIp']:
                            condition['aws:SourceIp'].remove(ip)
                            need_update = True
        if need_update:
            self.save()

    def disallow_search_ip(self, ip):
        """
        Remove the provided IP address or CIDR block from the list of
        allowable addresses for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to revoke access
            for.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)

    def disallow_doc_ip(self, ip):
        """
        Remove the provided IP address or CIDR block from the list of
        allowable addresses for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to revoke access
            for.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)
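# A minimal sketch (hypothetical CIDR blocks), assuming a Domain object as
# in the earlier examples: ServicePoliciesStatus is a dict subclass, so
# these helpers edit the policy document in place and persist it through
# update_service_access_policies via save().
policies = domain.get_service_access_policies()
policies.allow_search_ip('192.0.2.0/24')   # grant search access
policies.disallow_doc_ip('198.51.100.7')   # revoke document upload access
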
363
awx/lib/site-packages/boto/cloudsearch2/search.py
Normal file
@ -0,0 +1,363 @@
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from math import ceil
from boto.compat import json, map, six
import requests


SIMPLE = 'simple'
STRUCTURED = 'structured'
LUCENE = 'lucene'
DISMAX = 'dismax'


class SearchServiceException(Exception):
    pass


class SearchResults(object):
    def __init__(self, **attrs):
        self.rid = attrs['status']['rid']
        self.time_ms = attrs['status']['time-ms']
        self.hits = attrs['hits']['found']
        self.docs = attrs['hits']['hit']
        self.start = attrs['hits']['start']
        self.query = attrs['query']
        self.search_service = attrs['search_service']

        self.facets = {}
        if 'facets' in attrs:
            for (facet, values) in attrs['facets'].items():
                if 'buckets' in values:
                    # Map each facet bucket to a value -> count entry.
                    self.facets[facet] = dict(
                        (bucket['value'], bucket['count'])
                        for bucket in values.get('buckets', []))

        self.num_pages_needed = ceil(self.hits / self.query.real_size)

    def __len__(self):
        return len(self.docs)

    def __iter__(self):
        return iter(self.docs)

    def next_page(self):
        """Call CloudSearch to get the next page of search results

        :rtype: :class:`boto.cloudsearch2.search.SearchResults`
        :return: the following page of search results
        """
        if self.query.page <= self.num_pages_needed:
            self.query.start += self.query.real_size
            self.query.page += 1
            return self.search_service(self.query)
        else:
            raise StopIteration

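# A minimal pagination sketch, assuming a Domain object as in the earlier
# examples and that search() returns a SearchResults (the query string and
# field names are hypothetical). Each hit carries an 'id' plus any
# requested return fields.
search_service = domain.get_search_service()
results = search_service.search(q='star wars', return_fields=['headline'])
while True:
    for doc in results:
        print(doc['id'])
    try:
        results = results.next_page()
    except StopIteration:
        break
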
class Query(object):

    RESULTS_PER_PAGE = 500

    def __init__(self, q=None, parser=None, fq=None, expr=None,
                 return_fields=None, size=10, start=0, sort=None,
                 facet=None, highlight=None, partial=None, options=None):

        self.q = q
        self.parser = parser
        self.fq = fq
        self.expr = expr or {}
        self.sort = sort or []
        self.return_fields = return_fields or []
        self.start = start
        self.facet = facet or {}
        self.highlight = highlight or {}
        self.partial = partial
        self.options = options
        self.page = 0
        self.update_size(size)

    def update_size(self, new_size):
        self.size = new_size
        # Cap the per-request page size at RESULTS_PER_PAGE; a size of 0
        # means "as many as possible" and also uses the maximum.
        if self.size > Query.RESULTS_PER_PAGE or self.size == 0:
            self.real_size = Query.RESULTS_PER_PAGE
        else:
            self.real_size = self.size

    def to_params(self):
        """Transform search parameters from instance properties to a dictionary

        :rtype: dict
        :return: search parameters
        """
        params = {'start': self.start, 'size': self.real_size}

        if self.q:
            params['q'] = self.q

        if self.parser:
            params['q.parser'] = self.parser

        if self.fq:
            params['fq'] = self.fq

        if self.expr:
            for k, v in six.iteritems(self.expr):
                params['expr.%s' % k] = v

        if self.facet:
            for k, v in six.iteritems(self.facet):
                if not isinstance(v, six.string_types):
                    v = json.dumps(v)
                params['facet.%s' % k] = v

        if self.highlight:
            for k, v in six.iteritems(self.highlight):
                params['highlight.%s' % k] = v

        if self.options:
            params['options'] = self.options

        if self.return_fields:
            params['return'] = ','.join(self.return_fields)

        if self.partial is not None:
            params['partial'] = self.partial

        if self.sort:
            params['sort'] = ','.join(self.sort)

        return params

class SearchConnection(object):
|
||||
|
||||
def __init__(self, domain=None, endpoint=None):
|
||||
self.domain = domain
|
||||
self.endpoint = endpoint
|
||||
self.session = requests.Session()
|
||||
|
||||
if not endpoint:
|
||||
self.endpoint = domain.search_service_endpoint
|
||||
|
||||
def build_query(self, q=None, parser=None, fq=None, rank=None, return_fields=None,
|
||||
size=10, start=0, facet=None, highlight=None, sort=None,
|
||||
partial=None, options=None):
|
||||
return Query(q=q, parser=parser, fq=fq, expr=rank, return_fields=return_fields,
|
||||
size=size, start=start, facet=facet, highlight=highlight,
|
||||
sort=sort, partial=partial, options=options)
|
||||
|
||||
def search(self, q=None, parser=None, fq=None, rank=None, return_fields=None,
|
||||
size=10, start=0, facet=None, highlight=None, sort=None, partial=None,
|
||||
options=None):
|
||||
"""
|
||||
Send a query to CloudSearch
|
||||
|
||||
Each search query should use at least the q or bq argument to specify
|
||||
the search parameter. The other options are used to specify the
|
||||
criteria of the search.
|
||||
|
||||
:type q: string
|
||||
:param q: A string to search the default search fields for.
|
||||
|
||||
:type parser: string
|
||||
:param parser: The parser to use. 'simple', 'structured', 'lucene', 'dismax'
|
||||
|
||||
:type fq: string
|
||||
:param fq: The filter query to use.
|
||||
|
||||
:type sort: List of strings
|
||||
:param sort: A list of fields or rank expressions used to order the
|
||||
search results. Order is handled by adding 'desc' or 'asc' after the field name.
|
||||
``['year desc', 'author asc']``
|
||||
|
||||
:type return_fields: List of strings
|
||||
:param return_fields: A list of fields which should be returned by the
|
||||
search. If this field is not specified, only IDs will be returned.
|
||||
``['headline']``
|
||||
|
||||
:type size: int
|
||||
:param size: Number of search results to specify
|
||||
|
||||
:type start: int
|
||||
:param start: Offset of the first search result to return (can be used
|
||||
for paging)
|
||||
|
||||
:type facet: dict
|
||||
:param facet: Dictionary of fields for which facets should be returned
|
||||
The facet value is string of JSON options
|
||||
``{'year': '{sort:"bucket", size:3}', 'genres': '{buckets:["Action","Adventure","Sci-Fi"]}'}``
|
||||
|
||||
:type highlight: dict
|
||||
:param highlight: Dictionary of fields for which highlights should be returned
|
||||
The facet value is string of JSON options
|
||||
``{'genres': '{format:'text',max_phrases:2,pre_tag:'<b>',post_tag:'</b>'}'}``
|
||||
|
||||
:type partial: bool
|
||||
:param partial: Should partial results from a partioned service be returned if
|
||||
one or more index partitions are unreachable.
|
||||
|
||||
:type options: str
|
||||
:param options: Options for the query parser specified in *parser*.
|
||||
Specified as a string in JSON format.
|
||||
``{fields: ['title^5', 'description']}``
|
||||
|
||||
:rtype: :class:`boto.cloudsearch2.search.SearchResults`
|
||||
:return: Returns the results of this search
|
||||
|
||||
The following examples all assume we have indexed a set of documents
|
||||
with fields: *author*, *date*, *headline*
|
||||
|
||||
A simple search will look for documents whose default text search
|
||||
fields will contain the search word exactly:
|
||||
|
||||
>>> search(q='Tim') # Return documents with the word Tim in them (but not Timothy)
|
||||
|
||||
A simple search with more keywords will return documents whose default
|
||||
text search fields contain the search strings together or separately.
|
||||
|
||||
>>> search(q='Tim apple') # Will match "tim" and "apple"
|
||||
|
||||
More complex searches require the boolean search operator.
|
||||
|
||||
Wildcard searches can be used to search for any words that start with
|
||||
the search string.
|
||||
|
||||
>>> search(q="'Tim*'") # Return documents with words like Tim or Timothy)
|
||||
|
||||
Search terms can also be combined. Allowed operators are "and", "or",
|
||||
"not", "field", "optional", "token", "phrase", or "filter"
|
||||
|
||||
>>> search(q="(and 'Tim' (field author 'John Smith'))", parser='structured')
|
||||
|
||||
Facets allow you to show classification information about the search
|
||||
results. For example, you can retrieve the authors who have written
|
||||
about Tim with a max of 3
|
||||
|
||||
>>> search(q='Tim', facet={'Author': '{sort:"bucket", size:3}'})
|
||||
"""
|
||||
|
||||
query = self.build_query(q=q, parser=parser, fq=fq, rank=rank,
|
||||
return_fields=return_fields,
|
||||
size=size, start=start, facet=facet,
|
||||
highlight=highlight, sort=sort,
|
||||
partial=partial, options=options)
|
||||
return self(query)
|
||||
|
||||
def __call__(self, query):
|
||||
"""Make a call to CloudSearch
|
||||
|
||||
:type query: :class:`boto.cloudsearch2.search.Query`
|
||||
:param query: A group of search criteria
|
||||
|
||||
:rtype: :class:`boto.cloudsearch2.search.SearchResults`
|
||||
:return: search results
|
||||
"""
|
||||
api_version = '2013-01-01'
|
||||
if self.domain:
|
||||
api_version = self.domain.layer1.APIVersion
|
||||
url = "http://%s/%s/search" % (self.endpoint, api_version)
|
||||
params = query.to_params()
|
||||
|
||||
r = self.session.get(url, params=params)
|
||||
_body = r.content.decode('utf-8')
|
||||
try:
|
||||
data = json.loads(_body)
|
||||
except ValueError:
|
||||
if r.status_code == 403:
|
||||
msg = ''
|
||||
import re
|
||||
g = re.search('<html><body><h1>403 Forbidden</h1>([^<]+)<', _body)
|
||||
try:
|
||||
msg = ': %s' % (g.groups()[0].strip())
|
||||
except AttributeError:
|
||||
pass
|
||||
raise SearchServiceException('Authentication error from Amazon%s' % msg)
|
||||
raise SearchServiceException("Got non-json response from Amazon. %s" % _body, query)
|
||||
|
||||
if 'messages' in data and 'error' in data:
|
||||
for m in data['messages']:
|
||||
if m['severity'] == 'fatal':
|
||||
raise SearchServiceException("Error processing search %s "
|
||||
"=> %s" % (params, m['message']), query)
|
||||
elif 'error' in data:
|
||||
raise SearchServiceException("Unknown error processing search %s"
|
||||
% json.dumps(data), query)
|
||||
|
||||
data['query'] = query
|
||||
data['search_service'] = self
|
||||
|
||||
return SearchResults(**data)
|
||||
|
||||
def get_all_paged(self, query, per_page):
|
||||
"""Get a generator to iterate over all pages of search results
|
||||
|
||||
:type query: :class:`boto.cloudsearch2.search.Query`
|
||||
:param query: A group of search criteria
|
||||
|
||||
:type per_page: int
|
||||
:param per_page: Number of docs in each :class:`boto.cloudsearch2.search.SearchResults` object.
|
||||
|
||||
:rtype: generator
|
||||
:return: Generator containing :class:`boto.cloudsearch2.search.SearchResults`
|
||||
"""
|
||||
query.update_size(per_page)
|
||||
page = 0
|
||||
num_pages_needed = 0
|
||||
while page <= num_pages_needed:
|
||||
results = self(query)
|
||||
num_pages_needed = results.num_pages_needed
|
||||
yield results
|
||||
query.start += query.real_size
|
||||
page += 1
|
||||
|
||||
def get_all_hits(self, query):
|
||||
"""Get a generator to iterate over all search results
|
||||
|
||||
Transparently handles the results paging from Cloudsearch
|
||||
search results so even if you have many thousands of results
|
||||
you can iterate over all results in a reasonably efficient
|
||||
manner.
|
||||
|
||||
:type query: :class:`boto.cloudsearch2.search.Query`
|
||||
:param query: A group of search criteria
|
||||
|
||||
:rtype: generator
|
||||
:return: All docs matching query
|
||||
"""
|
||||
page = 0
|
||||
num_pages_needed = 0
|
||||
while page <= num_pages_needed:
|
||||
results = self(query)
|
||||
num_pages_needed = results.num_pages_needed
|
||||
for doc in results:
|
||||
yield doc
|
||||
query.start += query.real_size
|
||||
page += 1
|
||||
|
||||
def get_num_hits(self, query):
|
||||
"""Return the total number of hits for query
|
||||
|
||||
:type query: :class:`boto.cloudsearch2.search.Query`
|
||||
:param query: a group of search criteria
|
||||
|
||||
:rtype: int
|
||||
:return: Total number of hits for query
|
||||
"""
|
||||
query.update_size(1)
|
||||
return self(query).hits
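
A quick usage sketch of the new module (the endpoint and field names below are hypothetical, not part of this diff)::

    from boto.cloudsearch2.search import SearchConnection

    conn = SearchConnection(
        endpoint='search-demo-xxxx.us-east-1.cloudsearch.amazonaws.com')

    # One page of results, faceted on an assumed 'author' field.
    results = conn.search(q='Tim', facet={'author': '{sort:"bucket", size:3}'})
    print(results.hits, results.facets)

    # Iterate over every matching document; paging is handled internally.
    query = conn.build_query(q='Tim', size=100)
    for doc in conn.get_all_hits(query):
        print(doc['id'])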
@ -20,16 +20,12 @@
# IN THE SOFTWARE.
#

try:
    import json
except ImportError:
    import simplejson as json

import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudtrail import exceptions
from boto.compat import json


class CloudTrailConnection(AWSQueryConnection):
@ -344,7 +340,7 @@ class CloudTrailConnection(AWSQueryConnection):
                                 headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read()
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
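
The recurring ``response.read().decode('utf-8')`` change is what makes these JSON service layers work on Python 3, where ``read()`` yields bytes but the JSON parser wants text. A minimal, standalone sketch::

    import json

    raw = b'{"trailList": []}'       # what response.read() returns on Python 3
    data = json.loads(raw.decode('utf-8'))
    print(data['trailList'])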
@ -19,6 +19,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os

# This allows boto modules to say "from boto.compat import json". This is
# preferred so that all modules don't have to repeat this idiom.
@ -26,3 +27,41 @@ try:
    import simplejson as json
except ImportError:
    import json


# Switch to use encodebytes, which deprecates encodestring in Python 3
try:
    from base64 import encodebytes
except ImportError:
    from base64 import encodestring as encodebytes


# If running in Google App Engine there is no "user" and
# os.path.expanduser() will fail. Attempt to detect this case and use a
# no-op expanduser function in this case.
try:
    os.path.expanduser('~')
    expanduser = os.path.expanduser
except (AttributeError, ImportError):
    # This is probably running on App Engine.
    expanduser = (lambda x: x)

from boto.vendored import six

from boto.vendored.six import BytesIO, StringIO
from boto.vendored.six.moves import filter, http_client, map, _thread, \
    urllib, zip
from boto.vendored.six.moves.queue import Queue
from boto.vendored.six.moves.urllib.parse import parse_qs, quote, unquote, \
    urlparse, urlsplit
from boto.vendored.six.moves.urllib.request import urlopen

if six.PY3:
    # StandardError was removed, so use the base exception type instead
    StandardError = Exception
    long_type = int
    from configparser import ConfigParser
else:
    StandardError = StandardError
    long_type = long
    from ConfigParser import SafeConfigParser as ConfigParser
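
Downstream boto modules are expected to pull these shims from one place instead of branching on the interpreter themselves; a sketch using only names defined above::

    from boto.compat import six, json, urlparse, encodebytes, long_type

    parts = urlparse('https://ec2.us-east-1.amazonaws.com/')  # same call on 2 and 3
    payload = json.dumps({'count': long_type(1)})
    token = encodebytes(b'user:pass')  # encodestring on py2, encodebytes on py3
    if six.PY3:
        assert isinstance(token, bytes)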
@ -42,32 +42,26 @@
"""
Handles basic connections to AWS
"""

from __future__ import with_statement
import base64
from datetime import datetime
import errno
import httplib
import os
import Queue
import random
import re
import socket
import sys
import time
import urllib
import urlparse
import xml.sax
import copy

import auth
import auth_handler
from boto import auth
from boto import auth_handler
import boto
import boto.utils
import boto.handler
import boto.cacerts

from boto import config, UserAgent
from boto.compat import six, http_client, urlparse, quote, encodebytes
from boto.exception import AWSConnectionError
from boto.exception import BotoClientError
from boto.exception import BotoServerError
@ -165,7 +159,7 @@ class HostConnectionPool(object):

    def _conn_ready(self, conn):
        """
        There is a nice state diagram at the top of httplib.py. It
        There is a nice state diagram at the top of http_client.py. It
        indicates that once the response headers have been read (which
        _mexe does before adding the connection to the pool), a
        response is attached to the connection, and it stays there
@ -370,11 +364,13 @@ class HTTPRequest(object):
                                              self.headers, self.body))

    def authorize(self, connection, **kwargs):
        for key in self.headers:
            val = self.headers[key]
            if isinstance(val, unicode):
                safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~'
                self.headers[key] = urllib.quote_plus(val.encode('utf-8'), safe)
        if not getattr(self, '_headers_quoted', False):
            for key in self.headers:
                val = self.headers[key]
                if isinstance(val, six.text_type):
                    safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~'
                    self.headers[key] = quote(val.encode('utf-8'), safe)
            setattr(self, '_headers_quoted', True)

        connection._auth_handler.add_auth(self, **kwargs)
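
The ``_headers_quoted`` flag guards against quoting the same header twice when a request is retried. A sketch of the underlying transformation (header name and value are illustrative)::

    from boto.compat import six, quote

    safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~'
    headers = {'x-amz-meta-title': u'r\u00e9sum\u00e9'}
    for key in headers:
        val = headers[key]
        if isinstance(val, six.text_type):
            headers[key] = quote(val.encode('utf-8'), safe)
    print(headers['x-amz-meta-title'])  # r%C3%A9sum%C3%A9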
@ -384,20 +380,20 @@
        if 'Content-Length' not in self.headers:
            if 'Transfer-Encoding' not in self.headers or \
                    self.headers['Transfer-Encoding'] != 'chunked':
                self.headers['Content-Length'] = str(len(self.body))
                self.headers['Content-Length'] = len(self.body)


class HTTPResponse(httplib.HTTPResponse):
class HTTPResponse(http_client.HTTPResponse):

    def __init__(self, *args, **kwargs):
        httplib.HTTPResponse.__init__(self, *args, **kwargs)
        http_client.HTTPResponse.__init__(self, *args, **kwargs)
        self._cached_response = ''

    def read(self, amt=None):
        """Read the response.

        This method does not have the same behavior as
        httplib.HTTPResponse.read. Instead, if this method is called with
        http_client.HTTPResponse.read. Instead, if this method is called with
        no ``amt`` arg, then the response body will be cached. Subsequent
        calls to ``read()`` with no args **will return the cached response**.

@ -410,10 +406,10 @@ class HTTPResponse(httplib.HTTPResponse):
            # will return the full body. Note that this behavior only
            # happens if the amt arg is not specified.
            if not self._cached_response:
                self._cached_response = httplib.HTTPResponse.read(self)
                self._cached_response = http_client.HTTPResponse.read(self)
            return self._cached_response
        else:
            return httplib.HTTPResponse.read(self, amt)
            return http_client.HTTPResponse.read(self, amt)


class AWSAuthConnection(object):
@ -446,7 +442,7 @@ class AWSAuthConnection(object):
        :type https_connection_factory: list or tuple
        :param https_connection_factory: A pair of an HTTP connection
            factory and the exceptions to catch. The factory should have
            a similar interface to L{httplib.HTTPSConnection}.
            a similar interface to L{http_client.HTTPSConnection}.

        :param str proxy: Address/hostname for a proxy server

@ -505,9 +501,9 @@ class AWSAuthConnection(object):
            self.port = PORTS_BY_SECURITY[is_secure]

        self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
        # define exceptions from httplib that we want to catch and retry
        self.http_exceptions = (httplib.HTTPException, socket.error,
                                socket.gaierror, httplib.BadStatusLine)
        # define exceptions from http_client that we want to catch and retry
        self.http_exceptions = (http_client.HTTPException, socket.error,
                                socket.gaierror, http_client.BadStatusLine)
        # define subclasses of the above that are not retryable.
        self.http_unretryable_exceptions = []
        if HAVE_HTTPS_CONNECTION:
@ -528,12 +524,12 @@ class AWSAuthConnection(object):
        self.host = host
        self.path = path
        # if the value passed in for debug
        if not isinstance(debug, (int, long)):
        if not isinstance(debug, six.integer_types):
            debug = 0
        self.debug = config.getint('Boto', 'debug', debug)
        self.host_header = None

        # Timeout used to tell httplib how long to wait for socket timeouts.
        # Timeout used to tell http_client how long to wait for socket timeouts.
        # Default is to leave timeout unchanged, which will in turn result in
        # the socket's default global timeout being used. To specify a
        # timeout, set http_socket_timeout in Boto config. Regardless,
@ -651,7 +647,7 @@ class AWSAuthConnection(object):
            signature_host = self.host
        else:
            # This unfortunate little hack can be attributed to
            # a difference in the 2.6 version of httplib. In old
            # a difference in the 2.6 version of http_client. In old
            # versions, it would append ":443" to the hostname sent
            # in the Host header and so we needed to make sure we
            # did the same when calculating the V2 signature. In 2.6
@ -693,8 +689,8 @@ class AWSAuthConnection(object):
        self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)

        if not self.proxy_port and self.proxy:
            print "http_proxy environment variable does not specify " \
                "a port, using default"
            print("http_proxy environment variable does not specify " \
                "a port, using default")
            self.proxy_port = self.port

        self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
@ -755,7 +751,7 @@ class AWSAuthConnection(object):
                    host, ca_certs=self.ca_certificates_file,
                    **http_connection_kwargs)
            else:
                connection = httplib.HTTPSConnection(host,
                connection = http_client.HTTPSConnection(host,
                                                     **http_connection_kwargs)
        else:
            boto.log.debug('establishing HTTP connection: kwargs=%s' %
@ -766,7 +762,7 @@ class AWSAuthConnection(object):
                connection = self.https_connection_factory(host,
                                                           **http_connection_kwargs)
            else:
                connection = httplib.HTTPConnection(host,
                connection = http_client.HTTPConnection(host,
                                                    **http_connection_kwargs)
        if self.debug > 1:
            connection.set_debuglevel(self.debug)
@ -788,13 +784,13 @@ class AWSAuthConnection(object):
            host = '%s:%d' % (host, port)
        else:
            host = '%s:%d' % (self.host, self.port)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((self.proxy, int(self.proxy_port)))
            if "timeout" in self.http_connection_kwargs:
                sock.settimeout(self.http_connection_kwargs["timeout"])
        except:
            raise
        # Seems properly to use timeout for connect too
        timeout = self.http_connection_kwargs.get("timeout")
        if timeout is not None:
            sock = socket.create_connection((self.proxy,
                                             int(self.proxy_port)), timeout)
        else:
            sock = socket.create_connection((self.proxy, int(self.proxy_port)))
        boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
        sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
        sock.sendall("User-Agent: %s\r\n" % UserAgent)
@ -807,7 +803,7 @@ class AWSAuthConnection(object):
            sock.sendall("\r\n")
        else:
            sock.sendall("\r\n")
        resp = httplib.HTTPResponse(sock, strict=True, debuglevel=self.debug)
        resp = http_client.HTTPResponse(sock, strict=True, debuglevel=self.debug)
        resp.begin()

        if resp.status != 200:
@ -821,7 +817,7 @@ class AWSAuthConnection(object):
        # We can safely close the response, it duped the original socket
        resp.close()

        h = httplib.HTTPConnection(host)
        h = http_client.HTTPConnection(host)

        if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
            msg = "wrapping ssl socket for proxied connection; "
@ -843,11 +839,11 @@ class AWSAuthConnection(object):
                    hostname, cert, 'hostname mismatch')
        else:
            # Fallback for old Python without ssl.wrap_socket
            if hasattr(httplib, 'ssl'):
                sslSock = httplib.ssl.SSLSocket(sock)
            if hasattr(http_client, 'ssl'):
                sslSock = http_client.ssl.SSLSocket(sock)
            else:
                sslSock = socket.ssl(sock, None, None)
                sslSock = httplib.FakeSocket(sock, sslSock)
                sslSock = http_client.FakeSocket(sock, sslSock)

        # This is a bit unclean
        h.sock = sslSock
@ -858,7 +854,7 @@ class AWSAuthConnection(object):
        return path

    def get_proxy_auth_header(self):
        auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
        auth = encodebytes(self.proxy_user + ':' + self.proxy_pass)
        return {'Proxy-Authorization': 'Basic %s' % auth}

    def set_host_header(self, request):
@ -899,9 +895,16 @@ class AWSAuthConnection(object):
        i = 0
        connection = self.get_http_connection(request.host, request.port,
                                              self.is_secure)

        # Convert body to bytes if needed
        if not isinstance(request.body, bytes) and hasattr(request.body,
                                                           'encode'):
            request.body = request.body.encode('utf-8')

        while i <= num_retries:
            # Use binary exponential backoff to desynchronize client requests.
            next_sleep = random.random() * (2 ** i)
            next_sleep = min(random.random() * (2 ** i),
                             boto.config.get('Boto', 'max_retry_delay', 60))
            try:
                # we now re-sign each request before it is retried
                boto.log.debug('Token: %s' % self.provider.security_token)
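
The retry loop now caps its randomized exponential backoff at a configurable ceiling (60 seconds by default). The same arithmetic in isolation::

    import random

    def next_sleep(attempt, max_retry_delay=60):
        # jitter in [0, 2**attempt), never longer than the configured ceiling
        return min(random.random() * (2 ** attempt), max_retry_delay)

    for i in range(8):
        print(i, next_sleep(i))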
@ -913,6 +916,7 @@ class AWSAuthConnection(object):
                if 's3' not in self._required_auth_capability():
                    if not getattr(self, 'anon', False):
                        self.set_host_header(request)
                boto.log.debug('Final headers: %s' % request.headers)
                request.start_time = datetime.now()
                if callable(sender):
                    response = sender(connection, request.method, request.path,
@ -921,9 +925,10 @@ class AWSAuthConnection(object):
                    connection.request(request.method, request.path,
                                       request.body, request.headers)
                    response = connection.getresponse()
                boto.log.debug('Response headers: %s' % response.getheaders())
                location = response.getheader('location')
                # -- gross hack --
                # httplib gets confused with chunked responses to HEAD requests
                # http_client gets confused with chunked responses to HEAD requests
                # so I have to fake it out
                if request.method == 'HEAD' and getattr(response,
                                                        'chunked', False):
@ -941,6 +946,8 @@ class AWSAuthConnection(object):
                    msg += 'Retrying in %3.1f seconds' % next_sleep
                    boto.log.debug(msg)
                    body = response.read()
                    if isinstance(body, bytes):
                        body = body.decode('utf-8')
                elif response.status < 300 or response.status >= 400 or \
                        not location:
                    # don't return connection to the pool if response contains
@ -959,7 +966,7 @@ class AWSAuthConnection(object):
                    return response
                else:
                    scheme, request.host, request.path, \
                        params, query, fragment = urlparse.urlparse(location)
                        params, query, fragment = urlparse(location)
                    if query:
                        request.path += '?' + query
                    # urlparse can return both host and port in netloc, so if
@ -974,12 +981,12 @@ class AWSAuthConnection(object):
                                                          scheme == 'https')
                    response = None
                    continue
            except PleaseRetryException, e:
            except PleaseRetryException as e:
                boto.log.debug('encountered a retry exception: %s' % e)
                connection = self.new_http_connection(request.host, request.port,
                                                      self.is_secure)
                response = e.response
            except self.http_exceptions, e:
            except self.http_exceptions as e:
                for unretryable in self.http_unretryable_exceptions:
                    if isinstance(e, unretryable):
                        boto.log.debug(
@ -1089,7 +1096,7 @@ class AWSQueryConnection(AWSAuthConnection):
        return self._mexe(http_request)

    def build_list_params(self, params, items, label):
        if isinstance(items, basestring):
        if isinstance(items, six.string_types):
            items = [items]
        for i in range(1, len(items) + 1):
            params['%s.%d' % (label, i)] = items[i - 1]
@ -1149,6 +1156,8 @@ class AWSQueryConnection(AWSAuthConnection):
        elif response.status == 200:
            rs = ResultSet(markers)
            h = boto.handler.XmlHandler(rs, parent)
            if isinstance(body, six.text_type):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            return rs
        else:
@ -1169,6 +1178,8 @@ class AWSQueryConnection(AWSAuthConnection):
        elif response.status == 200:
            obj = cls(parent)
            h = boto.handler.XmlHandler(obj, parent)
            if isinstance(body, six.text_type):
                body = body.encode('utf-8')
            xml.sax.parseString(body, h)
            return obj
        else:
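
Note the asymmetry the last two hunks resolve: ``_mexe`` now hands back decoded text, but ``xml.sax.parseString`` still wants bytes, so the body is re-encoded just before parsing. A standalone sketch of the round trip::

    import xml.sax
    from boto.compat import six

    body = u'<root><item>ok</item></root>'   # decoded response body
    if isinstance(body, six.text_type):
        body = body.encode('utf-8')          # sax needs bytes again

    class Handler(xml.sax.ContentHandler):
        def startElement(self, name, attrs):
            print('start', name)

    xml.sax.parseString(body, Handler())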
@ -627,7 +627,7 @@ class DataPipelineConnection(AWSQueryConnection):
                                 headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read()
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
@ -20,16 +20,12 @@
# IN THE SOFTWARE.
#

try:
    import json
except ImportError:
    import simplejson as json

import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.directconnect import exceptions
from boto.compat import json


class DirectConnectConnection(AWSQueryConnection):
@ -619,7 +615,7 @@ class DirectConnectConnection(AWSQueryConnection):
                                 headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read()
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
@ -20,6 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import six


class Batch(object):
@ -176,7 +177,7 @@ class BatchList(list):
        if not self.unprocessed:
            return None

        for table_name, table_req in self.unprocessed.iteritems():
        for table_name, table_req in six.iteritems(self.unprocessed):
            table_keys = table_req['Keys']
            table = self.layer2.get_table(table_name)

@ -196,7 +197,6 @@ class BatchList(list):

        return self.submit()


    def submit(self):
        res = self.layer2.batch_get_item(self)
        if 'UnprocessedKeys' in res:
@ -259,4 +259,3 @@ class BatchWriteList(list):
            table_name, batch_dict = batch.to_dict()
            d[table_name] = batch_dict
        return d
@ -35,7 +35,7 @@ class Item(dict):
    :ivar range_key_name: The name of the RangeKey associated with this item.
    :ivar table: The Table this item belongs to.
    """

    def __init__(self, table, hash_key=None, range_key=None, attrs=None):
        self.table = table
        self._updates = None
@ -122,14 +122,14 @@ class Layer1(AWSAuthConnection):
        boto.log.debug('RequestId: %s' % request_id)
        boto.perflog.debug('%s: id=%s time=%sms',
                           headers['X-Amz-Target'], request_id, int(elapsed))
        response_body = response.read()
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        return json.loads(response_body, object_hook=object_hook)

    def _retry_handler(self, response, i, next_sleep):
        status = None
        if response.status == 400:
            response_body = response.read()
            response_body = response.read().decode('utf-8')
            boto.log.debug(response_body)
            data = json.loads(response_body)
            if self.ThruputError in data.get('__type'):
@ -160,7 +160,7 @@ class Layer1(AWSAuthConnection):
        expected_crc32 = response.getheader('x-amz-crc32')
        if self._validate_checksums and expected_crc32 is not None:
            boto.log.debug('Validating crc32 checksum for body: %s',
                           response.read())
                           response.read().decode('utf-8'))
            actual_crc32 = crc32(response.read()) & 0xffffffff
            expected_crc32 = int(expected_crc32)
            if actual_crc32 != expected_crc32:
@ -173,7 +173,8 @@ class Layer1(AWSAuthConnection):
        if i == 0:
            next_sleep = 0
        else:
            next_sleep = 0.05 * (2 ** i)
            next_sleep = min(0.05 * (2 ** i),
                             boto.config.get('Boto', 'max_retry_delay', 60))
        return next_sleep

    def list_tables(self, limit=None, start_table=None):
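
The checksum comparison above masks ``zlib.crc32`` to an unsigned 32-bit value, since on Python 2 ``crc32`` can return a signed int while the ``x-amz-crc32`` header is always unsigned. In isolation::

    from zlib import crc32

    body = b'{"TableNames": []}'
    checksum = crc32(body) & 0xffffffff  # unsigned on both Python 2 and 3
    print(checksum)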
@ -264,13 +264,13 @@ class Layer2(object):
        """
        dynamodb_key = {}
        dynamodb_value = self.dynamizer.encode(hash_key)
        if dynamodb_value.keys()[0] != schema.hash_key_type:
        if list(dynamodb_value.keys())[0] != schema.hash_key_type:
            msg = 'Hashkey must be of type: %s' % schema.hash_key_type
            raise TypeError(msg)
        dynamodb_key['HashKeyElement'] = dynamodb_value
        if range_key is not None:
            dynamodb_value = self.dynamizer.encode(range_key)
            if dynamodb_value.keys()[0] != schema.range_key_type:
            if list(dynamodb_value.keys())[0] != schema.range_key_type:
                msg = 'RangeKey must be of type: %s' % schema.range_key_type
                raise TypeError(msg)
            dynamodb_key['RangeKeyElement'] = dynamodb_value
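
``list(...)`` around ``dict.keys()`` is the portable spelling: on Python 3 ``keys()`` returns a view that cannot be indexed. A sketch with an illustrative typed value::

    d = {'S': 'john'}              # a DynamoDB-typed attribute value
    first_key = list(d.keys())[0]  # works on 2 and 3; d.keys()[0] fails on 3
    print(first_key)               # S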
@ -47,9 +47,9 @@ class TableBatchGenerator(object):
        self.consistent_read = consistent_read

    def _queue_unprocessed(self, res):
        if not u'UnprocessedKeys' in res:
        if u'UnprocessedKeys' not in res:
            return
        if not self.table.name in res[u'UnprocessedKeys']:
        if self.table.name not in res[u'UnprocessedKeys']:
            return

        keys = res[u'UnprocessedKeys'][self.table.name][u'Keys']
@ -68,7 +68,7 @@ class TableBatchGenerator(object):
            res = batch.submit()

            # parse the results
            if not self.table.name in res[u'Responses']:
            if self.table.name not in res[u'Responses']:
                continue
            self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits']
            for elem in res[u'Responses'][self.table.name][u'Items']:
@ -27,7 +27,8 @@ Python types and vice-versa.
import base64
from decimal import (Decimal, DecimalException, Context,
                     Clamped, Overflow, Inexact, Underflow, Rounded)
from exceptions import DynamoDBNumberError
from boto.dynamodb.exceptions import DynamoDBNumberError
from boto.compat import filter, map, six, long_type


DYNAMODB_CONTEXT = Context(
@ -51,17 +52,25 @@ def float_to_decimal(f):


def is_num(n):
    types = (int, long, float, bool, Decimal)
    types = (int, long_type, float, bool, Decimal)
    return isinstance(n, types) or n in types


def is_str(n):
    return isinstance(n, basestring) or (isinstance(n, type) and
                                         issubclass(n, basestring))
if six.PY2:
    def is_str(n):
        return (isinstance(n, basestring) or
                isinstance(n, type) and issubclass(n, basestring))

def is_binary(n):
    return isinstance(n, Binary)
    def is_binary(n):
        return isinstance(n, Binary)
else:  # PY3
    def is_str(n):
        return (isinstance(n, str) or
                isinstance(n, type) and issubclass(n, str))

    def is_binary(n):
        return isinstance(n, bytes)  # Binary is subclass of bytes.


def serialize_num(val):
@ -103,7 +112,7 @@ def get_dynamodb_type(val):
        dynamodb_type = 'SS'
    elif False not in map(is_binary, val):
        dynamodb_type = 'BS'
    elif isinstance(val, Binary):
    elif is_binary(val):
        dynamodb_type = 'B'
    if dynamodb_type is None:
        msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
@ -124,43 +133,62 @@ def dynamize_value(val):
    elif dynamodb_type == 'S':
        val = {dynamodb_type: val}
    elif dynamodb_type == 'NS':
        val = {dynamodb_type: map(serialize_num, val)}
        val = {dynamodb_type: list(map(serialize_num, val))}
    elif dynamodb_type == 'SS':
        val = {dynamodb_type: [n for n in val]}
    elif dynamodb_type == 'B':
        if isinstance(val, bytes):
            val = Binary(val)
        val = {dynamodb_type: val.encode()}
    elif dynamodb_type == 'BS':
        val = {dynamodb_type: [n.encode() for n in val]}
    return val


class Binary(object):
    def __init__(self, value):
        if not isinstance(value, basestring):
            raise TypeError('Value must be a string of binary data!')
if six.PY2:
    class Binary(object):
        def __init__(self, value):
            if not isinstance(value, (bytes, six.text_type)):
                raise TypeError('Value must be a string of binary data!')
            if not isinstance(value, bytes):
                value = value.encode("utf-8")

        self.value = value
            self.value = value

    def encode(self):
        return base64.b64encode(self.value)
        def encode(self):
            return base64.b64encode(self.value).decode('utf-8')

    def __eq__(self, other):
        if isinstance(other, Binary):
            return self.value == other.value
        else:
            return self.value == other
        def __eq__(self, other):
            if isinstance(other, Binary):
                return self.value == other.value
            else:
                return self.value == other

    def __ne__(self, other):
        return not self.__eq__(other)
        def __ne__(self, other):
            return not self.__eq__(other)

    def __repr__(self):
        return 'Binary(%s)' % self.value
        def __repr__(self):
            return 'Binary(%r)' % self.value

    def __str__(self):
        return self.value
        def __str__(self):
            return self.value

    def __hash__(self):
        return hash(self.value)
        def __hash__(self):
            return hash(self.value)
else:
    class Binary(bytes):
        def encode(self):
            return base64.b64encode(self).decode('utf-8')

        @property
        def value(self):
            # This matches the public API of the Python 2 version,
            # but just returns itself since it is already a bytes
            # instance.
            return bytes(self)

        def __repr__(self):
            return 'Binary(%r)' % self.value


def item_object_hook(dct):
@ -244,28 +272,30 @@ class Dynamizer(object):
                n = str(float_to_decimal(attr))
            else:
                n = str(DYNAMODB_CONTEXT.create_decimal(attr))
            if filter(lambda x: x in n, ('Infinity', 'NaN')):
            if list(filter(lambda x: x in n, ('Infinity', 'NaN'))):
                raise TypeError('Infinity and NaN not supported')
            return n
        except (TypeError, DecimalException), e:
        except (TypeError, DecimalException) as e:
            msg = '{0} numeric for `{1}`\n{2}'.format(
                e.__class__.__name__, attr, str(e) or '')
            raise DynamoDBNumberError(msg)

    def _encode_s(self, attr):
        if isinstance(attr, unicode):
            attr = attr.encode('utf-8')
        elif not isinstance(attr, str):
        if isinstance(attr, bytes):
            attr = attr.decode('utf-8')
        elif not isinstance(attr, six.text_type):
            attr = str(attr)
        return attr

    def _encode_ns(self, attr):
        return map(self._encode_n, attr)
        return list(map(self._encode_n, attr))

    def _encode_ss(self, attr):
        return [self._encode_s(n) for n in attr]

    def _encode_b(self, attr):
        if isinstance(attr, bytes):
            attr = Binary(attr)
        return attr.encode()

    def _encode_bs(self, attr):
@ -279,7 +309,7 @@ class Dynamizer(object):
        """
        if len(attr) > 1 or not attr:
            return attr
        dynamodb_type = attr.keys()[0]
        dynamodb_type = list(attr.keys())[0]
        if dynamodb_type.lower() == dynamodb_type:
            # It's not an actual type, just a single character attr that
            # overlaps with the DDB types. Return it.
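
A sketch of the resulting encoder behavior (values illustrative)::

    from boto.dynamodb.types import Binary, Dynamizer

    d = Dynamizer()
    print(d.encode('john'))           # {'S': 'john'}
    print(d.encode(Binary(b'\x01')))  # {'B': 'AQ=='} - base64 text on 2 and 3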
@ -108,9 +108,11 @@ class Item(object):
    def __contains__(self, key):
        return key in self._data

    def __nonzero__(self):
    def __bool__(self):
        return bool(self._data)

    __nonzero__ = __bool__

    def _determine_alterations(self):
        """
        Checks the ``-orig_data`` against the ``_data`` to determine what
@ -256,7 +258,7 @@ class Item(object):
        expects = {}

        if fields is None:
            fields = self._data.keys() + self._orig_data.keys()
            fields = list(self._data.keys()) + list(self._orig_data.keys())

        # Only uniques.
        fields = set(fields)
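
Python 3 looks for ``__bool__`` where Python 2 looked for ``__nonzero__``; defining one and aliasing the other, as the hunk above does, keeps a single implementation. Minimal sketch::

    class Container(object):
        def __init__(self, data):
            self._data = data

        def __bool__(self):          # Python 3 hook
            return bool(self._data)

        __nonzero__ = __bool__       # Python 2 hook

    print(bool(Container({})), bool(Container({'a': 1})))  # False True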
File diff suppressed because it is too large
@ -52,7 +52,7 @@ class ResultSet(object):
    def __iter__(self):
        return self

    def next(self):
    def __next__(self):
        self._offset += 1

        if self._offset >= len(self._results):
@ -78,6 +78,8 @@ class ResultSet(object):
        else:
            raise StopIteration()

    next = __next__

    def to_call(self, the_callable, *args, **kwargs):
        """
        Sets up the callable & any arguments to run it with.
@ -106,7 +108,7 @@ class ResultSet(object):
        # DDB api calls use (which limit page size, not the overall result set).
        self._limit = kwargs.pop('limit', None)

        if self._limit < 0:
        if self._limit is not None and self._limit < 0:
            self._limit = None

        self.the_callable = the_callable
@ -130,7 +132,7 @@ class ResultSet(object):

        # If the page size is greater than limit set them
        # to the same value
        if self._limit and self._max_page_size > self._limit:
        if self._limit and self._max_page_size and self._max_page_size > self._limit:
            self._max_page_size = self._limit

        # Put in the max page size.
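
The same dual-protocol trick applies to iterators: Python 3 calls ``__next__`` where Python 2 called ``next``. Sketch::

    class CountDown(object):
        def __init__(self, n):
            self.n = n

        def __iter__(self):
            return self

        def __next__(self):          # Python 3 protocol
            if self.n <= 0:
                raise StopIteration()
            self.n -= 1
            return self.n

        next = __next__              # Python 2 protocol

    print(list(CountDown(3)))        # [2, 1, 0]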
@ -7,7 +7,8 @@ from boto.dynamodb2.fields import (HashKey, RangeKey,
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.results import ResultSet, BatchGetResultSet
from boto.dynamodb2.types import Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS
from boto.dynamodb2.types import (Dynamizer, FILTER_OPERATORS, QUERY_OPERATORS,
                                  STRING)
from boto.exception import JSONResponseError


@ -232,18 +233,29 @@ class Table(object):
        )
        return table

    def _introspect_schema(self, raw_schema):
    def _introspect_schema(self, raw_schema, raw_attributes=None):
        """
        Given a raw schema structure back from a DynamoDB response, parse
        out & build the high-level Python objects that represent them.
        """
        schema = []
        sane_attributes = {}

        if raw_attributes:
            for field in raw_attributes:
                sane_attributes[field['AttributeName']] = field['AttributeType']

        for field in raw_schema:
            data_type = sane_attributes.get(field['AttributeName'], STRING)

            if field['KeyType'] == 'HASH':
                schema.append(HashKey(field['AttributeName']))
                schema.append(
                    HashKey(field['AttributeName'], data_type=data_type)
                )
            elif field['KeyType'] == 'RANGE':
                schema.append(RangeKey(field['AttributeName']))
                schema.append(
                    RangeKey(field['AttributeName'], data_type=data_type)
                )
            else:
                raise exceptions.UnknownSchemaFieldError(
                    "%s was seen, but is unknown. Please report this at "
@ -280,7 +292,7 @@ class Table(object):
            )

            name = field['IndexName']
            kwargs['parts'] = self._introspect_schema(field['KeySchema'])
            kwargs['parts'] = self._introspect_schema(field['KeySchema'], None)
            indexes.append(index_klass(name, **kwargs))

        return indexes
@ -319,7 +331,8 @@ class Table(object):
        if not self.schema:
            # Since we have the data, build the schema.
            raw_schema = result['Table'].get('KeySchema', [])
            self.schema = self._introspect_schema(raw_schema)
            raw_attributes = result['Table'].get('AttributeDefinitions', [])
            self.schema = self._introspect_schema(raw_schema, raw_attributes)

        if not self.indexes:
            # Build the index information as well.
@ -635,16 +648,35 @@ class Table(object):
        self.connection.update_item(self.table_name, raw_key, item_data, **kwargs)
        return True

    def delete_item(self, **kwargs):
    def delete_item(self, expected=None, conditional_operator=None, **kwargs):
        """
        Deletes an item in DynamoDB.
        Deletes a single item. You can perform a conditional delete operation
        that deletes the item if it exists, or if it has an expected attribute
        value.

        Conditional deletes are useful for only deleting items if specific
        conditions are met. If those conditions are met, DynamoDB performs
        the delete. Otherwise, the item is not deleted.

        To specify the expected attribute values of the item, you can pass a
        dictionary of conditions to ``expected``. Each condition should follow
        the pattern ``<attributename>__<comparison_operator>=<value_to_expect>``.

        **IMPORTANT** - Be careful when using this method, there is no undo.

        To specify the key of the item you'd like to get, you can specify the
        key attributes as kwargs.

        Returns ``True`` on success.
        Optionally accepts an ``expected`` parameter which is a dictionary of
        expected attribute value conditions.

        Optionally accepts a ``conditional_operator`` which applies to the
        expected attribute value conditions:

        + `AND` - If all of the conditions evaluate to true (default)
        + `OR` - True if at least one condition evaluates to true

        Returns ``True`` on success, ``False`` on failed conditional delete.

        Example::

@ -663,9 +695,21 @@ class Table(object):
            ...     })
            True

            # Conditional delete
            >>> users.delete_item(username='johndoe',
            ...     expected={'balance__eq': 0})
            True
        """
        expected = self._build_filters(expected, using=FILTER_OPERATORS)
        raw_key = self._encode_keys(kwargs)
        self.connection.delete_item(self.table_name, raw_key)

        try:
            self.connection.delete_item(self.table_name, raw_key,
                                        expected=expected,
                                        conditional_operator=conditional_operator)
        except exceptions.ConditionalCheckFailedException:
            return False

        return True

    def get_key_fields(self):
@ -744,6 +788,9 @@ class Table(object):
        An internal method for taking query/scan-style ``**kwargs`` & turning
        them into the raw structure DynamoDB expects for filtering.
        """
        if filter_kwargs is None:
            return

        filters = {}

        for field_and_op, value in filter_kwargs.items():
@ -803,17 +850,34 @@ class Table(object):
    def query(self, limit=None, index=None, reverse=False, consistent=False,
              attributes=None, max_page_size=None, **filter_kwargs):
        """
        **WARNING:** This method is provided **strictly** for
        backward-compatibility. It returns results in an incorrect order.

        If you are writing new code, please use ``Table.query_2``.
        """
        reverse = not reverse
        return self.query_2(limit=limit, index=index, reverse=reverse,
                            consistent=consistent, attributes=attributes,
                            max_page_size=max_page_size, **filter_kwargs)

    def query_2(self, limit=None, index=None, reverse=False,
                consistent=False, attributes=None, max_page_size=None,
                query_filter=None, conditional_operator=None,
                **filter_kwargs):
        """
        Queries for a set of matching items in a DynamoDB table.

        Queries can be performed against a hash key, a hash+range key or
        against any data stored in your local secondary indexes.
        against any data stored in your local secondary indexes. Query filters
        can be used to filter on arbitrary fields.

        **Note** - You can not query against arbitrary fields within the data
        stored in DynamoDB.
        stored in DynamoDB unless you specify ``query_filter`` values.

        To specify the filters of the items you'd like to get, you can specify
        the filters as kwargs. Each filter kwarg should follow the pattern
        ``<fieldname>__<filter_operation>=<value_to_look_for>``.
        ``<fieldname>__<filter_operation>=<value_to_look_for>``. Query filters
        are specified in the same way.

        Optionally accepts a ``limit`` parameter, which should be an integer
        count of the total number of items to return. (Default: ``None`` -
@ -824,7 +888,7 @@ class Table(object):
            (Default: ``None``)

        Optionally accepts a ``reverse`` parameter, which will present the
        results in reverse order. (Default: ``None`` - normal order)
        results in reverse order. (Default: ``False`` - normal order)

        Optionally accepts a ``consistent`` parameter, which should be a
        boolean. If you provide ``True``, it will force a consistent read of
@ -842,6 +906,15 @@ class Table(object):
            the scan from drowning out other queries. (Default: ``None`` -
            fetch as many as DynamoDB will return)

        Optionally accepts a ``query_filter`` which is a dictionary of filter
        conditions against any arbitrary field in the returned data.

        Optionally accepts a ``conditional_operator`` which applies to the
        query filter conditions:

        + `AND` - True if all filter conditions evaluate to true (default)
        + `OR` - True if at least one filter condition evaluates to true

        Returns a ``ResultSet``, which transparently handles the pagination of
        results you get back.

@ -880,6 +953,18 @@ class Table(object):
        'John'
        'Fred'

        # Filter by non-indexed field(s)
        >>> results = users.query(
        ...     last_name__eq='Doe',
        ...     reverse=True,
        ...     query_filter={
        ...         'first_name__beginswith': 'A'
        ...     }
        ... )
        >>> for res in results:
        ...     print res['first_name'] + ' ' + res['last_name']
        'Alice Doe'

        """
        if self.schema:
            if len(self.schema) == 1:
@ -908,20 +993,26 @@ class Table(object):
            'consistent': consistent,
            'select': select,
            'attributes_to_get': attributes,
            'query_filter': query_filter,
            'conditional_operator': conditional_operator,
        })
        results.to_call(self._query, **kwargs)
        return results

    def query_count(self, index=None, consistent=False, **filter_kwargs):
    def query_count(self, index=None, consistent=False, conditional_operator=None,
                    query_filter=None, scan_index_forward=True, limit=None,
                    **filter_kwargs):
        """
        Queries the exact count of matching items in a DynamoDB table.

        Queries can be performed against a hash key, a hash+range key or
        against any data stored in your local secondary indexes.
        against any data stored in your local secondary indexes. Query filters
        can be used to filter on arbitrary fields.

        To specify the filters of the items you'd like to get, you can specify
        the filters as kwargs. Each filter kwarg should follow the pattern
        ``<fieldname>__<filter_operation>=<value_to_look_for>``.
        ``<fieldname>__<filter_operation>=<value_to_look_for>``. Query filters
        are specified in the same way.

        Optionally accepts an ``index`` parameter, which should be a string of
        name of the local secondary index you want to query against.
@ -932,9 +1023,34 @@ class Table(object):
        the data (more expensive). (Default: ``False`` - use eventually
        consistent reads)

        Optionally accepts a ``query_filter`` which is a dictionary of filter
        conditions against any arbitrary field in the returned data.

        Optionally accepts a ``conditional_operator`` which applies to the
        query filter conditions:

        + `AND` - True if all filter conditions evaluate to true (default)
        + `OR` - True if at least one filter condition evaluates to true

        Returns an integer which represents the exact amount of matched
        items.

        :type scan_index_forward: boolean
        :param scan_index_forward: Specifies ascending (true) or descending
            (false) traversal of the index. DynamoDB returns results reflecting
            the requested order determined by the range key. If the data type
            is Number, the results are returned in numeric order. For String,
            the results are returned in order of ASCII character code values.
            For Binary, DynamoDB treats each byte of the binary data as
            unsigned when it compares binary values.

            If ScanIndexForward is not specified, the results are returned in
            ascending order.

        :type limit: integer
        :param limit: The maximum number of items to evaluate (not necessarily
            the number of matching items).

        Example::

            # Look for last names equal to "Doe".
@ -956,18 +1072,27 @@ class Table(object):
            using=QUERY_OPERATORS
        )

        built_query_filter = self._build_filters(
            query_filter,
            using=FILTER_OPERATORS
        )

        raw_results = self.connection.query(
            self.table_name,
            index_name=index,
            consistent_read=consistent,
            select='COUNT',
            key_conditions=key_conditions,
            query_filter=built_query_filter,
            conditional_operator=conditional_operator,
            limit=limit,
            scan_index_forward=scan_index_forward,
        )
        return int(raw_results.get('Count', 0))

    def _query(self, limit=None, index=None, reverse=False, consistent=False,
               exclusive_start_key=None, select=None, attributes_to_get=None,
               **filter_kwargs):
               query_filter=None, conditional_operator=None, **filter_kwargs):
        """
        The internal method that performs the actual queries. Used extensively
        by ``ResultSet`` to perform each (paginated) request.
@ -975,12 +1100,15 @@ class Table(object):
        kwargs = {
            'limit': limit,
            'index_name': index,
            'scan_index_forward': reverse,
            'consistent_read': consistent,
            'select': select,
            'attributes_to_get': attributes_to_get
            'attributes_to_get': attributes_to_get,
            'conditional_operator': conditional_operator,
        }

        if reverse:
            kwargs['scan_index_forward'] = False

        if exclusive_start_key:
            kwargs['exclusive_start_key'] = {}

@ -994,6 +1122,11 @@ class Table(object):
            using=QUERY_OPERATORS
        )

        kwargs['query_filter'] = self._build_filters(
            query_filter,
            using=FILTER_OPERATORS
        )

        raw_results = self.connection.query(
            self.table_name,
            **kwargs
@ -1020,13 +1153,14 @@ class Table(object):
        }

    def scan(self, limit=None, segment=None, total_segments=None,
             max_page_size=None, attributes=None, **filter_kwargs):
             max_page_size=None, attributes=None, conditional_operator=None,
             **filter_kwargs):
        """
        Scans across all items within a DynamoDB table.

        Scans can be performed against a hash key or a hash+range key. You can
        additionally filter the results after the table has been read but
        before the response is returned.
        before the response is returned by using query filters.

        To specify the filters of the items you'd like to get, you can specify
        the filters as kwargs. Each filter kwarg should follow the pattern
@ -1091,12 +1225,14 @@ class Table(object):
            'segment': segment,
            'total_segments': total_segments,
            'attributes': attributes,
            'conditional_operator': conditional_operator,
        })
        results.to_call(self._scan, **kwargs)
        return results

    def _scan(self, limit=None, exclusive_start_key=None, segment=None,
              total_segments=None, attributes=None, **filter_kwargs):
              total_segments=None, attributes=None, conditional_operator=None,
              **filter_kwargs):
        """
        The internal method that performs the actual scan. Used extensively
        by ``ResultSet`` to perform each (paginated) request.
@ -1106,6 +1242,7 @@ class Table(object):
            'segment': segment,
            'total_segments': total_segments,
            'attributes_to_get': attributes,
            'conditional_operator': conditional_operator,
        }

        if exclusive_start_key:
@ -1146,7 +1283,7 @@ class Table(object):
            'last_key': last_key,
        }

    def batch_get(self, keys, consistent=False):
    def batch_get(self, keys, consistent=False, attributes=None):
        """
        Fetches many specific items in batch from a table.

@ -1157,6 +1294,10 @@ class Table(object):
        boolean. If you provide ``True``, a strongly consistent read will be
        used. (Default: False)

        Optionally accepts an ``attributes`` parameter, which should be a
        tuple. If you provide any attributes only these will be fetched
        from DynamoDB.

        Returns a ``ResultSet``, which transparently handles the pagination of
        results you get back.

@ -1183,10 +1324,10 @@ class Table(object):
        # We pass the keys to the constructor instead, so it can maintain it's
        # own internal state as to what keys have been processed.
        results = BatchGetResultSet(keys=keys, max_batch_get=self.max_batch_get)
        results.to_call(self._batch_get, consistent=False)
        results.to_call(self._batch_get, consistent=consistent, attributes=attributes)
        return results

    def _batch_get(self, keys, consistent=False):
    def _batch_get(self, keys, consistent=False, attributes=None):
        """
        The internal method that performs the actual batch get. Used extensively
        by ``BatchGetResultSet`` to perform each (paginated) request.
@ -1200,6 +1341,9 @@ class Table(object):
        if consistent:
            items[self.table_name]['ConsistentRead'] = True

        if attributes is not None:
            items[self.table_name]['AttributesToGet'] = attributes

        for key_data in keys:
            raw_key = {}
|
||||
|
||||
|
||||
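Taken together, the new ``conditional_operator`` and ``attributes`` parameters look like this in use; a minimal sketch, assuming a DynamoDB table named ``users`` with a ``username`` hash key (both names are hypothetical):

    from boto.dynamodb2.table import Table

    users = Table('users')  # hypothetical table name

    # OR the filter kwargs together instead of the default AND.
    for user in users.scan(account_type__eq='standard',
                           last_name__beginswith='D',
                           conditional_operator='OR'):
        print(user['username'])

    # Fetch only the named attributes for a batch of keys.
    results = users.batch_get(
        keys=[{'username': 'alice'}, {'username': 'bob'}],
        consistent=True,
        attributes=('username', 'last_login'))
    for user in results:
        print(user['username'])
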
@@ -45,6 +45,7 @@ from boto.ec2.autoscale.instance import Instance
from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
from boto.ec2.autoscale.tag import Tag
from boto.ec2.autoscale.limits import AccountLimits
from boto.compat import six

RegionData = load_regions().get('autoscaling', {})

@@ -134,15 +135,15 @@ class AutoScaleConnection(AWSQueryConnection):
            ['us-east-1b',...]
        """
        # different from EC2 list params
        for i in xrange(1, len(items) + 1):
        for i in range(1, len(items) + 1):
            if isinstance(items[i - 1], dict):
                for k, v in items[i - 1].iteritems():
                for k, v in six.iteritems(items[i - 1]):
                    if isinstance(v, dict):
                        for kk, vv in v.iteritems():
                        for kk, vv in six.iteritems(v):
                            params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv
                    else:
                        params['%s.member.%d.%s' % (label, i, k)] = v
            elif isinstance(items[i - 1], basestring):
            elif isinstance(items[i - 1], six.string_types):
                params['%s.member.%d' % (label, i)] = items[i - 1]

    def _update_group(self, op, as_group):
@@ -221,7 +222,7 @@ class AutoScaleConnection(AWSQueryConnection):
        if launch_config.key_name:
            params['KeyName'] = launch_config.key_name
        if launch_config.user_data:
            params['UserData'] = base64.b64encode(launch_config.user_data)
            params['UserData'] = base64.b64encode(launch_config.user_data).decode('utf-8')
        if launch_config.kernel_id:
            params['KernelId'] = launch_config.kernel_id
        if launch_config.ramdisk_id:

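This pattern recurs throughout the commit: ``xrange``, ``dict.iteritems()`` and ``basestring`` only exist on Python 2, so the code switches to ``range`` plus the ``six`` helpers, which behave identically on both interpreters. A minimal sketch of the idiom with a made-up ``params`` dict:

    from boto.compat import six

    items = [{'PauseTime': 'PT0S', 'MaxBatchSize': 1}, 'us-east-1b']
    params = {}
    for i in range(1, len(items) + 1):  # works on Python 2 and 3 alike
        if isinstance(items[i - 1], dict):
            # dict.iteritems() on Python 2, dict.items() on Python 3
            for k, v in six.iteritems(items[i - 1]):
                params['Tags.member.%d.%s' % (i, k)] = v
        elif isinstance(items[i - 1], six.string_types):
            # str/unicode on Python 2, str on Python 3
            params['Tags.member.%d' % i] = items[i - 1]
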
@@ -18,7 +18,6 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import boto.ec2
from boto.sdb.db.property import StringProperty, IntegerProperty
from boto.manage import propget
@@ -66,19 +65,19 @@ if __name__ == "__main__":
    obj.get(params)
    offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'],
                                                             availability_zone=params['zone'].name)
    print '\nThe following Reserved Instances Offerings are available:\n'
    print('\nThe following Reserved Instances Offerings are available:\n')
    for offering in offerings:
        offering.describe()
    prop = StringProperty(name='offering', verbose_name='Offering',
                          choices=offerings)
    offering = propget.get(prop)
    print '\nYou have chosen this offering:'
    print('\nYou have chosen this offering:')
    offering.describe()
    unit_price = float(offering.fixed_price)
    total_price = unit_price * params['quantity']
    print '!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price)
    print('!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price))
    answer = raw_input('Are you sure you want to do this? If so, enter YES: ')
    if answer.strip().lower() == 'yes':
        offering.purchase(params['quantity'])
    else:
        print 'Purchase cancelled'
        print('Purchase cancelled')

@@ -23,7 +23,7 @@
This module provides an interface to the Elastic Compute Cloud (EC2)
CloudWatch service from AWS.
"""
from boto.compat import json
from boto.compat import json, map, six, zip
from boto.connection import AWSQueryConnection
from boto.ec2.cloudwatch.metric import Metric
from boto.ec2.cloudwatch.alarm import MetricAlarm, MetricAlarms, AlarmHistoryItem
@@ -110,7 +110,7 @@ class CloudWatchConnection(AWSQueryConnection):
        for dim_name in dimension:
            dim_value = dimension[dim_name]
            if dim_value:
                if isinstance(dim_value, basestring):
                if isinstance(dim_value, six.string_types):
                    dim_value = [dim_value]
                for value in dim_value:
                    params['%s.%d.Name' % (prefix, i+1)] = dim_name
@@ -121,12 +121,12 @@ class CloudWatchConnection(AWSQueryConnection):
            i += 1

    def build_list_params(self, params, items, label):
        if isinstance(items, basestring):
        if isinstance(items, six.string_types):
            items = [items]
        for index, item in enumerate(items):
            i = index + 1
            if isinstance(item, dict):
                for k, v in item.iteritems():
                for k, v in six.iteritems(item):
                    params[label % (i, 'Name')] = k
                    if v is not None:
                        params[label % (i, 'Value')] = v
@@ -171,7 +171,7 @@ class CloudWatchConnection(AWSQueryConnection):
        else:
            raise Exception('Must specify a value or statistics to put.')

        for key, val in metric_data.iteritems():
        for key, val in six.iteritems(metric_data):
            params['MetricData.member.%d.%s' % (index + 1, key)] = val

    def get_metric_statistics(self, period, start_time, end_time, metric_name,

@@ -21,10 +21,10 @@
#

from datetime import datetime
from boto.resultset import ResultSet
from boto.ec2.cloudwatch.listelement import ListElement
from boto.ec2.cloudwatch.dimension import Dimension
from boto.compat import json
from boto.compat import six


class MetricAlarms(list):
@@ -57,7 +57,7 @@ class MetricAlarm(object):
        '<': 'LessThanThreshold',
        '<=': 'LessThanOrEqualToThreshold',
    }
    _rev_cmp_map = dict((v, k) for (k, v) in _cmp_map.iteritems())
    _rev_cmp_map = dict((v, k) for (k, v) in six.iteritems(_cmp_map))

    def __init__(self, connection=None, name=None, metric=None,
                 namespace=None, statistic=None, comparison=None,
@@ -252,11 +252,11 @@ class MetricAlarm(object):

    def add_alarm_action(self, action_arn=None):
        """
        Adds an alarm action, represented as an SNS topic, to this alarm.
        What to do when the alarm is triggered.

        :type action_arn: str
        :param action_arn: SNS topics to which notification should be
            sent if the alarm goes to state ALARM.
        """
        if not action_arn:
@@ -270,21 +270,21 @@ class MetricAlarm(object):
        this alarm. What to do when the insufficient_data state is reached.

        :type action_arn: str
        :param action_arn: SNS topics to which notification should be
            sent if the alarm goes to state INSUFFICIENT_DATA.
        """
        if not action_arn:
            return
        self.actions_enabled = 'true'
        self.insufficient_data_actions.append(action_arn)

    def add_ok_action(self, action_arn=None):
        """
        Adds an ok action, represented as an SNS topic, to this alarm. What
        to do when the ok state is reached.

        :type action_arn: str
        :param action_arn: SNS topics to which notification should be
            sent if the alarm goes to state OK.
        """
        if not action_arn:
@@ -320,4 +320,3 @@ class AlarmHistoryItem(object):
                                                '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')

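The three ``add_*_action`` helpers just append an SNS topic ARN and flip ``actions_enabled``; a minimal sketch, with a hypothetical alarm name and topic ARN (and assuming ``update_alarm`` is used to push the change back):

    import boto.ec2.cloudwatch

    conn = boto.ec2.cloudwatch.connect_to_region('us-east-1')
    alarm = conn.describe_alarms(alarm_names=['web-high-cpu'])[0]  # hypothetical alarm

    topic = 'arn:aws:sns:us-east-1:123456789012:ops-alerts'  # hypothetical ARN
    alarm.add_alarm_action(topic)              # notify when state goes to ALARM
    alarm.add_insufficient_data_action(topic)  # notify on INSUFFICIENT_DATA
    conn.update_alarm(alarm)                   # persist the modified alarm
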
@@ -37,4 +37,3 @@ class Datapoint(dict):
            self[name] = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
        elif name != 'member':
            self[name] = value

@@ -35,4 +35,3 @@ class Dimension(dict):
            self[self._name] = [value]
        else:
            setattr(self, name, value)

@@ -27,5 +27,3 @@ class ListElement(list):
    def endElement(self, name, value, connection):
        if name == 'member':
            self.append(value)

@@ -65,13 +65,14 @@ from boto.ec2.networkinterface import NetworkInterface
from boto.ec2.attributes import AccountAttribute, VPCAttribute
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.exception import EC2ResponseError
from boto.compat import six

#boto.set_stream_logger('ec2')


class EC2Connection(AWSQueryConnection):

    APIVersion = boto.config.get('Boto', 'ec2_version', '2013-10-15')
    APIVersion = boto.config.get('Boto', 'ec2_version', '2014-05-01')
    DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
    DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint',
                                            'ec2.us-east-1.amazonaws.com')
@@ -109,7 +110,7 @@ class EC2Connection(AWSQueryConnection):

    def get_params(self):
        """
        Returns a dictionary containing the value of of all of the keyword
        Returns a dictionary containing the value of all of the keyword
        arguments passed when constructing this connection.
        """
        param_names = ['aws_access_key_id', 'aws_secret_access_key',
@@ -122,6 +123,9 @@ class EC2Connection(AWSQueryConnection):
        return params

    def build_filter_params(self, params, filters):
        if not isinstance(filters, dict):
            filters = dict(filters)

        i = 1
        for name in filters:
            aws_name = name
@@ -266,7 +270,8 @@ class EC2Connection(AWSQueryConnection):
                       root_device_name=None, block_device_map=None,
                       dry_run=False, virtualization_type=None,
                       sriov_net_support=None,
                       snapshot_id=None):
                       snapshot_id=None,
                       delete_root_volume_on_termination=False):
        """
        Register an image.

@@ -315,6 +320,12 @@ class EC2Connection(AWSQueryConnection):
            as root device for the image. Mutually exclusive with
            block_device_map, requires root_device_name

        :type delete_root_volume_on_termination: bool
        :param delete_root_volume_on_termination: Whether to delete the root
            volume of the image after instance termination. Only applies when
            creating image from snapshot_id. Defaults to False. Note that
            leaving volumes behind after instance termination is not free.

        :rtype: string
        :return: The new image id
        """
@@ -334,7 +345,8 @@ class EC2Connection(AWSQueryConnection):
        if root_device_name:
            params['RootDeviceName'] = root_device_name
        if snapshot_id:
            root_vol = BlockDeviceType(snapshot_id=snapshot_id)
            root_vol = BlockDeviceType(snapshot_id=snapshot_id,
                                       delete_on_termination=delete_root_volume_on_termination)
            block_device_map = BlockDeviceMapping()
            block_device_map[root_device_name] = root_vol
        if block_device_map:
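The new flag controls what happens to the root volume after the instance terminates; a minimal sketch of registering an AMI straight from a snapshot (all IDs and names are hypothetical):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    image_id = conn.register_image(
        name='my-ami',                            # hypothetical AMI name
        root_device_name='/dev/sda1',
        snapshot_id='snap-12345678',              # hypothetical snapshot
        delete_root_volume_on_termination=True)   # avoid paying for orphaned volumes
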
@@ -602,15 +614,24 @@ class EC2Connection(AWSQueryConnection):
        :rtype: list
        :return: A list of :class:`boto.ec2.instance.Instance`
        """
        reservations = self.get_all_reservations(instance_ids=instance_ids,
                                                 filters=filters,
                                                 dry_run=dry_run,
                                                 max_results=max_results)
        return [instance for reservation in reservations
                for instance in reservation.instances]
        next_token = None
        retval = []
        while True:
            reservations = self.get_all_reservations(instance_ids=instance_ids,
                                                     filters=filters,
                                                     dry_run=dry_run,
                                                     max_results=max_results,
                                                     next_token=next_token)
            retval.extend([instance for reservation in reservations for
                           instance in reservation.instances])
            next_token = reservations.next_token
            if not next_token:
                break

        return retval

    def get_all_reservations(self, instance_ids=None, filters=None,
                             dry_run=False, max_results=None):
                             dry_run=False, max_results=None, next_token=None):
        """
        Retrieve all the instance reservations associated with your account.

@@ -632,6 +653,10 @@ class EC2Connection(AWSQueryConnection):
        :param max_results: The maximum number of paginated instance
            items per response.

        :type next_token: str
        :param next_token: A string specifying the next paginated set
            of results to return.

        :rtype: list
        :return: A list of :class:`boto.ec2.instance.Reservation`
        """
@@ -652,12 +677,15 @@ class EC2Connection(AWSQueryConnection):
            params['DryRun'] = 'true'
        if max_results is not None:
            params['MaxResults'] = max_results
        if next_token:
            params['NextToken'] = next_token
        return self.get_list('DescribeInstances', params,
                             [('item', Reservation)], verb='POST')

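``get_only_instances`` now runs this loop for you, but when calling ``get_all_reservations`` directly the same pagination has to be written by hand; a minimal sketch:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    instances = []
    next_token = None
    while True:
        reservations = conn.get_all_reservations(max_results=100,
                                                 next_token=next_token)
        for reservation in reservations:
            instances.extend(reservation.instances)
        next_token = reservations.next_token  # empty on the last page
        if not next_token:
            break
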
    def get_all_instance_status(self, instance_ids=None,
                                max_results=None, next_token=None,
                                filters=None, dry_run=False):
                                filters=None, dry_run=False,
                                include_all_instances=False):
        """
        Retrieve all the instances in your account scheduled for maintenance.

@@ -685,6 +713,11 @@ class EC2Connection(AWSQueryConnection):
        :type dry_run: bool
        :param dry_run: Set to True if the operation should not actually run.

        :type include_all_instances: bool
        :param include_all_instances: Set to True if all
            instances should be returned. (Only running
            instances are included by default.)

        :rtype: list
        :return: A list of instances that have maintenance scheduled.
        """
@@ -699,6 +732,8 @@ class EC2Connection(AWSQueryConnection):
            self.build_filter_params(params, filters)
        if dry_run:
            params['DryRun'] = 'true'
        if include_all_instances:
            params['IncludeAllInstances'] = 'true'
        return self.get_object('DescribeInstanceStatus', params,
                               InstanceStatusSet, verb='POST')

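A minimal sketch of the new flag, which pulls in stopped and pending instances that DescribeInstanceStatus would otherwise omit:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    # Not just running instances; stopped and pending ones are included too.
    statuses = conn.get_all_instance_status(include_all_instances=True)
    for status in statuses:
        print(status.id, status.state_name)
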
@@ -775,6 +810,9 @@ class EC2Connection(AWSQueryConnection):
            * i2.2xlarge
            * i2.4xlarge
            * i2.8xlarge
            * t2.micro
            * t2.small
            * t2.medium

        :type placement: string
        :param placement: The Availability Zone to launch the instance into.
@@ -788,7 +826,7 @@ class EC2Connection(AWSQueryConnection):
            instances.

        :type monitoring_enabled: bool
        :param monitoring_enabled: Enable CloudWatch monitoring on
        :param monitoring_enabled: Enable detailed CloudWatch monitoring on
            the instance.

        :type subnet_id: string
@@ -856,9 +894,9 @@ class EC2Connection(AWSQueryConnection):
            provide optimal EBS I/O performance. This optimization
            isn't available with all instance types.

        :type network_interfaces: list
        :param network_interfaces: A list of
            :class:`boto.ec2.networkinterface.NetworkInterfaceSpecification`
        :type network_interfaces: :class:`boto.ec2.networkinterface.NetworkInterfaceCollection`
        :param network_interfaces: A NetworkInterfaceCollection data
            structure containing the ENI specifications for the instance.

        :type dry_run: bool
        :param dry_run: Set to True if the operation should not actually run.
@@ -889,7 +927,9 @@ class EC2Connection(AWSQueryConnection):
                    l.append(group)
            self.build_list_params(params, l, 'SecurityGroup')
        if user_data:
            params['UserData'] = base64.b64encode(user_data)
            if isinstance(user_data, six.text_type):
                user_data = user_data.encode('utf-8')
            params['UserData'] = base64.b64encode(user_data).decode('utf-8')
        if addressing_type:
            params['AddressingType'] = addressing_type
        if instance_type:
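A minimal sketch of launching with the changed parameters; text ``user_data`` is now encoded to UTF-8 before being base64'd, so unicode strings are safe (the AMI ID is hypothetical):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    reservation = conn.run_instances(
        'ami-12345678',                       # hypothetical AMI
        instance_type='t2.micro',             # t2 types are newly documented
        user_data=u'#!/bin/bash\necho ready > /tmp/motd\n',
        monitoring_enabled=True)              # detailed CloudWatch monitoring
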
@@ -1470,6 +1510,9 @@ class EC2Connection(AWSQueryConnection):
            * i2.2xlarge
            * i2.4xlarge
            * i2.8xlarge
            * t2.micro
            * t2.small
            * t2.medium

        :type placement: string
        :param placement: The availability zone in which to launch
@@ -1484,7 +1527,7 @@ class EC2Connection(AWSQueryConnection):
            instances

        :type monitoring_enabled: bool
        :param monitoring_enabled: Enable CloudWatch monitoring on
        :param monitoring_enabled: Enable detailed CloudWatch monitoring on
            the instance.

        :type subnet_id: string
@@ -2223,8 +2266,8 @@ class EC2Connection(AWSQueryConnection):
            params['DryRun'] = 'true'
        return self.get_status('ModifyVolumeAttribute', params, verb='POST')

    def create_volume(self, size, zone, snapshot=None,
                      volume_type=None, iops=None, dry_run=False):
    def create_volume(self, size, zone, snapshot=None, volume_type=None,
                      iops=None, encrypted=False, dry_run=False):
        """
        Create a new EBS Volume.

@@ -2240,12 +2283,16 @@ class EC2Connection(AWSQueryConnection):

        :type volume_type: string
        :param volume_type: The type of the volume. (optional). Valid
            values are: standard | io1.
            values are: standard | io1 | gp2.

        :type iops: int
        :param iops: The provisioned IOPs you want to associate with
        :param iops: The provisioned IOPS you want to associate with
            this volume. (optional)

        :type encrypted: bool
        :param encrypted: Specifies whether the volume should be encrypted.
            (optional)

        :type dry_run: bool
        :param dry_run: Set to True if the operation should not actually run.

@@ -2263,6 +2310,8 @@ class EC2Connection(AWSQueryConnection):
            params['VolumeType'] = volume_type
        if iops:
            params['Iops'] = str(iops)
        if encrypted:
            params['Encrypted'] = 'true'
        if dry_run:
            params['DryRun'] = 'true'
        return self.get_object('CreateVolume', params, Volume, verb='POST')
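A minimal sketch of the two additions together, the ``gp2`` volume type and at-rest encryption:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    # A 100 GiB encrypted General Purpose (SSD) volume.
    vol = conn.create_volume(100, 'us-east-1a', volume_type='gp2',
                             encrypted=True)
    print(vol.id)
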
@@ -2790,7 +2839,7 @@ class EC2Connection(AWSQueryConnection):
                keynames=[keyname],
                dry_run=dry_run
            )[0]
        except self.ResponseError, e:
        except self.ResponseError as e:
            if e.code == 'InvalidKeyPair.NotFound':
                return None
            else:
@@ -3819,7 +3868,7 @@ class EC2Connection(AWSQueryConnection):

    def monitor_instances(self, instance_ids, dry_run=False):
        """
        Enable CloudWatch monitoring for the supplied instances.
        Enable detailed CloudWatch monitoring for the supplied instances.

        :type instance_id: list of strings
        :param instance_id: The instance ids
@@ -3840,7 +3889,7 @@ class EC2Connection(AWSQueryConnection):
    def monitor_instance(self, instance_id, dry_run=False):
        """
        Deprecated Version, maintained for backward compatibility.
        Enable CloudWatch monitoring for the supplied instance.
        Enable detailed CloudWatch monitoring for the supplied instance.

        :type instance_id: string
        :param instance_id: The instance id
@@ -3876,7 +3925,7 @@ class EC2Connection(AWSQueryConnection):
    def unmonitor_instance(self, instance_id, dry_run=False):
        """
        Deprecated Version, maintained for backward compatibility.
        Disable CloudWatch monitoring for the supplied instance.
        Disable detailed CloudWatch monitoring for the supplied instance.

        :type instance_id: string
        :param instance_id: The instance id
@@ -4184,11 +4233,14 @@ class EC2Connection(AWSQueryConnection):

    # Network Interface methods

    def get_all_network_interfaces(self, filters=None, dry_run=False):
    def get_all_network_interfaces(self, network_interface_ids=None, filters=None, dry_run=False):
        """
        Retrieve all of the Elastic Network Interfaces (ENI's)
        associated with your account.

        :type network_interface_ids: list
        :param network_interface_ids: a list of strings representing ENI IDs

        :type filters: dict
        :param filters: Optional filters that can be used to limit
            the results returned. Filters are provided
@@ -4206,6 +4258,8 @@ class EC2Connection(AWSQueryConnection):
        :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface`
        """
        params = {}
        if network_interface_ids:
            self.build_list_params(params, network_interface_ids, 'NetworkInterfaceId')
        if filters:
            self.build_filter_params(params, filters)
        if dry_run:
@@ -4339,7 +4393,8 @@ class EC2Connection(AWSQueryConnection):
        """
        :type dry_run: bool
        :param dry_run: Set to True if the operation should not actually run.

        :rtype: :class:`boto.ec2.image.CopyImage`
        :return: Object containing the image_id of the copied image.
        """
        params = {
            'SourceRegion': source_region,

@@ -85,6 +85,27 @@ class TaggedEC2Object(EC2Object):
            self.tags = TagSet()
        self.tags[key] = value

    def add_tags(self, tags, dry_run=False):
        """
        Add tags to this object. Tags are stored by AWS and can be used
        to organize and filter resources. Adding tags involves a round-trip
        to the EC2 service.

        :type tags: dict
        :param tags: A dictionary of key-value pairs for the tags being stored.
            If for some tags you want only the name and no value, the
            corresponding value for that tag name should be an empty
            string.
        """
        status = self.connection.create_tags(
            [self.id],
            tags,
            dry_run=dry_run
        )
        if self.tags is None:
            self.tags = TagSet()
        self.tags.update(tags)

    def remove_tag(self, key, value=None, dry_run=False):
        """
        Remove a tag from this object. Removing a tag involves a round-trip
@@ -102,7 +123,7 @@ class TaggedEC2Object(EC2Object):
            NOTE: There is an important distinction between
            a value of '' and a value of None.
        """
        if value:
        if value is not None:
            tags = {key : value}
        else:
            tags = [key]

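With ``add_tags``, several tags go out in a single CreateTags round-trip instead of one call per tag; a minimal sketch, with a hypothetical instance ID:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    reservations = conn.get_all_reservations(instance_ids=['i-12345678'])  # hypothetical ID
    instance = reservations[0].instances[0]
    # One round-trip for both tags; use '' for a name-only tag.
    instance.add_tags({'Name': 'web-01', 'needs-review': ''})
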
@@ -30,9 +30,9 @@ from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones
from boto.ec2.elb.instancestate import InstanceState
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.elb.listelement import ListElement
from boto.regioninfo import RegionInfo, get_regions, load_regions
import boto
from boto.compat import six

RegionData = load_regions().get('elasticloadbalancing', {})

@@ -68,8 +68,9 @@ class ELBConnection(AWSQueryConnection):

    APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01')
    DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
    DefaultRegionEndpoint = boto.config.get('Boto', 'elb_region_endpoint',
                                            'elasticloadbalancing.us-east-1.amazonaws.com')
    DefaultRegionEndpoint = boto.config.get(
        'Boto', 'elb_region_endpoint',
        'elasticloadbalancing.us-east-1.amazonaws.com')

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
@@ -87,31 +88,37 @@ class ELBConnection(AWSQueryConnection):
                                   self.DefaultRegionEndpoint)
        self.region = region
        super(ELBConnection, self).__init__(aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path,
                                    security_token,
                                    validate_certs=validate_certs,
                                    profile_name=profile_name)
                                            aws_secret_access_key,
                                            is_secure, port, proxy, proxy_port,
                                            proxy_user, proxy_pass,
                                            self.region.endpoint, debug,
                                            https_connection_factory, path,
                                            security_token,
                                            validate_certs=validate_certs,
                                            profile_name=profile_name)

    def _required_auth_capability(self):
        return ['ec2']

    def build_list_params(self, params, items, label):
        if isinstance(items, basestring):
        if isinstance(items, six.string_types):
            items = [items]
        for index, item in enumerate(items):
            params[label % (index + 1)] = item

    def get_all_load_balancers(self, load_balancer_names=None):
    def get_all_load_balancers(self, load_balancer_names=None, marker=None):
        """
        Retrieve all load balancers associated with your account.

        :type load_balancer_names: list
        :keyword load_balancer_names: An optional list of load balancer names.

        :type marker: string
        :param marker: Use this only when paginating results and only
            in a follow-up request after you've received a response
            where the results are truncated. Set this to the value of
            the Marker element in the response you just received.

        :rtype: :py:class:`boto.resultset.ResultSet`
        :return: A ResultSet containing instances of
            :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
@@ -120,11 +127,16 @@ class ELBConnection(AWSQueryConnection):
        if load_balancer_names:
            self.build_list_params(params, load_balancer_names,
                                   'LoadBalancerNames.member.%d')

        if marker:
            params['Marker'] = marker

        return self.get_list('DescribeLoadBalancers', params,
                             [('member', LoadBalancer)])

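A minimal sketch of the new ``marker`` parameter; how the truncation marker is surfaced on the result object is an assumption here (the attribute name below is hypothetical), the contract from the docstring is simply to echo back the Marker value from the previous response:

    import boto.ec2.elb

    conn = boto.ec2.elb.connect_to_region('us-east-1')
    page = conn.get_all_load_balancers()
    for lb in page:
        print(lb.name, lb.dns_name)
    # If the response was truncated, pass its Marker value back in:
    next_marker = getattr(page, 'next_marker', None)  # attribute name is an assumption
    if next_marker:
        page = conn.get_all_load_balancers(marker=next_marker)
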
    def create_load_balancer(self, name, zones, listeners=None, subnets=None,
                             security_groups=None, scheme='internet-facing', complex_listeners=None):
                             security_groups=None, scheme='internet-facing',
                             complex_listeners=None):
        """
        Create a new load balancer for your account. By default the load
        balancer will be created in EC2. To create a load balancer inside a
@@ -170,13 +182,14 @@ class ELBConnection(AWSQueryConnection):

        :type complex_listeners: List of tuples
        :param complex_listeners: Each tuple contains four or five values,
            (LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol,
            SSLCertificateId).
            (LoadBalancerPortNumber, InstancePortNumber, Protocol,
            InstanceProtocol, SSLCertificateId).

            Where:
                - LoadBalancerPortNumber and InstancePortNumber are integer
                  values between 1 and 65535
                - Protocol and InstanceProtocol is a string containing either 'TCP',
                - Protocol and InstanceProtocol is a string containing
                  either 'TCP',
                  'SSL', 'HTTP', or 'HTTPS'
                - SSLCertificateId is the ARN of an SSL certificate loaded into
                  AWS IAM
@@ -224,7 +237,7 @@ class ELBConnection(AWSQueryConnection):

        if security_groups:
            self.build_list_params(params, security_groups,
                                   'SecurityGroups.member.%d')

        load_balancer = self.get_object('CreateLoadBalancer',
                                        params, LoadBalancer)
@@ -235,7 +248,8 @@ class ELBConnection(AWSQueryConnection):
        load_balancer.security_groups = security_groups
        return load_balancer

    def create_load_balancer_listeners(self, name, listeners=None, complex_listeners=None):
    def create_load_balancer_listeners(self, name, listeners=None,
                                       complex_listeners=None):
        """
        Creates a Listener (or group of listeners) for an existing
        Load Balancer
@@ -254,13 +268,14 @@ class ELBConnection(AWSQueryConnection):

        :type complex_listeners: List of tuples
        :param complex_listeners: Each tuple contains four or five values,
            (LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol,
            SSLCertificateId).
            (LoadBalancerPortNumber, InstancePortNumber, Protocol,
            InstanceProtocol, SSLCertificateId).

            Where:
                - LoadBalancerPortNumber and InstancePortNumber are integer
                  values between 1 and 65535
                - Protocol and InstanceProtocol is a string containing either 'TCP',
                - Protocol and InstanceProtocol is a string containing
                  either 'TCP',
                  'SSL', 'HTTP', or 'HTTPS'
                - SSLCertificateId is the ARN of an SSL certificate loaded into
                  AWS IAM
@@ -347,7 +362,7 @@ class ELBConnection(AWSQueryConnection):
        self.build_list_params(params, zones_to_add,
                               'AvailabilityZones.member.%d')
        obj = self.get_object('EnableAvailabilityZonesForLoadBalancer',
                              params, LoadBalancerZones)
        return obj.zones

    def disable_availability_zones(self, load_balancer_name, zones_to_remove):
@@ -372,7 +387,7 @@ class ELBConnection(AWSQueryConnection):
        self.build_list_params(params, zones_to_remove,
                               'AvailabilityZones.member.%d')
        obj = self.get_object('DisableAvailabilityZonesForLoadBalancer',
                              params, LoadBalancerZones)
        return obj.zones

    def modify_lb_attribute(self, load_balancer_name, attribute, value):
@@ -386,6 +401,7 @@ class ELBConnection(AWSQueryConnection):

            * crossZoneLoadBalancing - Boolean (true)
            * accessLog - :py:class:`AccessLogAttribute` instance
            * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance

        :type value: string
        :param value: The new value for the attribute
@@ -415,6 +431,11 @@ class ELBConnection(AWSQueryConnection):
                value.s3_bucket_prefix
            params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \
                value.emit_interval
        elif attribute.lower() == 'connectiondraining':
            params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \
                value.enabled and 'true' or 'false'
            params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \
                value.timeout
        else:
            raise ValueError('InvalidAttribute', attribute)
        return self.get_status('ModifyLoadBalancerAttributes', params,
@@ -445,14 +466,21 @@ class ELBConnection(AWSQueryConnection):
        :type attribute: string
        :param attribute: The attribute you wish to see.

            * accessLog - :py:class:`AccessLogAttribute` instance
            * crossZoneLoadBalancing - Boolean
            * connectionDraining - :py:class:`ConnectionDrainingAttribute`
              instance

        :rtype: Attribute dependent
        :return: The new value for the attribute
        """
        attributes = self.get_all_lb_attributes(load_balancer_name)
        if attribute.lower() == 'accesslog':
            return attributes.access_log
        if attribute.lower() == 'crosszoneloadbalancing':
            return attributes.cross_zone_load_balancing.enabled
        if attribute.lower() == 'connectiondraining':
            return attributes.connection_draining
        return None

    def register_instances(self, load_balancer_name, instances):
@@ -601,17 +629,19 @@ class ELBConnection(AWSQueryConnection):
            params['CookieExpirationPeriod'] = cookie_expiration_period
        return self.get_status('CreateLBCookieStickinessPolicy', params)

    def create_lb_policy(self, lb_name, policy_name, policy_type, policy_attributes):
    def create_lb_policy(self, lb_name, policy_name, policy_type,
                         policy_attributes):
        """
        Creates a new policy that contais the necessary attributes depending on
        the policy type. Policies are settings that are saved for your load
        balancer and that can be applied to the front-end listener, or
        the back-end application server.
        Creates a new policy that contains the necessary attributes
        depending on the policy type. Policies are settings that are
        saved for your load balancer and that can be applied to the
        front-end listener, or the back-end application server.

        """
        params = {'LoadBalancerName': lb_name,
                  'PolicyName': policy_name,
                  'PolicyTypeName': policy_type}
        for index, (name, value) in enumerate(policy_attributes.iteritems(), 1):
        for index, (name, value) in enumerate(six.iteritems(policy_attributes), 1):
            params['PolicyAttributes.member.%d.AttributeName' % index] = name
            params['PolicyAttributes.member.%d.AttributeValue' % index] = value
        else:
@@ -635,10 +665,14 @@ class ELBConnection(AWSQueryConnection):
        """
        params = {'LoadBalancerName': lb_name,
                  'LoadBalancerPort': lb_port}
        self.build_list_params(params, policies, 'PolicyNames.member.%d')
        if len(policies):
            self.build_list_params(params, policies, 'PolicyNames.member.%d')
        else:
            params['PolicyNames'] = ''
        return self.get_status('SetLoadBalancerPoliciesOfListener', params)

    def set_lb_policies_of_backend_server(self, lb_name, instance_port, policies):
    def set_lb_policies_of_backend_server(self, lb_name, instance_port,
                                          policies):
        """
        Replaces the current set of policies associated with a port on which
        the back-end server is listening with a new set of policies.
@@ -649,7 +683,8 @@ class ELBConnection(AWSQueryConnection):
            self.build_list_params(params, policies, 'PolicyNames.member.%d')
        else:
            params['PolicyNames'] = ''
        return self.get_status('SetLoadBalancerPoliciesForBackendServer', params)
        return self.get_status('SetLoadBalancerPoliciesForBackendServer',
                               params)

    def apply_security_groups_to_lb(self, name, security_groups):
        """

@@ -74,6 +74,31 @@ class AccessLogAttribute(object):
        elif name == 'EmitInterval':
            self.emit_interval = int(value)

class ConnectionDrainingAttribute(object):
    """
    Represents the ConnectionDraining segment of ELB attributes.
    """
    def __init__(self, connection=None):
        self.enabled = None
        self.timeout = None

    def __repr__(self):
        return 'ConnectionDraining(%s, %s)' % (
            self.enabled,
            self.timeout
        )

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        if name == 'Enabled':
            if value.lower() == 'true':
                self.enabled = True
            else:
                self.enabled = False
        elif name == 'Timeout':
            self.timeout = int(value)

class LbAttributes(object):
    """
@@ -84,17 +109,21 @@ class LbAttributes(object):
        self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute(
            self.connection)
        self.access_log = AccessLogAttribute(self.connection)
        self.connection_draining = ConnectionDrainingAttribute(self.connection)

    def __repr__(self):
        return 'LbAttributes(%s, %s)' % (
        return 'LbAttributes(%s, %s, %s)' % (
            repr(self.cross_zone_load_balancing),
            repr(self.access_log))
            repr(self.access_log),
            repr(self.connection_draining))

    def startElement(self, name, attrs, connection):
        if name == 'CrossZoneLoadBalancing':
            return self.cross_zone_load_balancing
        if name == 'AccessLog':
            return self.access_log
        if name == 'ConnectionDraining':
            return self.connection_draining

    def endElement(self, name, value, connection):
        pass

|
||||
from boto.ec2.elb.securitygroup import SecurityGroup
|
||||
from boto.ec2.instanceinfo import InstanceInfo
|
||||
from boto.resultset import ResultSet
|
||||
from boto.compat import six
|
||||
|
||||
|
||||
class Backend(object):
|
||||
@ -67,6 +68,7 @@ class LoadBalancerZones(object):
|
||||
def endElement(self, name, value, connection):
|
||||
pass
|
||||
|
||||
|
||||
class LoadBalancer(object):
|
||||
"""
|
||||
Represents an EC2 Load Balancer.
|
||||
@ -82,6 +84,7 @@ class LoadBalancer(object):
|
||||
check policy for this load balancer.
|
||||
:ivar boto.ec2.elb.policies.Policies policies: Cookie stickiness and
|
||||
other policies.
|
||||
:ivar str name: The name of the Load Balancer.
|
||||
:ivar str dns_name: The external DNS name for the balancer.
|
||||
:ivar str created_time: A date+time string showing when the
|
||||
load balancer was created.
|
||||
@ -186,7 +189,7 @@ class LoadBalancer(object):
|
||||
:param zones: The name of the zone(s) to add.
|
||||
|
||||
"""
|
||||
if isinstance(zones, basestring):
|
||||
if isinstance(zones, six.string_types):
|
||||
zones = [zones]
|
||||
new_zones = self.connection.enable_availability_zones(self.name, zones)
|
||||
self.availability_zones = new_zones
|
||||
@ -199,9 +202,10 @@ class LoadBalancer(object):
|
||||
:param zones: The name of the zone(s) to add.
|
||||
|
||||
"""
|
||||
if isinstance(zones, basestring):
|
||||
if isinstance(zones, six.string_types):
|
||||
zones = [zones]
|
||||
new_zones = self.connection.disable_availability_zones(self.name, zones)
|
||||
new_zones = self.connection.disable_availability_zones(
|
||||
self.name, zones)
|
||||
self.availability_zones = new_zones
|
||||
|
||||
def get_attributes(self, force=False):
|
||||
@ -266,7 +270,7 @@ class LoadBalancer(object):
|
||||
to add to this load balancer.
|
||||
|
||||
"""
|
||||
if isinstance(instances, basestring):
|
||||
if isinstance(instances, six.string_types):
|
||||
instances = [instances]
|
||||
new_instances = self.connection.register_instances(self.name,
|
||||
instances)
|
||||
@ -281,7 +285,7 @@ class LoadBalancer(object):
|
||||
to remove from this load balancer.
|
||||
|
||||
"""
|
||||
if isinstance(instances, basestring):
|
||||
if isinstance(instances, six.string_types):
|
||||
instances = [instances]
|
||||
new_instances = self.connection.deregister_instances(self.name,
|
||||
instances)
|
||||
@ -348,14 +352,13 @@ class LoadBalancer(object):
|
||||
policies)
|
||||
|
||||
def set_policies_of_backend_server(self, instance_port, policies):
|
||||
return self.connection.set_lb_policies_of_backend_server(self.name,
|
||||
instance_port,
|
||||
policies)
|
||||
|
||||
return self.connection.set_lb_policies_of_backend_server(
|
||||
self.name, instance_port, policies)
|
||||
|
||||
def create_cookie_stickiness_policy(self, cookie_expiration_period,
|
||||
policy_name):
|
||||
return self.connection.create_lb_cookie_stickiness_policy(cookie_expiration_period, self.name, policy_name)
|
||||
return self.connection.create_lb_cookie_stickiness_policy(
|
||||
cookie_expiration_period, self.name, policy_name)
|
||||
|
||||
def create_app_cookie_stickiness_policy(self, name, policy_name):
|
||||
return self.connection.create_app_cookie_stickiness_policy(name,
|
||||
@ -363,12 +366,12 @@ class LoadBalancer(object):
|
||||
policy_name)
|
||||
|
||||
def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id):
|
||||
return self.connection.set_lb_listener_SSL_certificate(self.name,
|
||||
lb_port,
|
||||
ssl_certificate_id)
|
||||
return self.connection.set_lb_listener_SSL_certificate(
|
||||
self.name, lb_port, ssl_certificate_id)
|
||||
|
||||
def create_lb_policy(self, policy_name, policy_type, policy_attribute):
|
||||
return self.connection.create_lb_policy(self.name, policy_name, policy_type, policy_attribute)
|
||||
return self.connection.create_lb_policy(
|
||||
self.name, policy_name, policy_type, policy_attribute)
|
||||
|
||||
def attach_subnets(self, subnets):
|
||||
"""
|
||||
@ -380,7 +383,7 @@ class LoadBalancer(object):
|
||||
:param subnets: The name of the subnet(s) to add.
|
||||
|
||||
"""
|
||||
if isinstance(subnets, basestring):
|
||||
if isinstance(subnets, six.string_types):
|
||||
subnets = [subnets]
|
||||
new_subnets = self.connection.attach_lb_to_subnets(self.name, subnets)
|
||||
self.subnets = new_subnets
|
||||
@ -393,9 +396,10 @@ class LoadBalancer(object):
|
||||
:param subnets: The name of the subnet(s) to detach.
|
||||
|
||||
"""
|
||||
if isinstance(subnets, basestring):
|
||||
if isinstance(subnets, six.string_types):
|
||||
subnets = [subnets]
|
||||
new_subnets = self.connection.detach_lb_from_subnets(self.name, subnets)
|
||||
new_subnets = self.connection.detach_lb_from_subnets(
|
||||
self.name, subnets)
|
||||
self.subnets = new_subnets
|
||||
|
||||
def apply_security_groups(self, security_groups):
|
||||
@ -408,8 +412,8 @@ class LoadBalancer(object):
|
||||
:param security_groups: The name of the security group(s) to add.
|
||||
|
||||
"""
|
||||
if isinstance(security_groups, basestring):
|
||||
if isinstance(security_groups, six.string_types):
|
||||
security_groups = [security_groups]
|
||||
new_sgs = self.connection.apply_security_groups_to_lb(
|
||||
self.name, security_groups)
|
||||
self.name, security_groups)
|
||||
self.security_groups = new_sgs
|
||||
|
||||
@@ -233,6 +233,9 @@ class Image(TaggedEC2Object):
            * i2.2xlarge
            * i2.4xlarge
            * i2.8xlarge
            * t2.micro
            * t2.small
            * t2.medium

        :type placement: string
        :param placement: The Availability Zone to launch the instance into.

@@ -83,7 +83,7 @@ class KeyPair(EC2Object):
            fp = open(file_path, 'wb')
            fp.write(self.material)
            fp.close()
            os.chmod(file_path, 0600)
            os.chmod(file_path, 0o600)
            return True
        else:
            raise BotoClientError('KeyPair contains no material')

@@ -167,6 +167,70 @@ class NetworkInterface(TaggedEC2Object):
        else:
            setattr(self, name, value)

    def _update(self, updated):
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False, dry_run=False):
        """
        Update the data associated with this ENI by querying EC2.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         ENI the update method returns quietly. If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        rs = self.connection.get_all_network_interfaces(
            [self.id],
            dry_run=dry_run
        )
        if len(rs) > 0:
            self._update(rs[0])
        elif validate:
            raise ValueError('%s is not a valid ENI ID' % self.id)
        return self.status

    def attach(self, instance_id, device_index, dry_run=False):
        """
        Attach this ENI to an EC2 instance.

        :type instance_id: str
        :param instance_id: The ID of the EC2 instance to which it will
                            be attached.

        :type device_index: int
        :param device_index: The interface number, N, on the instance (e.g. ethN)

        :rtype: bool
        :return: True if successful
        """
        return self.connection.attach_network_interface(
            self.id,
            instance_id,
            device_index,
            dry_run=dry_run
        )

    def detach(self, force=False, dry_run=False):
        """
        Detach this ENI from an EC2 instance.

        :type force: bool
        :param force: Forces detachment if the previous detachment
                      attempt did not occur cleanly.

        :rtype: bool
        :return: True if successful
        """
        attachment_id = getattr(self.attachment, 'id', None)

        return self.connection.detach_network_interface(
            attachment_id,
            force,
            dry_run=dry_run
        )

    def delete(self, dry_run=False):
        return self.connection.delete_network_interface(
            self.id,

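The new instance methods wrap the connection-level ENI calls; a minimal sketch using the ID-filtered lookup added above (IDs are hypothetical):

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    eni = conn.get_all_network_interfaces(['eni-12345678'])[0]  # hypothetical ENI ID

    eni.attach('i-12345678', 1)   # attach as eth1 on a hypothetical instance
    eni.update()                  # refresh attachment status from EC2
    eni.detach(force=False)
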
@@ -18,7 +18,6 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from boto.resultset import ResultSet
from boto.ec2.ec2object import EC2Object
from boto.utils import parse_ts
@@ -82,13 +81,13 @@ class ReservedInstancesOffering(EC2Object):
            self.marketplace = True if value == 'true' else False

    def describe(self):
        print 'ID=%s' % self.id
        print '\tInstance Type=%s' % self.instance_type
        print '\tZone=%s' % self.availability_zone
        print '\tDuration=%s' % self.duration
        print '\tFixed Price=%s' % self.fixed_price
        print '\tUsage Price=%s' % self.usage_price
        print '\tDescription=%s' % self.description
        print('ID=%s' % self.id)
        print('\tInstance Type=%s' % self.instance_type)
        print('\tZone=%s' % self.availability_zone)
        print('\tDuration=%s' % self.duration)
        print('\tFixed Price=%s' % self.fixed_price)
        print('\tUsage Price=%s' % self.usage_price)
        print('\tDescription=%s' % self.description)

    def purchase(self, instance_count=1, dry_run=False):
        return self.connection.purchase_reserved_instance_offering(

@@ -41,6 +41,7 @@ class Snapshot(TaggedEC2Object):
        self.owner_alias = None
        self.volume_size = None
        self.description = None
        self.encrypted = None

    def __repr__(self):
        return 'Snapshot:%s' % self.id
@@ -65,6 +66,8 @@ class Snapshot(TaggedEC2Object):
            self.volume_size = value
        elif name == 'description':
            self.description = value
        elif name == 'encrypted':
            self.encrypted = (value.lower() == 'true')
        else:
            setattr(self, name, value)

@@ -152,6 +155,7 @@ class Snapshot(TaggedEC2Object):
            self.id,
            volume_type,
            iops,
            self.encrypted,
            dry_run=dry_run
        )

@@ -44,6 +44,7 @@ class Volume(TaggedEC2Object):
    :ivar type: The type of volume (standard or consistent-iops)
    :ivar iops: If this volume is of type consistent-iops, this is
        the number of IOPS provisioned (10-300).
    :ivar encrypted: True if this volume is encrypted.
    """

    def __init__(self, connection=None):
@@ -57,6 +58,7 @@ class Volume(TaggedEC2Object):
        self.zone = None
        self.type = None
        self.iops = None
        self.encrypted = None

    def __repr__(self):
        return 'Volume:%s' % self.id
@@ -92,6 +94,8 @@ class Volume(TaggedEC2Object):
            self.type = value
        elif name == 'iops':
            self.iops = int(value)
        elif name == 'encrypted':
            self.encrypted = (value.lower() == 'true')
        else:
            setattr(self, name, value)

@@ -21,6 +21,7 @@

import boto
from boto.connection import AWSQueryConnection, AWSAuthConnection
from boto.exception import BotoServerError
import time
import urllib
import xml.sax
@@ -61,20 +62,22 @@ class ECSConnection(AWSQueryConnection):
        if page:
            params['ItemPage'] = page
        response = self.make_request(None, params, "/onca/xml")
        body = response.read()
        body = response.read().decode('utf-8')
        boto.log.debug(body)

        if response.status != 200:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
            raise BotoServerError(response.status, response.reason, body)

        if itemSet is None:
            rs = ItemSet(self, action, params, page)
        else:
            rs = itemSet
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        xml.sax.parseString(body.encode('utf-8'), h)
        if not rs.is_valid:
            raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0]))
        return rs

    #
@@ -91,3 +94,12 @@ class ECSConnection(AWSQueryConnection):
        """
        params['SearchIndex'] = search_index
        return self.get_response('ItemSearch', params)

    def item_lookup(self, **params):
        """
        Returns items that satisfy the lookup query.

        For a full list of parameters, see:
        http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf
        """
        return self.get_response('ItemLookup', params)
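A minimal sketch of calling this connection (the Product Advertising API, despite the "ECS" name); the credentials are placeholders, and a real request would also need the Associates parameters the linked PDF describes:

    import boto

    conn = boto.connect_ecs('access-key', 'secret-key')  # hypothetical credentials
    items = conn.item_search('Books', Keywords='python')
    for item in items:
        print(item.ASIN)
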
@@ -22,7 +22,7 @@

import xml.sax
import cgi
from StringIO import StringIO
from boto.compat import six, StringIO

class ResponseGroup(xml.sax.ContentHandler):
    """A Generic "Response Group", which can
@@ -90,14 +90,14 @@ class Item(ResponseGroup):

    def __init__(self, connection=None):
        """Initialize this Item"""
        super(Item, self).__init__(connection, "Item")
        ResponseGroup.__init__(self, connection, "Item")

class ItemSet(ResponseGroup):
    """A special ResponseGroup that has built-in paging, and
    only creates new Items on the "Item" tag"""

    def __init__(self, connection, action, params, page=0):
        super(ItemSet, self).__init__(connection, "Items")
        ResponseGroup.__init__(self, connection, "Items")
        self.objs = []
        self.iter = None
        self.page = page
@@ -106,6 +106,8 @@ class ItemSet(ResponseGroup):
        self.curItem = None
        self.total_results = 0
        self.total_pages = 0
        self.is_valid = False
        self.errors = []

    def startElement(self, name, attrs, connection):
        if name == "Item":
@@ -119,7 +121,14 @@ class ItemSet(ResponseGroup):
            self.total_results = value
        elif name == 'TotalPages':
            self.total_pages = value
        elif name == "Item":
        elif name == 'IsValid':
            if value == 'True':
                self.is_valid = True
        elif name == 'Code':
            self.errors.append({'Code': value, 'Message': None})
        elif name == 'Message':
            self.errors[-1]['Message'] = value
        elif name == 'Item':
            self.objs.append(self.curItem)
            self._xml.write(self.curItem.to_xml())
            self.curItem = None
@@ -127,22 +136,24 @@ class ItemSet(ResponseGroup):
            self.curItem.endElement(name, value, connection)
        return None

    def next(self):
    def __next__(self):
        """Special paging functionality"""
        if self.iter is None:
            self.iter = iter(self.objs)
        try:
            return self.iter.next()
            return next(self.iter)
        except StopIteration:
            self.iter = None
            self.objs = []
            if int(self.page) < int(self.total_pages):
                self.page += 1
                self._connection.get_response(self.action, self.params, self.page, self)
                return self.next()
                return next(self)
            else:
                raise

    next = __next__

    def __iter__(self):
        return self

@@ -150,4 +161,4 @@ class ItemSet(ResponseGroup):
        """Override to first fetch everything"""
        for item in self:
            pass
        return super(ItemSet, self).to_xml()
        return ResponseGroup.to_xml(self)

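The ``__next__``/``next`` shuffle is the standard way to keep one iterator class working on both Python lines: Python 3's ``for`` calls ``__next__``, Python 2's calls ``next``, and the module-level ``next()`` builtin papers over the difference when delegating. A minimal sketch of the same pattern outside boto:

    class Pager(object):
        def __init__(self, pages):
            self.pages = list(pages)   # each page is a list of items
            self.iter = None

        def __next__(self):            # Python 3 iterator protocol
            if self.iter is None:
                self.iter = iter(self.pages.pop(0)) if self.pages else iter([])
            try:
                return next(self.iter)  # builtin works on 2 and 3
            except StopIteration:
                if self.pages:          # move on to the next "page"
                    self.iter = iter(self.pages.pop(0))
                    return next(self)
                raise

        next = __next__                # Python 2 iterator protocol

        def __iter__(self):
            return self

    for item in Pager([[1, 2], [3]]):
        print(item)   # prints 1, 2, 3
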
@@ -1657,7 +1657,7 @@ class ElastiCacheConnection(AWSQueryConnection):
        params['ContentType'] = 'JSON'
        response = self.make_request(action=action, verb='POST',
                                     path='/', params=params)
        body = response.read()
        body = response.read().decode('utf-8')
        boto.log.debug(body)
        if response.status == 200:
            return json.loads(body)

@@ -923,7 +923,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
            headers = {}
        response = super(ElasticTranscoderConnection, self).make_request(
            verb, resource, headers=headers, data=data)
        body = json.load(response)
        body = json.loads(response.read().decode('utf-8'))
        if response.status == expected_status:
            return body
        else:

@@ -26,9 +26,9 @@
This module provides an interface to the Elastic MapReduce (EMR)
service from AWS.
"""
from connection import EmrConnection
from step import Step, StreamingStep, JarStep
from bootstrap_action import BootstrapAction
from boto.emr.connection import EmrConnection
from boto.emr.step import Step, StreamingStep, JarStep
from boto.emr.bootstrap_action import BootstrapAction
from boto.regioninfo import RegionInfo, get_regions

@@ -20,12 +20,14 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from boto.compat import six

class BootstrapAction(object):
    def __init__(self, name, path, bootstrap_action_args):
        self.name = name
        self.path = path

        if isinstance(bootstrap_action_args, basestring):
        if isinstance(bootstrap_action_args, six.string_types):
            bootstrap_action_args = [bootstrap_action_args]

        self.bootstrap_action_args = bootstrap_action_args

@@ -37,6 +37,7 @@ from boto.emr.emrobject import AddInstanceGroupsResponse, BootstrapActionList, \
from boto.emr.step import JarStep
from boto.connection import AWSQueryConnection
from boto.exception import EmrResponseError
from boto.compat import six


class EmrConnection(AWSQueryConnection):
@@ -281,7 +282,7 @@ class EmrConnection(AWSQueryConnection):
            value for that tag should be the empty string
            (e.g. '') or None.
        """
        assert isinstance(resource_id, basestring)
        assert isinstance(resource_id, six.string_types)
        params = {
            'ResourceId': resource_id,
        }
@@ -333,7 +334,7 @@ class EmrConnection(AWSQueryConnection):
        :type steps: list(boto.emr.Step)
        :param steps: A list of steps to add to the job
        """
        if not isinstance(steps, types.ListType):
        if not isinstance(steps, list):
            steps = [steps]
        params = {}
        params['JobFlowId'] = jobflow_id
@@ -356,7 +357,7 @@ class EmrConnection(AWSQueryConnection):
        :type instance_groups: list(boto.emr.InstanceGroup)
        :param instance_groups: A list of instance groups to add to the job
        """
        if not isinstance(instance_groups, types.ListType):
        if not isinstance(instance_groups, list):
            instance_groups = [instance_groups]
        params = {}
        params['JobFlowId'] = jobflow_id
@@ -377,9 +378,9 @@ class EmrConnection(AWSQueryConnection):
        :type new_sizes: list(int)
        :param new_sizes: A list of the new sizes for each instance group
        """
        if not isinstance(instance_group_ids, types.ListType):
        if not isinstance(instance_group_ids, list):
            instance_group_ids = [instance_group_ids]
        if not isinstance(new_sizes, types.ListType):
        if not isinstance(new_sizes, list):
            new_sizes = [new_sizes]

        instance_groups = zip(instance_group_ids, new_sizes)
@@ -409,7 +410,8 @@ class EmrConnection(AWSQueryConnection):
                    ami_version=None,
                    api_params=None,
                    visible_to_all_users=None,
                    job_flow_role=None):
                    job_flow_role=None,
                    service_role=None):
        """
        Runs a job flow
        :type name: str
@@ -491,6 +493,10 @@ class EmrConnection(AWSQueryConnection):
            ``EMRJobflowDefault``. In order to use the default role,
            you must have already created it using the CLI.

        :type service_role: str
        :param service_role: The IAM role that will be assumed by the Amazon
            EMR service to access AWS resources on your behalf.

        :rtype: str
        :return: The jobflow id
        """
@@ -524,7 +530,7 @@ class EmrConnection(AWSQueryConnection):
        # Instance group args (for spot instances or a heterogenous cluster)
        list_args = self._build_instance_group_list_args(instance_groups)
        instance_params = dict(
            ('Instances.%s' % k, v) for k, v in list_args.iteritems()
            ('Instances.%s' % k, v) for k, v in six.iteritems(list_args)
        )
        params.update(instance_params)

@@ -553,7 +559,7 @@ class EmrConnection(AWSQueryConnection):
            params['AdditionalInfo'] = additional_info

        if api_params:
            for key, value in api_params.iteritems():
            for key, value in six.iteritems(api_params):
                if value is None:
                    params.pop(key, None)
                else:
@@ -568,6 +574,9 @@ class EmrConnection(AWSQueryConnection):
        if job_flow_role is not None:
            params['JobFlowRole'] = job_flow_role

        if service_role is not None:
            params['ServiceRole'] = service_role

        response = self.get_object(
            'RunJobFlow', params, RunJobFlowResponse, verb='POST')
        return response.jobflowid
@ -641,27 +650,27 @@ class EmrConnection(AWSQueryConnection):
|
||||
return step_params
|
||||
|
||||
def _build_bootstrap_action_list(self, bootstrap_actions):
|
||||
if not isinstance(bootstrap_actions, types.ListType):
|
||||
if not isinstance(bootstrap_actions, list):
|
||||
bootstrap_actions = [bootstrap_actions]
|
||||
|
||||
params = {}
|
||||
for i, bootstrap_action in enumerate(bootstrap_actions):
|
||||
for key, value in bootstrap_action.iteritems():
|
||||
for key, value in six.iteritems(bootstrap_action):
|
||||
params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
|
||||
return params
|
||||
|
||||
def _build_step_list(self, steps):
|
||||
if not isinstance(steps, types.ListType):
|
||||
if not isinstance(steps, list):
|
||||
steps = [steps]
|
||||
|
||||
params = {}
|
||||
for i, step in enumerate(steps):
|
||||
for key, value in step.iteritems():
|
||||
for key, value in six.iteritems(step):
|
||||
params['Steps.member.%s.%s' % (i+1, key)] = value
|
||||
return params
|
||||
|
||||
def _build_string_list(self, field, items):
|
||||
if not isinstance(items, types.ListType):
|
||||
if not isinstance(items, list):
|
||||
items = [items]
|
||||
|
||||
params = {}
|
||||
@ -673,7 +682,7 @@ class EmrConnection(AWSQueryConnection):
|
||||
assert isinstance(tags, dict)
|
||||
|
||||
params = {}
|
||||
for i, key_value in enumerate(sorted(tags.iteritems()), start=1):
|
||||
for i, key_value in enumerate(sorted(six.iteritems(tags)), start=1):
|
||||
key, value = key_value
|
||||
current_prefix = 'Tags.member.%s' % i
|
||||
params['%s.Key' % current_prefix] = key
|
||||
@ -734,12 +743,12 @@ class EmrConnection(AWSQueryConnection):
|
||||
a comparable dict for use in making a RunJobFlow or AddInstanceGroups
|
||||
request.
|
||||
"""
|
||||
if not isinstance(instance_groups, types.ListType):
|
||||
if not isinstance(instance_groups, list):
|
||||
instance_groups = [instance_groups]
|
||||
|
||||
params = {}
|
||||
for i, instance_group in enumerate(instance_groups):
|
||||
ig_dict = self._build_instance_group_args(instance_group)
|
||||
for key, value in ig_dict.iteritems():
|
||||
for key, value in six.iteritems(ig_dict):
|
||||
params['InstanceGroups.member.%d.%s' % (i+1, key)] = value
|
||||
return params
|
||||
|
||||
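The change pattern in this file is uniform: `types.ListType` becomes the builtin `list`, `basestring` becomes `six.string_types`, and `dict.iteritems()` becomes `six.iteritems(...)`, since all three Python 2 spellings are gone in Python 3. A minimal standalone sketch of the two six idioms (plain `six` here; boto re-exports it as `boto.compat.six`):

    import six

    params = {'JobFlowId': 'j-ABC123'}  # illustrative value
    # six.iteritems() maps to dict.iteritems() on Python 2 and dict.items() on 3.
    for key, value in six.iteritems(params):
        print(key, value)
    # six.string_types is (basestring,) on Python 2 and (str,) on Python 3.
    assert isinstance('j-ABC123', six.string_types)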
@ -301,7 +301,7 @@ class ClusterSummaryList(EmrObject):

class StepConfig(EmrObject):
Fields = set([
'Jar'
'Jar',
'MainClass'
])

@ -434,11 +434,15 @@ class StepSummary(EmrObject):
def __init__(self, connection=None):
self.connection = connection
self.status = None
self.config = None

def startElement(self, name, attrs, connection):
if name == 'Status':
self.status = ClusterStatus()
return self.status
elif name == 'Config':
self.config = StepConfig()
return self.config
else:
return None
@ -20,6 +20,8 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from boto.compat import six

class Step(object):
"""
@ -73,7 +75,7 @@ class JarStep(Step):
self._main_class = main_class
self.action_on_failure = action_on_failure

if isinstance(step_args, basestring):
if isinstance(step_args, six.string_types):
step_args = [step_args]

self.step_args = step_args
@ -143,7 +145,7 @@ class StreamingStep(Step):
self.output = output
self._jar = jar

if isinstance(step_args, basestring):
if isinstance(step_args, six.string_types):
step_args = [step_args]

self.step_args = step_args
@ -26,10 +26,12 @@ Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
from boto import handler
from boto.compat import json
from boto.resultset import ResultSet

import boto

from boto import handler
from boto.compat import json, six, StandardError
from boto.resultset import ResultSet

class BotoClientError(StandardError):
"""
@ -80,34 +82,56 @@ class BotoServerError(StandardError):
self.request_id = None
self.error_code = None
self._error_message = None
self.message = ''
self.box_usage = None

if isinstance(self.body, bytes):
try:
self.body = self.body.decode('utf-8')
except UnicodeDecodeError:
boto.log.debug('Unable to decode body from bytes!')

# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException), pe:
# What if it's JSON? Let's try that.
# Check if it looks like a ``dict``.
if hasattr(self.body, 'items'):
# It's not a string, so trying to parse it will fail.
# But since it's data, we can work with that.
self.request_id = self.body.get('RequestId', None)

if 'Error' in self.body:
# XML-style
error = self.body.get('Error', {})
self.error_code = error.get('Code', None)
self.message = error.get('Message', None)
else:
# JSON-style.
self.message = self.body.get('message', None)
else:
try:
parsed = json.loads(self.body)
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException) as pe:
# What if it's JSON? Let's try that.
try:
parsed = json.loads(self.body)

if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']
if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']

except ValueError:
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
except (TypeError, ValueError):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None

def __getattr__(self, name):
if name == 'error_message':
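The rewritten constructor keeps the same parse order as before, just with Python 3-safe syntax: try XML first, fall back to JSON inside the new nested `try`, and if both fail preserve the raw body as the message. A simplified sketch of that fallback chain (not the real class, which also handles `dict` bodies and request IDs):

    import json
    import xml.sax

    def classify_error_body(body):
        try:
            xml.sax.parseString(body.encode('utf-8'), xml.sax.ContentHandler())
            return 'xml'
        except (TypeError, xml.sax.SAXParseException):
            try:
                json.loads(body)
                return 'json'
            except (TypeError, ValueError):
                return None  # unparsable: keep the raw text as the message

    assert classify_error_body('<Error><Code>Boom</Code></Error>') == 'xml'
    assert classify_error_body('{"message": "boom"}') == 'json'
    assert classify_error_body('plain text from Eucalyptus') is None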
@ -21,8 +21,8 @@

import boto

from connection import FileConnection as Connection
from key import Key
from bucket import Bucket
from boto.file.connection import FileConnection as Connection
from boto.file.key import Key
from boto.file.bucket import Bucket

__all__ = ['Connection', 'Key', 'Bucket']
@ -23,7 +23,7 @@
# File representation of bucket, for use with "file://" URIs.

import os
from key import Key
from boto.file.key import Key
from boto.file.simpleresultset import SimpleResultSet
from boto.s3.bucketlistresultset import BucketListResultSet
@ -21,7 +21,7 @@

# File representation of connection, for use with "file://" URIs.

from bucket import Bucket
from boto.file.bucket import Bucket

class FileConnection(object):
@ -22,9 +22,11 @@

# File representation of key, for use with "file://" URIs.

import os, shutil, StringIO
import os, shutil
import sys

from boto.compat import StringIO

class Key(object):

KEY_STREAM_READABLE = 0x01
@ -182,7 +184,7 @@ class Key(object):
:returns: The contents of the file as a string
"""

fp = StringIO.StringIO()
fp = StringIO()
self.get_contents_to_file(fp)
return fp.getvalue()
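All of the boto.file changes follow two related rules: implicit relative imports (`from key import Key`) are a Python 2-only feature removed by PEP 328, and the `StringIO` module no longer exists under that name on Python 3, so boto routes it through `boto.compat`. An illustrative contrast:

    # from key import Key             # implicit relative import: Python 2 only
    from boto.file.key import Key     # absolute import: works on 2 and 3
    from boto.compat import StringIO  # six-style shim; io.StringIO on Python 3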
@ -86,7 +86,7 @@ def needs_caller_reference(func):
def api_action(*api):

def decorator(func):
action = ''.join(api or map(str.capitalize, func.func_name.split('_')))
action = ''.join(api or map(str.capitalize, func.__name__.split('_')))
response = ResponseFactory(action)
if hasattr(boto.fps.response, action + 'Response'):
response = getattr(boto.fps.response, action + 'Response')
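`func_name` was a Python 2-only alias; `__name__` works on both interpreters and is the only spelling Python 3 accepts. A quick check:

    def settle_debt():
        pass

    assert settle_debt.__name__ == 'settle_debt'  # func.func_name fails on Py3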
@ -22,6 +22,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from decimal import Decimal
from boto.compat import filter, map

def ResponseFactory(action):
@ -19,21 +19,20 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import with_statement

import os
import math
import threading
import hashlib
import time
import logging
from Queue import Queue, Empty
from boto.compat import Queue
import binascii

from .utils import DEFAULT_PART_SIZE, minimum_part_size, chunk_hashes, \
tree_hash, bytes_to_hex
from .exceptions import UploadArchiveError, DownloadArchiveError, \
TreeHashDoesNotMatchError
from boto.glacier.utils import DEFAULT_PART_SIZE, minimum_part_size, \
chunk_hashes, tree_hash, bytes_to_hex
from boto.glacier.exceptions import UploadArchiveError, \
DownloadArchiveError, \
TreeHashDoesNotMatchError

_END_SENTINEL = object()
@ -68,9 +67,9 @@ class ConcurrentTransferer(object):

def _add_work_items_to_queue(self, total_parts, worker_queue, part_size):
log.debug("Adding work items to queue.")
for i in xrange(total_parts):
for i in range(total_parts):
worker_queue.put((i, part_size))
for i in xrange(self._num_threads):
for i in range(self._num_threads):
worker_queue.put(_END_SENTINEL)

@ -146,7 +145,7 @@ class ConcurrentUploader(ConcurrentTransferer):
try:
self._wait_for_upload_threads(hash_chunks, result_queue,
total_parts)
except UploadArchiveError, e:
except UploadArchiveError as e:
log.debug("An error occurred while uploading an archive, "
"aborting multipart upload.")
self._api.abort_multipart_upload(self._vault_name, upload_id)
@ -159,7 +158,7 @@ class ConcurrentUploader(ConcurrentTransferer):
return response['ArchiveId']

def _wait_for_upload_threads(self, hash_chunks, result_queue, total_parts):
for _ in xrange(total_parts):
for _ in range(total_parts):
result = result_queue.get()
if isinstance(result, Exception):
log.debug("An error was found in the result queue, terminating "
@ -177,7 +176,7 @@ class ConcurrentUploader(ConcurrentTransferer):
def _start_upload_threads(self, result_queue, upload_id, worker_queue,
filename):
log.debug("Starting threads.")
for _ in xrange(self._num_threads):
for _ in range(self._num_threads):
thread = UploadWorkerThread(self._api, self._vault_name, filename,
upload_id, worker_queue, result_queue)
time.sleep(0.2)
@ -231,11 +230,11 @@ class UploadWorkerThread(TransferThread):

def _process_chunk(self, work):
result = None
for i in xrange(self._num_retries + 1):
for i in range(self._num_retries + 1):
try:
result = self._upload_chunk(work)
break
except self._retry_exceptions, e:
except self._retry_exceptions as e:
log.error("Exception caught uploading part number %s for "
"vault %s, attempt: (%s / %s), filename: %s, "
"exception: %s, msg: %s",
@ -306,7 +305,7 @@ class ConcurrentDownloader(ConcurrentTransferer):
self._start_download_threads(result_queue, worker_queue)
try:
self._wait_for_download_threads(filename, result_queue, total_parts)
except DownloadArchiveError, e:
except DownloadArchiveError as e:
log.debug("An error occurred while downloading an archive: %s", e)
raise e
log.debug("Download completed.")
@ -324,7 +323,7 @@ class ConcurrentDownloader(ConcurrentTransferer):
"""
hash_chunks = [None] * total_parts
with open(filename, "wb") as f:
for _ in xrange(total_parts):
for _ in range(total_parts):
result = result_queue.get()
if isinstance(result, Exception):
log.debug("An error was found in the result queue, "
@ -352,7 +351,7 @@ class ConcurrentDownloader(ConcurrentTransferer):

def _start_download_threads(self, result_queue, worker_queue):
log.debug("Starting threads.")
for _ in xrange(self._num_threads):
for _ in range(self._num_threads):
thread = DownloadWorkerThread(self._job, worker_queue, result_queue)
time.sleep(0.2)
thread.start()
@ -393,11 +392,11 @@ class DownloadWorkerThread(TransferThread):
:param work:
"""
result = None
for _ in xrange(self._num_retries):
for _ in range(self._num_retries):
try:
result = self._download_chunk(work)
break
except self._retry_exceptions, e:
except self._retry_exceptions as e:
log.error("Exception caught downloading part number %s for "
"job %s", work[0], self._job,)
time.sleep(self._time_between_retries)
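Two mechanical substitutions dominate this file: `except SomeError, e:` becomes `except SomeError as e:` (the comma form is a syntax error on Python 3), and `xrange` becomes `range` (safe here because every loop only iterates; Python 2's `range` merely builds the list up front). Self-contained illustration:

    try:
        raise ValueError('boom')
    except ValueError as e:  # 'except ValueError, e:' won't even parse on Py3
        print(e)

    for i in range(3):  # xrange exists only on Py2; range exists on both
        print(i)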
@ -20,12 +20,12 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import with_statement
import math
import socket

from .exceptions import TreeHashDoesNotMatchError, DownloadArchiveError
from .utils import tree_hash_from_str
from boto.glacier.exceptions import TreeHashDoesNotMatchError, \
DownloadArchiveError
from boto.glacier.utils import tree_hash_from_str

class Job(object):
@ -123,7 +123,7 @@ class Job(object):
verify_hashes, retry_exceptions)

def download_to_fileobj(self, output_file, chunk_size=DefaultPartSize,
verify_hashes=True,
verify_hashes=True,
retry_exceptions=(socket.error,)):
"""Download an archive to a file object.

@ -146,7 +146,7 @@ class Job(object):

def _download_to_fileob(self, fileobj, num_chunks, chunk_size, verify_hashes,
retry_exceptions):
for i in xrange(num_chunks):
for i in range(num_chunks):
byte_range = ((i * chunk_size), ((i + 1) * chunk_size) - 1)
data, expected_tree_hash = self._download_byte_range(
byte_range, retry_exceptions)
@ -163,13 +163,13 @@ class Job(object):
# You can occasionally get socket.errors when downloading
# chunks from Glacier, so each chunk can be retried up
# to 5 times.
for _ in xrange(5):
for _ in range(5):
try:
response = self.get_output(byte_range)
data = response.read()
expected_tree_hash = response['TreeHash']
return data, expected_tree_hash
except retry_exceptions, e:
except retry_exceptions as e:
continue
else:
raise DownloadArchiveError("There was an error downloading"
@ -27,9 +27,9 @@ import os
import boto.glacier
from boto.compat import json
from boto.connection import AWSAuthConnection
from .exceptions import UnexpectedHTTPResponseError
from .response import GlacierResponse
from .utils import ResettingFileSender
from boto.glacier.exceptions import UnexpectedHTTPResponseError
from boto.glacier.response import GlacierResponse
from boto.glacier.utils import ResettingFileSender

class Layer1(AWSAuthConnection):
@ -89,12 +89,13 @@ class Layer1(AWSAuthConnection):
self.region = region
self.account_id = account_id
super(Layer1, self).__init__(region.endpoint,
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
https_connection_factory,
path, provider, security_token,
suppress_consec_slashes, profile_name=profile_name)
aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
https_connection_factory,
path, provider, security_token,
suppress_consec_slashes,
profile_name=profile_name)

def _required_auth_capability(self):
return ['hmac-v4']
@ -107,10 +108,10 @@ class Layer1(AWSAuthConnection):
headers['x-amz-glacier-version'] = self.Version
uri = '/%s/%s' % (self.account_id, resource)
response = super(Layer1, self).make_request(verb, uri,
params=params,
headers=headers,
sender=sender,
data=data)
params=params,
headers=headers,
sender=sender,
data=data)
if response.status in ok_responses:
return GlacierResponse(response, response_headers)
else:
@ -826,9 +827,9 @@ class Layer1(AWSAuthConnection):
else:
sender = None
return self.make_request('POST', uri, headers=headers,
sender=sender,
data=archive, ok_responses=(201,),
response_headers=response_headers)
sender=sender,
data=archive, ok_responses=(201,),
response_headers=response_headers)

def _is_file_like(self, archive):
return hasattr(archive, 'seek') and hasattr(archive, 'tell')
@ -21,8 +21,8 @@
# IN THE SOFTWARE.
#

from .layer1 import Layer1
from .vault import Vault
from boto.glacier.layer1 import Layer1
from boto.glacier.vault import Vault

class Layer2(object):
@ -37,7 +37,7 @@ class GlacierResponse(dict):
for header_name, item_name in response_headers:
self[item_name] = http_response.getheader(header_name)
if http_response.getheader('Content-Type') == 'application/json':
body = json.loads(http_response.read())
body = json.loads(http_response.read().decode('utf-8'))
self.update(body)
size = http_response.getheader('Content-Length', None)
if size is not None:
@ -21,6 +21,9 @@
#
import hashlib
import math
import binascii

from boto.compat import six

_MEGABYTE = 1024 * 1024
@ -71,12 +74,12 @@ def minimum_part_size(size_in_bytes, default_part_size=DEFAULT_PART_SIZE):
def chunk_hashes(bytestring, chunk_size=_MEGABYTE):
chunk_count = int(math.ceil(len(bytestring) / float(chunk_size)))
hashes = []
for i in xrange(chunk_count):
for i in range(chunk_count):
start = i * chunk_size
end = (i + 1) * chunk_size
hashes.append(hashlib.sha256(bytestring[start:end]).digest())
if not hashes:
return [hashlib.sha256('').digest()]
return [hashlib.sha256(b'').digest()]
return hashes

@ -121,20 +124,29 @@ def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024):
are returned in hex.

"""
# Python 3+, not binary
if six.PY3 and hasattr(fileobj, 'mode') and 'b' not in fileobj.mode:
raise ValueError('File-like object must be opened in binary mode!')

linear_hash = hashlib.sha256()
chunks = []
chunk = fileobj.read(chunk_size)
while chunk:
# It's possible to get a file-like object that has no mode (checked
# above) and returns something other than bytes (e.g. str). So here
# we try to catch that and encode to bytes.
if not isinstance(chunk, bytes):
chunk = chunk.encode(getattr(fileobj, 'encoding', '') or 'utf-8')
linear_hash.update(chunk)
chunks.append(hashlib.sha256(chunk).digest())
chunk = fileobj.read(chunk_size)
if not chunks:
chunks = [hashlib.sha256('').digest()]
chunks = [hashlib.sha256(b'').digest()]
return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks))

def bytes_to_hex(str_as_bytes):
return ''.join(["%02x" % ord(x) for x in str_as_bytes]).strip()
return binascii.hexlify(str_as_bytes)

def tree_hash_from_str(str_as_bytes):
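The byte-literal fixes above matter because `hashlib.sha256('')` rejects text on Python 3; hashes operate on bytes only. A hedged usage sketch of the utilities this hunk touches (the function names are real `boto.glacier.utils` members; the payload is made up):

    from boto.glacier.utils import bytes_to_hex, chunk_hashes, tree_hash

    payload = b'x' * (3 * 1024 * 1024)  # three 1 MiB chunks
    # SHA-256 each 1 MiB chunk, combine pairwise, then hex-encode the root.
    digest = bytes_to_hex(tree_hash(chunk_hashes(payload)))
    print(digest)  # now a bytes value, since bytes_to_hex uses binascii.hexlify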
@ -21,12 +21,13 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import with_statement
from .exceptions import UploadArchiveError
from .job import Job
from .writer import compute_hashes_from_fileobj, resume_file_upload, Writer
from .concurrent import ConcurrentUploader
from .utils import minimum_part_size, DEFAULT_PART_SIZE
import codecs
from boto.glacier.exceptions import UploadArchiveError
from boto.glacier.job import Job
from boto.glacier.writer import compute_hashes_from_fileobj, \
resume_file_upload, Writer
from boto.glacier.concurrent import ConcurrentUploader
from boto.glacier.utils import minimum_part_size, DEFAULT_PART_SIZE
import os.path

@ -54,8 +55,6 @@ class Vault(object):
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
value = response_data[response_name]
if isinstance(value, unicode):
value = value.encode('utf8')
setattr(self, attr_name, value)
else:
for response_name, attr_name, default in self.ResponseDataElements:
@ -227,7 +226,7 @@ class Vault(object):
for part_desc in part_list_response['Parts']:
part_index = self._range_string_to_part_index(
part_desc['RangeInBytes'], part_size)
part_tree_hash = part_desc['SHA256TreeHash'].decode('hex')
part_tree_hash = codecs.decode(part_desc['SHA256TreeHash'], 'hex_codec')
part_hash_map[part_index] = part_tree_hash

if not file_obj:
@ -343,9 +342,9 @@ class Vault(object):
rparams = {}

if start_date is not None:
rparams['StartDate'] = start_date.isoformat()
rparams['StartDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%S%Z')
if end_date is not None:
rparams['EndDate'] = end_date.isoformat()
rparams['EndDate'] = end_date.strftime('%Y-%m-%dT%H:%M:%S%Z')
if limit is not None:
rparams['Limit'] = limit
@ -53,7 +53,7 @@ class _Partitioner(object):
self._buffer_size = 0

def write(self, data):
if data == '':
if data == b'':
return
self._buffer.append(data)
self._buffer_size += len(data)
@ -61,7 +61,7 @@ class _Partitioner(object):
self._send_part()

def _send_part(self):
data = ''.join(self._buffer)
data = b''.join(self._buffer)
# Put back any data remaining over the part size into the
# buffer
if len(data) > self.part_size:
@ -164,7 +164,7 @@ class _Uploader(object):
def generate_parts_from_fobj(fobj, part_size):
data = fobj.read(part_size)
while data:
yield data
yield data.encode('utf-8')
data = fobj.read(part_size)
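The writer now treats every buffer strictly as bytes; on Python 3, joining byte chunks with a text separator raises `TypeError`, so the empty-string sentinel and the join both need the `b` prefix. Minimal demonstration:

    chunks = [b'abc', b'def']
    data = b''.join(chunks)  # ''.join(chunks) raises TypeError on Python 3
    assert data == b'abcdef'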
@ -37,6 +37,7 @@ from boto.gs.key import Key as GSKey
from boto.s3.acl import Policy
from boto.s3.bucket import Bucket as S3Bucket
from boto.utils import get_utf8_value
from boto.compat import six

# constants for http query args
DEF_OBJ_ACL = 'defaultObjectAcl'
@ -100,12 +101,12 @@ class Bucket(S3Bucket):
if generation:
query_args_l.append('generation=%s' % generation)
if response_headers:
for rk, rv in response_headers.iteritems():
for rk, rv in six.iteritems(response_headers):
query_args_l.append('%s=%s' % (rk, urllib.quote(rv)))
try:
key, resp = self._get_key_internal(key_name, headers,
query_args_l=query_args_l)
except GSResponseError, e:
except GSResponseError as e:
if e.status == 403 and 'Forbidden' in e.reason:
# If we failed getting an object, let the user know which object
# failed rather than just returning a generic 403.
@ -156,7 +156,7 @@ class Cors(handler.ContentHandler):
s += '<' + collection + '>'
# If collection elements has type string, append atomic value,
# otherwise, append sequence of values in named tags.
if isinstance(elements_or_value, types.StringTypes):
if isinstance(elements_or_value, str):
s += elements_or_value
else:
for (name, value) in elements_or_value:
@ -23,7 +23,8 @@ import base64
import binascii
import os
import re
import StringIO

from boto.compat import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
@ -410,19 +411,20 @@ class Key(S3Key):
contents.

:type fp: file
:param fp: the file whose contents are to be uploaded
:param fp: The file whose contents are to be uploaded.

:type headers: dict
:param headers: additional HTTP headers to be sent with the PUT request.
:param headers: (optional) Additional HTTP headers to be sent with the
PUT request.

:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:param replace: (optional) If this parameter is False, the method will
first check to see if an object exists in the bucket with the same
key. If it does, it won't overwrite it. The default value is True
which will overwrite the object.

:type cb: function
:param cb: a callback function that will be called to report
:param cb: (optional) Callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
@ -435,43 +437,44 @@ class Key(S3Key):
during the file transfer.

:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:param policy: (optional) A canned ACL policy that will be applied to
the new key in GS.

:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type md5: tuple
:param md5: (optional) A tuple containing the hexdigest version of the
MD5 checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.

:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
If you need to compute the MD5 for any reason prior to upload, it's
silly to have to do it twice so this param, if present, will be
used as the MD5 values of the file. Otherwise, the checksum will be
computed.

:type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
:param res_upload_handler: (optional) If provided, this handler will
perform the upload.

:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the
file up into different ranges to be uploaded. If not
specified, the default behaviour is to read all bytes
from the file pointer. Less bytes may be available.
:param size: (optional) The Maximum number of bytes to read from the
file pointer (fp). This is useful when uploading a file in multiple
parts where you are splitting the file up into different ranges to
be uploaded. If not specified, the default behaviour is to read all
bytes from the file pointer. Less bytes may be available.

Notes:

1. The "size" parameter currently cannot be used when
a resumable upload handler is given but is still
useful for uploading part of a file as implemented
by the parent class.
2. At present Google Cloud Storage does not support
multipart uploads.
1. The "size" parameter currently cannot be used when a
resumable upload handler is given but is still useful for
uploading part of a file as implemented by the parent class.
2. At present Google Cloud Storage does not support multipart
uploads.

:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from
it. The default behaviour is False which reads from
the current position of the file pointer (fp).
:param rewind: (optional) If True, the file pointer (fp) will be
rewound to the start before any bytes are read from it. The default
behaviour is False which reads from the current position of the
file pointer (fp).

:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
@ -588,44 +591,47 @@ class Key(S3Key):
parameters.

:type filename: string
:param filename: The name of the file that you want to put onto GS
:param filename: The name of the file that you want to put onto GS.

:type headers: dict
:param headers: Additional headers to pass along with the request to GS.
:param headers: (optional) Additional headers to pass along with the
request to GS.

:type replace: bool
:param replace: If True, replaces the contents of the file if it
already exists.
:param replace: (optional) If True, replaces the contents of the file
if it already exists.

:type cb: function
:param cb: (optional) a callback function that will be called to report
progress on the download. The callback should accept two integer
:param cb: (optional) Callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted from GS and the second representing
the total number of bytes that need to be transmitted.
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.

:type cb: int
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer.

:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type policy: :py:attribute:`boto.gs.acl.CannedACLStrings`
:param policy: (optional) A canned ACL policy that will be applied to
the new key in GS.

:type md5: A tuple containing the hexdigest version of the MD5 checksum
of the file as the first element and the Base64-encoded version of
the plain checksum as the second element. This is the same format
returned by the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior to
upload, it's silly to have to do it twice so this param, if present,
will be used as the MD5 values of the file. Otherwise, the checksum
will be computed.
:type md5: tuple
:param md5: (optional) A tuple containing the hexdigest version of the
MD5 checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second element.
This is the same format returned by the compute_md5 method.

:type res_upload_handler: ResumableUploadHandler
:param res_upload_handler: If provided, this handler will perform the
upload.
If you need to compute the MD5 for any reason prior to upload, it's
silly to have to do it twice so this param, if present, will be
used as the MD5 values of the file. Otherwise, the checksum will be
computed.

:type res_upload_handler: :py:class:`boto.gs.resumable_upload_handler.ResumableUploadHandler`
:param res_upload_handler: (optional) If provided, this handler will
perform the upload.

:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
@ -699,7 +705,7 @@ class Key(S3Key):
self.md5 = None
self.base64md5 = None

fp = StringIO.StringIO(get_utf8_value(s))
fp = StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
@ -18,7 +18,6 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import errno
import httplib
import os
@ -27,16 +26,13 @@ import re
import socket
import time
import urlparse
from hashlib import md5
from boto import config, UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from boto.s3.keyfile import KeyFile
try:
from hashlib import md5
except ImportError:
from md5 import md5

"""
Handler for Google Cloud Storage resumable uploads. See
@ -98,7 +94,7 @@ class ResumableUploadHandler(object):
f = open(self.tracker_file_name, 'r')
uri = f.readline().strip()
self._set_tracker_uri(uri)
except IOError, e:
except IOError as e:
# Ignore non-existent file (happens first time an upload
# is attempted on a file), but warn user for other errors.
if e.errno != errno.ENOENT:
@ -106,7 +102,7 @@ class ResumableUploadHandler(object):
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'upload from scratch.' %
(self.tracker_file_name, e.strerror))
except InvalidUriError, e:
except InvalidUriError as e:
# Warn user, but proceed (will restart because
# self.tracker_uri is None).
print('Invalid tracker URI (%s) found in URI tracker file '
@ -125,9 +121,9 @@ class ResumableUploadHandler(object):
f = None
try:
with os.fdopen(os.open(self.tracker_file_name,
os.O_WRONLY | os.O_CREAT, 0600), 'w') as f:
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.write(self.tracker_uri)
except IOError, e:
except IOError as e:
raise ResumableUploadException(
'Couldn\'t write URI tracker file (%s): %s.\nThis can happen'
'if you\'re using an incorrectly configured upload tool\n'
@ -256,7 +252,7 @@ class ResumableUploadHandler(object):
'Couldn\'t parse upload server state query response (%s)' %
str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
if conn.debug >= 1:
print 'Server has: Range: %d - %d.' % (server_start, server_end)
print('Server has: Range: %d - %d.' % (server_start, server_end))
return (server_start, server_end)

def _start_new_resumable_upload(self, key, headers=None):
@ -267,7 +263,7 @@ class ResumableUploadHandler(object):
"""
conn = key.bucket.connection
if conn.debug >= 1:
print 'Starting new resumable upload.'
print('Starting new resumable upload.')
self.server_has_bytes = 0

# Start a new resumable upload by sending a POST request with an
@ -433,7 +429,7 @@ class ResumableUploadHandler(object):
# If the server already has some of the content, we need to
# update the digesters with the bytes that have already been
# uploaded to ensure we get a complete hash in the end.
print 'Catching up hash digest(s) for resumed upload'
print('Catching up hash digest(s) for resumed upload')
fp.seek(0)
# Read local file's bytes through position server has. For
# example, if server has (0, 3) we want to read 3-0+1=4 bytes.
@ -453,10 +449,10 @@ class ResumableUploadHandler(object):
bytes_to_go -= len(chunk)

if conn.debug >= 1:
print 'Resuming transfer.'
except ResumableUploadException, e:
print('Resuming transfer.')
except ResumableUploadException as e:
if conn.debug >= 1:
print 'Unable to resume transfer (%s).' % e.message
print('Unable to resume transfer (%s).' % e.message)
self._start_new_resumable_upload(key, headers)
else:
self._start_new_resumable_upload(key, headers)
@ -513,7 +509,7 @@ class ResumableUploadHandler(object):
change some of the file and not realize they have inconsistent data.
"""
if key.bucket.connection.debug >= 1:
print 'Checking md5 against etag.'
print('Checking md5 against etag.')
if key.md5 != etag.strip('"\''):
# Call key.open_read() before attempting to delete the
# (incorrect-content) key, so we perform that request on a
@ -567,7 +563,7 @@ class ResumableUploadHandler(object):
# Use binary exponential backoff to desynchronize client requests.
sleep_time_secs = random.random() * (2**self.progress_less_iterations)
if debug >= 1:
print ('Got retryable failure (%d progress-less in a row).\n'
print('Got retryable failure (%d progress-less in a row).\n'
'Sleeping %3.1f seconds before re-trying' %
(self.progress_less_iterations, sleep_time_secs))
time.sleep(sleep_time_secs)
@ -664,9 +660,9 @@ class ResumableUploadHandler(object):
self._check_final_md5(key, etag)
key.generation = self.generation
if debug >= 1:
print 'Resumable upload complete.'
print('Resumable upload complete.')
return
except self.RETRYABLE_EXCEPTIONS, e:
except self.RETRYABLE_EXCEPTIONS as e:
if debug >= 1:
print('Caught exception (%s)' % e.__repr__())
if isinstance(e, IOError) and e.errno == errno.EPIPE:
@ -676,7 +672,7 @@ class ResumableUploadHandler(object):
# the upload (which will cause a new connection to be
# opened the next time an HTTP request is sent).
key.bucket.connection.connection.close()
except ResumableUploadException, e:
except ResumableUploadException as e:
self.handle_resumable_upload_exception(e, debug)

self.track_progress_less_iterations(server_had_bytes_before_attempt,
@ -19,9 +19,10 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import StringIO
import xml.sax

from boto.compat import StringIO

class XmlHandler(xml.sax.ContentHandler):

def __init__(self, root_node, connection):
@ -55,4 +56,4 @@ class XmlHandlerWrapper(object):
self.parser.setFeature(xml.sax.handler.feature_external_ges, 0)

def parseString(self, content):
return self.parser.parse(StringIO.StringIO(content))
return self.parser.parse(StringIO(content))
@ -19,14 +19,15 @@

"""Extensions to allow HTTPS requests with SSL certificate validation."""

import httplib
import re
import socket
import ssl

import boto

class InvalidCertificateException(httplib.HTTPException):
from boto.compat import six, http_client

class InvalidCertificateException(http_client.HTTPException):
"""Raised when a certificate is provided with an invalid hostname."""

def __init__(self, host, cert, reason):
@ -36,7 +37,7 @@ class InvalidCertificateException(httplib.HTTPException):
host: The hostname the connection was made to.
cert: The SSL certificate (as a dictionary) the host returned.
"""
httplib.HTTPException.__init__(self)
http_client.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
@ -79,10 +80,10 @@ def ValidateCertificateHostname(cert, hostname):
return False

class CertValidatingHTTPSConnection(httplib.HTTPConnection):
class CertValidatingHTTPSConnection(http_client.HTTPConnection):
"""An HTTPConnection that connects over SSL and validates certificates."""

default_port = httplib.HTTPS_PORT
default_port = http_client.HTTPS_PORT

def __init__(self, host, port=default_port, key_file=None, cert_file=None,
ca_certs=None, strict=None, **kwargs):
@ -98,17 +99,23 @@ class CertValidatingHTTPSConnection(httplib.HTTPConnection):
strict: When true, causes BadStatusLine to be raised if the status line
can't be parsed as a valid HTTP/1.0 or 1.1 status line.
"""
httplib.HTTPConnection.__init__(self, host, port, strict, **kwargs)
if six.PY2:
# Python 3.2 and newer have deprecated and removed the strict
# parameter. Since the params are supported as keyword arguments
# we conditionally add it here.
kwargs['strict'] = strict

http_client.HTTPConnection.__init__(self, host=host, port=port, **kwargs)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs

def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(self, "timeout") and self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
if hasattr(self, "timeout"):
sock = socket.create_connection((self.host, self.port), self.timeout)
else:
sock = socket.create_connection((self.host, self.port))
msg = "wrapping ssl socket; "
if self.ca_certs:
msg += "CA certificate file=%s" %self.ca_certs
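`socket.create_connection` replaces the hand-rolled socket()/settimeout()/connect() sequence; it also resolves both IPv4 and IPv6 addresses and honors the global default timeout when none is given. Usage sketch (requires network access; host and port are illustrative):

    import socket

    sock = socket.create_connection(('example.com', 443), timeout=10)
    try:
        print(sock.getpeername())  # resolved (address, port) of the peer
    finally:
        sock.close()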
@ -21,16 +21,37 @@
# IN THE SOFTWARE.
import boto
import boto.jsonresponse
from boto.compat import json
from boto.compat import json, six
from boto.resultset import ResultSet
from boto.iam.summarymap import SummaryMap
from boto.connection import AWSQueryConnection

ASSUME_ROLE_POLICY_DOCUMENT = json.dumps({
'Statement': [{'Principal': {'Service': ['ec2.amazonaws.com']},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']}]})
DEFAULT_POLICY_DOCUMENTS = {
'default': {
'Statement': [
{
'Principal': {
'Service': ['ec2.amazonaws.com']
},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']
}
]
},
'amazonaws.com.cn': {
'Statement': [
{
'Principal': {
'Service': ['ec2.amazonaws.com.cn']
},
'Effect': 'Allow',
'Action': ['sts:AssumeRole']
}
]
},
}
# For backward-compatibility, we'll preserve this here.
ASSUME_ROLE_POLICY_DOCUMENT = json.dumps(DEFAULT_POLICY_DOCUMENTS['default'])

class IAMConnection(AWSQueryConnection):
@ -40,7 +61,7 @@ class IAMConnection(AWSQueryConnection):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
debug=0, https_connection_factory=None, path='/',
debug=0, https_connection_factory=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
super(IAMConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
@ -1006,13 +1027,35 @@ class IAMConnection(AWSQueryConnection):
:param service: Default service to go to in the console.
"""
alias = self.get_account_alias()

if not alias:
raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')

resp = alias.get('list_account_aliases_response', {})
result = resp.get('list_account_aliases_result', {})
aliases = result.get('account_aliases', [])

if not len(aliases):
raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')

# We'll just use the first one we find.
alias = aliases[0]

if self.host == 'iam.us-gov.amazonaws.com':
return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (alias, service)
return "https://%s.signin.amazonaws-us-gov.com/console/%s" % (
alias,
service
)
elif self.host.endswith('amazonaws.com.cn'):
return "https://%s.signin.amazonaws.cn/console/%s" % (
alias,
service
)
else:
return "https://%s.signin.aws.amazon.com/console/%s" % (alias, service)
return "https://%s.signin.aws.amazon.com/console/%s" % (
alias,
service
)

def get_account_summary(self):
"""
@ -1059,6 +1102,30 @@ class IAMConnection(AWSQueryConnection):
params['Path'] = path
return self.get_response('CreateInstanceProfile', params)

def _build_policy(self, assume_role_policy_document=None):
if assume_role_policy_document is not None:
if isinstance(assume_role_policy_document, six.string_types):
# Historically, they had to pass a string. If it's a string,
# assume the user has already handled it.
return assume_role_policy_document
else:

for tld, policy in DEFAULT_POLICY_DOCUMENTS.items():
if tld is 'default':
# Skip the default. We'll fall back to it if we don't find
# anything.
continue

if self.host and self.host.endswith(tld):
assume_role_policy_document = policy
break

if not assume_role_policy_document:
assume_role_policy_document = DEFAULT_POLICY_DOCUMENTS['default']

# Dump the policy (either user-supplied ``dict`` or one of the defaults)
return json.dumps(assume_role_policy_document)

def create_role(self, role_name, assume_role_policy_document=None, path=None):
"""
Creates a new role for your AWS account.
@ -1070,21 +1137,19 @@ class IAMConnection(AWSQueryConnection):
:type role_name: string
:param role_name: Name of the role to create.

:type assume_role_policy_document: string
:type assume_role_policy_document: ``string`` or ``dict``
:param assume_role_policy_document: The policy that grants an entity
permission to assume the role.

:type path: string
:param path: The path to the instance profile.
:param path: The path to the role.
"""
params = {'RoleName': role_name}
if assume_role_policy_document is None:
# This is the only valid assume_role_policy_document currently, so
# this is used as a default value if no assume_role_policy_document
# is provided.
params['AssumeRolePolicyDocument'] = ASSUME_ROLE_POLICY_DOCUMENT
else:
params['AssumeRolePolicyDocument'] = assume_role_policy_document
params = {
'RoleName': role_name,
'AssumeRolePolicyDocument': self._build_policy(
assume_role_policy_document
),
}
if path is not None:
params['Path'] = path
return self.get_response('CreateRole', params)
@ -1375,7 +1440,7 @@ class IAMConnection(AWSQueryConnection):
Lists the SAML providers in the account.
This operation requires `Signature Version 4`_.
"""
return self.get_response('ListSAMLProviders', {})
return self.get_response('ListSAMLProviders', {}, list_marker='SAMLProviderList')

def get_saml_provider(self, saml_provider_arn):
"""
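The new `_build_policy` helper picks an assume-role document whose `Service` principal matches the endpoint's partition, so roles created against China (`amazonaws.com.cn`) endpoints reference `ec2.amazonaws.com.cn`. A hedged sketch of that selection logic, written outside the class (the endpoint value is hypothetical):

    import json

    host = 'iam.cn-north-1.amazonaws.com.cn'  # hypothetical endpoint
    service = ('ec2.amazonaws.com.cn' if host.endswith('amazonaws.com.cn')
               else 'ec2.amazonaws.com')
    policy = {'Statement': [{'Principal': {'Service': [service]},
                             'Effect': 'Allow',
                             'Action': ['sts:AssumeRole']}]}
    print(json.dumps(policy))  # what create_role would send, serialized once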
@ -21,7 +21,7 @@
# IN THE SOFTWARE.

import xml.sax
import utils
from boto import utils

class XmlHandler(xml.sax.ContentHandler):

@ -49,6 +49,8 @@ class XmlHandler(xml.sax.ContentHandler):
self.current_text += content

def parse(self, s):
if not isinstance(s, bytes):
s = s.encode('utf-8')
xml.sax.parseString(s, self)

class Element(dict):
@ -20,11 +20,6 @@
# IN THE SOFTWARE.
#

try:
import json
except ImportError:
import simplejson as json

import base64
import boto

@ -32,6 +27,7 @@ from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json

class KinesisConnection(AWSQueryConnection):
@ -293,7 +289,8 @@ class KinesisConnection(AWSQueryConnection):
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(record['Data'])
record['Data'] = base64.b64decode(
record['Data'].encode('utf-8')).decode('utf-8')

return response

@ -594,7 +591,8 @@ class KinesisConnection(AWSQueryConnection):
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
params['Data'] = base64.b64encode(params['Data'])
params['Data'] = base64.b64encode(
params['Data'].encode('utf-8')).decode('utf-8')
return self.make_request(action='PutRecord',
body=json.dumps(params))

@ -695,7 +693,7 @@ class KinesisConnection(AWSQueryConnection):
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read()
response_body = response.read().decode('utf-8')
boto.log.debug(response.getheaders())
boto.log.debug(response_body)
if response.status == 200:
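The Kinesis change encodes text payloads to UTF-8 before Base64 (which only accepts bytes on Python 3) and decodes the result back to `str` so it can be embedded in the JSON request body. A round trip matching the new calls:

    import base64

    data = 'hello kinesis'  # illustrative payload
    wire = base64.b64encode(data.encode('utf-8')).decode('utf-8')
    assert base64.b64decode(wire.encode('utf-8')).decode('utf-8') == data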
awx/lib/site-packages/boto/logs/__init__.py (new file, 41 lines)
@ -0,0 +1,41 @@
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import get_regions

def regions():
"""
Get all available regions for the CloudWatch Logs service.

:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.logs.layer1 import CloudWatchLogsConnection
return get_regions('logs', connection_cls=CloudWatchLogsConnection)

def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
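A hedged usage sketch for the new module, mirroring the other `connect_to_region` entry points in boto (the region name is illustrative, and valid AWS credentials are assumed to be configured):

    import boto.logs

    conn = boto.logs.connect_to_region('us-east-1')
    if conn is None:
        raise RuntimeError('region not available for CloudWatch Logs')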
awx/lib/site-packages/boto/logs/exceptions.py (new file, 59 lines)
@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError

class LimitExceededException(BotoServerError):
pass

class DataAlreadyAcceptedException(BotoServerError):
pass

class ResourceInUseException(BotoServerError):
pass

class ServiceUnavailableException(BotoServerError):
pass

class InvalidParameterException(BotoServerError):
pass

class ResourceNotFoundException(BotoServerError):
pass

class ResourceAlreadyExistsException(BotoServerError):
pass

class OperationAbortedException(BotoServerError):
pass

class InvalidSequenceTokenException(BotoServerError):
pass
awx/lib/site-packages/boto/logs/layer1.py (new file, 577 lines)
@@ -0,0 +1,577 @@
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.logs import exceptions
from boto.compat import json


class CloudWatchLogsConnection(AWSQueryConnection):
    """
    Amazon CloudWatch Logs Service API Reference

    This is the Amazon CloudWatch Logs API Reference. Amazon
    CloudWatch Logs is a managed service for real-time monitoring and
    archival of application logs. This guide provides detailed
    information about Amazon CloudWatch Logs actions, data types,
    parameters, and errors. For detailed information about Amazon
    CloudWatch Logs features and their associated API calls, go to the
    `Amazon CloudWatch Logs Developer Guide`_.

    Use the following links to get started using the Amazon CloudWatch
    Logs API Reference:

    + `Actions`_: An alphabetical list of all Amazon CloudWatch Logs
      actions.
    + `Data Types`_: An alphabetical list of all Amazon CloudWatch
      Logs data types.
    + `Common Parameters`_: Parameters that all Query actions can use.
    + `Common Errors`_: Client and server errors that all actions can
      return.
    + `Regions and Endpoints`_: Itemized regions and endpoints for all
      AWS products.

    In addition to using the Amazon CloudWatch Logs API, you can also
    use the following SDKs and third-party libraries to access Amazon
    CloudWatch Logs programmatically.

    + `AWS SDK for Java Documentation`_
    + `AWS SDK for .NET Documentation`_
    + `AWS SDK for PHP Documentation`_
    + `AWS SDK for Ruby Documentation`_

    Developers in the AWS developer community also provide their own
    libraries, which you can find at the following AWS developer
    centers:

    + `AWS Java Developer Center`_
    + `AWS PHP Developer Center`_
    + `AWS Python Developer Center`_
    + `AWS Ruby Developer Center`_
    + `AWS Windows and .NET Developer Center`_
    """
    APIVersion = "2014-03-28"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "logs.us-east-1.amazonaws.com"
    ServiceName = "CloudWatchLogs"
    TargetPrefix = "Logs_20140328"
    ResponseError = JSONResponseError

    _faults = {
        "LimitExceededException": exceptions.LimitExceededException,
        "DataAlreadyAcceptedException": exceptions.DataAlreadyAcceptedException,
        "ResourceInUseException": exceptions.ResourceInUseException,
        "ServiceUnavailableException": exceptions.ServiceUnavailableException,
        "InvalidParameterException": exceptions.InvalidParameterException,
        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
        "ResourceAlreadyExistsException": exceptions.ResourceAlreadyExistsException,
        "OperationAbortedException": exceptions.OperationAbortedException,
        "InvalidSequenceTokenException": exceptions.InvalidSequenceTokenException,
    }

    def __init__(self, **kwargs):
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)

        if 'host' not in kwargs or kwargs['host'] is None:
            kwargs['host'] = region.endpoint

        super(CloudWatchLogsConnection, self).__init__(**kwargs)
        self.region = region

    def _required_auth_capability(self):
        return ['hmac-v4']

    def create_log_group(self, log_group_name):
        """
        Creates a new log group with the specified name. The name of
        the log group must be unique within a region for an AWS
        account. You can create up to 100 log groups per account.

        You must use the following guidelines when naming a log group:

        + Log group names can be between 1 and 512 characters long.
        + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
          (hyphen), '/' (forward slash), and '.' (period).

        Log groups are created with a default retention of 14 days.
        The retention attribute allows you to configure the number of
        days you want to retain log events in the specified log group.
        See the `SetRetention` operation on how to modify the
        retention of your log groups.

        :type log_group_name: string
        :param log_group_name:

        """
        params = {'logGroupName': log_group_name, }
        return self.make_request(action='CreateLogGroup',
                                 body=json.dumps(params))
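Because make_request (shown at the end of this file) maps the service's __type fault field onto the classes in boto/logs/exceptions.py, callers get typed errors. A sketch, reusing the connection from the earlier example (group name illustrative):

    from boto.logs.exceptions import ResourceAlreadyExistsException

    try:
        conn.create_log_group('my-app-logs')
    except ResourceAlreadyExistsException:
        # Re-creating an existing group raises a specific, catchable
        # class rather than a bare JSONResponseError.
        pass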
    def create_log_stream(self, log_group_name, log_stream_name):
        """
        Creates a new log stream in the specified log group. The name
        of the log stream must be unique within the log group. There
        is no limit on the number of log streams that can exist in a
        log group.

        You must use the following guidelines when naming a log
        stream:

        + Log stream names can be between 1 and 512 characters long.
        + The ':' colon character is not allowed.

        :type log_group_name: string
        :param log_group_name:

        :type log_stream_name: string
        :param log_stream_name:

        """
        params = {
            'logGroupName': log_group_name,
            'logStreamName': log_stream_name,
        }
        return self.make_request(action='CreateLogStream',
                                 body=json.dumps(params))

    def delete_log_group(self, log_group_name):
        """
        Deletes the log group with the specified name. Amazon
        CloudWatch Logs will delete a log group only if there are no
        log streams and no metric filters associated with the log
        group. If this condition is not satisfied, the request will
        fail and the log group will not be deleted.

        :type log_group_name: string
        :param log_group_name:

        """
        params = {'logGroupName': log_group_name, }
        return self.make_request(action='DeleteLogGroup',
                                 body=json.dumps(params))

    def delete_log_stream(self, log_group_name, log_stream_name):
        """
        Deletes a log stream and permanently deletes all the archived
        log events associated with it.

        :type log_group_name: string
        :param log_group_name:

        :type log_stream_name: string
        :param log_stream_name:

        """
        params = {
            'logGroupName': log_group_name,
            'logStreamName': log_stream_name,
        }
        return self.make_request(action='DeleteLogStream',
                                 body=json.dumps(params))

    def delete_metric_filter(self, log_group_name, filter_name):
        """
        Deletes a metric filter associated with the specified log
        group.

        :type log_group_name: string
        :param log_group_name:

        :type filter_name: string
        :param filter_name: The name of the metric filter.

        """
        params = {
            'logGroupName': log_group_name,
            'filterName': filter_name,
        }
        return self.make_request(action='DeleteMetricFilter',
                                 body=json.dumps(params))

    def delete_retention_policy(self, log_group_name):
        """
        :type log_group_name: string
        :param log_group_name:

        """
        params = {'logGroupName': log_group_name, }
        return self.make_request(action='DeleteRetentionPolicy',
                                 body=json.dumps(params))

    def describe_log_groups(self, log_group_name_prefix=None,
                            next_token=None, limit=None):
        """
        Returns all the log groups that are associated with the AWS
        account making the request. The list returned in the response
        is ASCII-sorted by log group name.

        By default, this operation returns up to 50 log groups. If
        there are more log groups to list, the response will contain
        a `nextToken` value in the response body. You can also limit
        the number of log groups returned in the response by
        specifying the `limit` parameter in the request.

        :type log_group_name_prefix: string
        :param log_group_name_prefix:

        :type next_token: string
        :param next_token: A string token used for pagination that points to
            the next page of results. It must be a value obtained from the
            response of the previous `DescribeLogGroups` request.

        :type limit: integer
        :param limit: The maximum number of items returned in the response. If
            you don't specify a value, the request returns up to 50 items.

        """
        params = {}
        if log_group_name_prefix is not None:
            params['logGroupNamePrefix'] = log_group_name_prefix
        if next_token is not None:
            params['nextToken'] = next_token
        if limit is not None:
            params['limit'] = limit
        return self.make_request(action='DescribeLogGroups',
                                 body=json.dumps(params))
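The describe calls all page the same way: each response may carry a nextToken that you feed back into the next request. A paging sketch (the response keys follow the CloudWatch Logs wire format, logGroups and nextToken):

    groups = []
    token = None
    while True:
        resp = conn.describe_log_groups(next_token=token)
        groups.extend(resp.get('logGroups', []))
        token = resp.get('nextToken')
        if not token:  # an absent token means the last page was reached
            break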
    def describe_log_streams(self, log_group_name,
                             log_stream_name_prefix=None, next_token=None,
                             limit=None):
        """
        Returns all the log streams that are associated with the
        specified log group. The list returned in the response is
        ASCII-sorted by log stream name.

        By default, this operation returns up to 50 log streams. If
        there are more log streams to list, the response will contain
        a `nextToken` value in the response body. You can also limit
        the number of log streams returned in the response by
        specifying the `limit` parameter in the request.

        :type log_group_name: string
        :param log_group_name:

        :type log_stream_name_prefix: string
        :param log_stream_name_prefix:

        :type next_token: string
        :param next_token: A string token used for pagination that points to
            the next page of results. It must be a value obtained from the
            response of the previous `DescribeLogStreams` request.

        :type limit: integer
        :param limit: The maximum number of items returned in the response. If
            you don't specify a value, the request returns up to 50 items.

        """
        params = {'logGroupName': log_group_name, }
        if log_stream_name_prefix is not None:
            params['logStreamNamePrefix'] = log_stream_name_prefix
        if next_token is not None:
            params['nextToken'] = next_token
        if limit is not None:
            params['limit'] = limit
        return self.make_request(action='DescribeLogStreams',
                                 body=json.dumps(params))

    def describe_metric_filters(self, log_group_name,
                                filter_name_prefix=None, next_token=None,
                                limit=None):
        """
        Returns all the metric filters associated with the specified
        log group. The list returned in the response is ASCII-sorted
        by filter name.

        By default, this operation returns up to 50 metric filters. If
        there are more metric filters to list, the response will
        contain a `nextToken` value in the response body. You can also
        limit the number of metric filters returned in the response by
        specifying the `limit` parameter in the request.

        :type log_group_name: string
        :param log_group_name:

        :type filter_name_prefix: string
        :param filter_name_prefix: The name of the metric filter.

        :type next_token: string
        :param next_token: A string token used for pagination that points to
            the next page of results. It must be a value obtained from the
            response of the previous `DescribeMetricFilters` request.

        :type limit: integer
        :param limit: The maximum number of items returned in the response. If
            you don't specify a value, the request returns up to 50 items.

        """
        params = {'logGroupName': log_group_name, }
        if filter_name_prefix is not None:
            params['filterNamePrefix'] = filter_name_prefix
        if next_token is not None:
            params['nextToken'] = next_token
        if limit is not None:
            params['limit'] = limit
        return self.make_request(action='DescribeMetricFilters',
                                 body=json.dumps(params))

    def get_log_events(self, log_group_name, log_stream_name,
                       start_time=None, end_time=None, next_token=None,
                       limit=None, start_from_head=None):
        """
        Retrieves log events from the specified log stream. You can
        provide an optional time range to filter the results on the
        event `timestamp`.

        By default, this operation returns as many log events as can
        fit in a response size of 1MB, up to 10,000 log events. The
        response will always include a `nextForwardToken` and a
        `nextBackwardToken` in the response body. You can use either of
        these tokens in subsequent `GetLogEvents` requests to paginate
        through events in either the forward or backward direction. You
        can also limit the number of log events returned in the
        response by specifying the `limit` parameter in the request.

        :type log_group_name: string
        :param log_group_name:

        :type log_stream_name: string
        :param log_stream_name:

        :type start_time: long
        :param start_time: A point in time expressed as the number of
            milliseconds since Jan 1, 1970 00:00:00 UTC.

        :type end_time: long
        :param end_time: A point in time expressed as the number of
            milliseconds since Jan 1, 1970 00:00:00 UTC.

        :type next_token: string
        :param next_token: A string token used for pagination that points to
            the next page of results. It must be a value obtained from the
            `nextForwardToken` or `nextBackwardToken` fields in the response of
            the previous `GetLogEvents` request.

        :type limit: integer
        :param limit: The maximum number of log events returned in the
            response. If you don't specify a value, the request returns as
            many log events as can fit in a response size of 1MB, up to
            10,000 log events.

        :type start_from_head: boolean
        :param start_from_head:

        """
        params = {
            'logGroupName': log_group_name,
            'logStreamName': log_stream_name,
        }
        if start_time is not None:
            params['startTime'] = start_time
        if end_time is not None:
            params['endTime'] = end_time
        if next_token is not None:
            params['nextToken'] = next_token
        if limit is not None:
            params['limit'] = limit
        if start_from_head is not None:
            params['startFromHead'] = start_from_head
        return self.make_request(action='GetLogEvents',
                                 body=json.dumps(params))
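GetLogEvents pages differently from the describe calls: it always returns both tokens, and per the service's documented pagination behavior the forward token repeats once the end of the stream is reached, so the loop compares tokens instead of testing for absence. A forward-reading sketch (group and stream names illustrative):

    token = None
    while True:
        resp = conn.get_log_events('my-app-logs', 'my-stream',
                                   next_token=token, start_from_head=True)
        for event in resp.get('events', []):
            print(event['timestamp'], event['message'])
        new_token = resp.get('nextForwardToken')
        if new_token == token:  # token stops advancing at end of stream
            break
        token = new_token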
    def put_log_events(self, log_group_name, log_stream_name, log_events,
                       sequence_token=None):
        """
        Uploads a batch of log events to the specified log stream.

        Every PutLogEvents request must include the `sequenceToken`
        obtained from the response of the previous request. An upload
        in a newly created log stream does not require a
        `sequenceToken`.

        The batch of events must satisfy the following constraints:

        + The maximum batch size is 32,768 bytes, and this size is
          calculated as the sum of all event messages in UTF-8, plus 26
          bytes for each log event.
        + None of the log events in the batch can be more than 2 hours
          in the future.
        + None of the log events in the batch can be older than 14
          days or the retention period of the log group.
        + The log events in the batch must be in chronological order
          by their `timestamp`.
        + The maximum number of log events in a batch is 1,000.

        :type log_group_name: string
        :param log_group_name:

        :type log_stream_name: string
        :param log_stream_name:

        :type log_events: list
        :param log_events: A list of events belonging to a log stream.

        :type sequence_token: string
        :param sequence_token: A string token that must be obtained from the
            response of the previous `PutLogEvents` request.

        """
        params = {
            'logGroupName': log_group_name,
            'logStreamName': log_stream_name,
            'logEvents': log_events,
        }
        if sequence_token is not None:
            params['sequenceToken'] = sequence_token
        return self.make_request(action='PutLogEvents',
                                 body=json.dumps(params))
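Writes are chained through the sequence token: a fresh stream accepts its first batch with no token, and every response carries the token the next call must present (otherwise InvalidSequenceTokenException is raised). A sketch with illustrative names and single-event batches:

    import time

    token = None  # a brand-new stream needs no sequenceToken
    for message in ['first event', 'second event']:
        events = [{'timestamp': int(time.time() * 1000),
                   'message': message}]
        resp = conn.put_log_events('my-app-logs', 'my-stream', events,
                                   sequence_token=token)
        token = resp.get('nextSequenceToken')  # required by the next call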
    def put_metric_filter(self, log_group_name, filter_name, filter_pattern,
                          metric_transformations):
        """
        Creates or updates a metric filter and associates it with the
        specified log group. Metric filters allow you to configure
        rules to extract metric data from log events ingested through
        `PutLogEvents` requests.

        :type log_group_name: string
        :param log_group_name:

        :type filter_name: string
        :param filter_name: The name of the metric filter.

        :type filter_pattern: string
        :param filter_pattern:

        :type metric_transformations: list
        :param metric_transformations:

        """
        params = {
            'logGroupName': log_group_name,
            'filterName': filter_name,
            'filterPattern': filter_pattern,
            'metricTransformations': metric_transformations,
        }
        return self.make_request(action='PutMetricFilter',
                                 body=json.dumps(params))
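Each metric transformation is a plain dict in the service's wire shape, with metricName, metricNamespace, and metricValue keys; the names and pattern below are illustrative:

    conn.put_metric_filter(
        'my-app-logs', 'error-count',
        filter_pattern='ERROR',
        metric_transformations=[{
            'metricName': 'ErrorCount',
            'metricNamespace': 'MyApp',
            'metricValue': '1',  # emit 1 per matching log event
        }])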
    def put_retention_policy(self, log_group_name, retention_in_days):
        """
        :type log_group_name: string
        :param log_group_name:

        :type retention_in_days: integer
        :param retention_in_days: Specifies the number of days you want to
            retain log events in the specified log group. Possible values are:
            1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.

        """
        params = {
            'logGroupName': log_group_name,
            'retentionInDays': retention_in_days,
        }
        return self.make_request(action='PutRetentionPolicy',
                                 body=json.dumps(params))

    def set_retention(self, log_group_name, retention_in_days):
        """
        Sets the retention of the specified log group. Log groups are
        created with a default retention of 14 days. The retention
        attribute allows you to configure the number of days you want
        to retain log events in the specified log group.

        :type log_group_name: string
        :param log_group_name:

        :type retention_in_days: integer
        :param retention_in_days: Specifies the number of days you want to
            retain log events in the specified log group. Possible values are:
            1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 547, 730.

        """
        params = {
            'logGroupName': log_group_name,
            'retentionInDays': retention_in_days,
        }
        return self.make_request(action='SetRetention',
                                 body=json.dumps(params))
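Since retentionInDays only accepts the enumerated values listed in the docstring above, setting a policy is a one-liner (group name illustrative):

    conn.put_retention_policy('my-app-logs', 30)  # expire events after 30 days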
    def test_metric_filter(self, filter_pattern, log_event_messages):
        """
        Tests the filter pattern of a metric filter against a sample
        of log event messages. You can use this operation to validate
        the correctness of a metric filter pattern.

        :type filter_pattern: string
        :param filter_pattern:

        :type log_event_messages: list
        :param log_event_messages:

        """
        params = {
            'filterPattern': filter_pattern,
            'logEventMessages': log_event_messages,
        }
        return self.make_request(action='TestMetricFilter',
                                 body=json.dumps(params))

    def make_request(self, action, body):
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
        else:
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
awx/lib/site-packages/boto/manage/cmdshell.py
@@ -18,20 +18,36 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

"""
The cmdshell module uses the paramiko package to create SSH connections
to the servers that are represented by instance objects. The module has
functions for running commands, managing files, and opening interactive
shell sessions over those connections.
"""
from boto.mashups.interactive import interactive_shell
import boto
import os
import time
import shutil
-import StringIO
import paramiko
import socket
import subprocess

+from boto.compat import StringIO

class SSHClient(object):

    """
    This class creates a paramiko.SSHClient() object that represents
    a session with an SSH server. You can use the SSHClient object to send
    commands to the remote host and manipulate files on the remote host.

    :ivar server: A Server object or FakeServer object.
    :ivar host_key_file: The path to the user's .ssh key files.
    :ivar uname: The username for the SSH connection. Default = 'root'.
    :ivar timeout: The optional timeout variable for the TCP connection.
    :ivar ssh_pwd: An optional password to use for authentication or for
        unlocking the private key.
    """
    def __init__(self, server,
                 host_key_file='~/.ssh/known_hosts',
                 uname='root', timeout=None, ssh_pwd=None):

@@ -48,6 +64,12 @@ class SSHClient(object):
        self.connect()

    def connect(self, num_retries=5):
        """
        Connect to an SSH server and authenticate with it.

        :type num_retries: int
        :param num_retries: The maximum number of connection attempts.
        """
        retry = 0
        while retry < num_retries:
            try:

@@ -56,53 +78,132 @@ class SSHClient(object):
                    pkey=self._pkey,
                    timeout=self._timeout)
                return
-            except socket.error, (value, message):
+            except socket.error as xxx_todo_changeme:
+                (value, message) = xxx_todo_changeme.args
                if value in (51, 61, 111):
-                    print 'SSH Connection refused, will retry in 5 seconds'
+                    print('SSH Connection refused, will retry in 5 seconds')
                    time.sleep(5)
                    retry += 1
                else:
                    raise
            except paramiko.BadHostKeyException:
-                print "%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname
-                print 'Edit that file to remove the entry and then hit return to try again'
+                print("%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname)
+                print('Edit that file to remove the entry and then hit return to try again')
                raw_input('Hit Enter when ready')
                retry += 1
            except EOFError:
-                print 'Unexpected Error from SSH Connection, retry in 5 seconds'
+                print('Unexpected Error from SSH Connection, retry in 5 seconds')
                time.sleep(5)
                retry += 1
-        print 'Could not establish SSH connection'
+        print('Could not establish SSH connection')

    def open_sftp(self):
        """
        Open an SFTP session on the SSH server.

        :rtype: :class:`paramiko.sftp_client.SFTPClient`
        :return: An SFTP client object.
        """
        return self._ssh_client.open_sftp()

    def get_file(self, src, dst):
        """
        Open an SFTP session on the remote host, and copy a file from
        the remote host to the specified path on the local host.

        :type src: string
        :param src: The path to the target file on the remote host.

        :type dst: string
        :param dst: The path on your local host where you want to
            store the file.
        """
        sftp_client = self.open_sftp()
        sftp_client.get(src, dst)

    def put_file(self, src, dst):
        """
        Open an SFTP session on the remote host, and copy a file from
        the local host to the specified path on the remote host.

        :type src: string
        :param src: The path to the target file on your local host.

        :type dst: string
        :param dst: The path on the remote host where you want to store
            the file.
        """
        sftp_client = self.open_sftp()
        sftp_client.put(src, dst)

    def open(self, filename, mode='r', bufsize=-1):
        """
-        Open a file on the remote system and return a file-like object.
+        Open an SFTP session to the remote host, and open a file on
+        that host.

        :type filename: string
        :param filename: The path to the file on the remote host.

        :type mode: string
        :param mode: The file interaction mode.

        :type bufsize: integer
        :param bufsize: The file buffer size.

        :rtype: :class:`paramiko.sftp_file.SFTPFile`
        :return: A paramiko proxy object for a file on the remote server.
        """
        sftp_client = self.open_sftp()
        return sftp_client.open(filename, mode, bufsize)

    def listdir(self, path):
        """
        List all of the files and subdirectories at the specified path
        on the remote host.

        :type path: string
        :param path: The base path from which to obtain the list.

        :rtype: list
        :return: A list of files and subdirectories at the specified path.
        """
        sftp_client = self.open_sftp()
        return sftp_client.listdir(path)

    def isdir(self, path):
        """
        Check the specified path on the remote host to determine if
        it is a directory.

        :type path: string
        :param path: The path to the directory that you want to check.

        :rtype: integer
        :return: If the path is a directory, the function returns 1.
            If the path is a file or an invalid path, the function
            returns 0.
        """
        status = self.run('[ -d %s ] || echo "FALSE"' % path)
        if status[1].startswith('FALSE'):
            return 0
        return 1

    def exists(self, path):
        """
        Check the remote host for the specified path, or a file
        at the specified path. This function returns 1 if the
        path or the file exist on the remote host, and returns 0 if
        the path or the file does not exist on the remote host.

        :type path: string
        :param path: The path to the directory or file that you want to check.

        :rtype: integer
        :return: If the path or the file exist, the function returns 1.
            If the path or the file do not exist on the remote host,
            the function returns 0.
        """
        status = self.run('[ -a %s ] || echo "FALSE"' % path)
        if status[1].startswith('FALSE'):
            return 0

@@ -110,16 +211,22 @@ class SSHClient(object):

    def shell(self):
        """
-        Start an interactive shell session on the remote host.
+        Start an interactive shell session with the remote host.
        """
        channel = self._ssh_client.invoke_shell()
        interactive_shell(channel)

    def run(self, command):
        """
-        Execute a command on the remote host. Return a tuple containing
-        an integer status and two strings, the first containing stdout
-        and the second containing stderr from the command.
+        Run a command on the remote host.
+
+        :type command: string
+        :param command: The command that you want to send to the remote host.
+
+        :rtype: tuple
+        :return: This function returns a tuple that contains an integer status,
+            the stdout from the command, and the stderr from the command.
        """
        boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
        status = 0

@@ -138,8 +245,14 @@ class SSHClient(object):

    def run_pty(self, command):
        """
-        Execute a command on the remote host with a pseudo-terminal.
-        Returns a string containing the output of the command.
+        Request a pseudo-terminal from a server, and execute a command on
+        that server.
+
+        :type command: string
+        :param command: The command that you want to run on the remote host.
+
+        :rtype: :class:`paramiko.channel.Channel`
+        :return: An open channel object.
        """
        boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
        channel = self._ssh_client.get_transport().open_session()

@@ -148,38 +261,77 @@ class SSHClient(object):
        return channel

    def close(self):
        """
        Close an SSH session and any open channels that are tied to it.
        """
        transport = self._ssh_client.get_transport()
        transport.close()
        self.server.reset_cmdshell()

class LocalClient(object):

    """
    :ivar server: A Server object or FakeServer object.
    :ivar host_key_file: The path to the user's .ssh key files.
    :ivar uname: The username for the SSH connection. Default = 'root'.
    """
    def __init__(self, server, host_key_file=None, uname='root'):
        self.server = server
        self.host_key_file = host_key_file
        self.uname = uname

    def get_file(self, src, dst):
        """
        Copy a file from one directory to another.
        """
        shutil.copyfile(src, dst)

    def put_file(self, src, dst):
        """
        Copy a file from one directory to another.
        """
        shutil.copyfile(src, dst)

    def listdir(self, path):
        """
        List all of the files and subdirectories at the specified path.

        :rtype: list
        :return: Return a list containing the names of the entries
            in the directory given by path.
        """
        return os.listdir(path)

    def isdir(self, path):
        """
        Check the specified path to determine if it is a directory.

        :rtype: boolean
        :return: Returns True if the path is an existing directory.
        """
        return os.path.isdir(path)

    def exists(self, path):
        """
        Check for the specified path, or check a file at the specified path.

        :rtype: boolean
        :return: If the path or the file exist, the function returns True.
        """
        return os.path.exists(path)

    def shell(self):
        raise NotImplementedError('shell not supported with LocalClient')

    def run(self):
        """
        Open a subprocess and run a command on the local host.

        :rtype: tuple
        :return: This function returns a tuple that contains an integer status
            and a string with the combined stdout and stderr output.
        """
        boto.log.info('running:%s' % self.command)
-        log_fp = StringIO.StringIO()
+        log_fp = StringIO()
        process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        while process.poll() is None:

@@ -196,9 +348,13 @@ class LocalClient(object):

class FakeServer(object):
    """
-    A little class to fake out SSHClient (which is expecting a
-    :class:`boto.manage.server.Server` instance). This allows us to
+    This object has a subset of the variables that are normally in a
+    :class:`boto.manage.server.Server` object. You can use this FakeServer
+    object to create a :class:`boto.manage.SSHClient` object if you
+    don't have a real Server object.
+
+    :ivar instance: A boto Instance object.
+    :ivar ssh_key_file: The path to the SSH key file.
    """
    def __init__(self, instance, ssh_key_file):
        self.instance = instance

@@ -207,6 +363,14 @@ class FakeServer(object):
        self.instance_id = self.instance.id

def start(server):
    """
    Connect to the specified server.

    :return: If the server is local, the function returns a
        :class:`boto.manage.cmdshell.LocalClient` object.
        If the server is remote, the function returns a
        :class:`boto.manage.cmdshell.SSHClient` object.
    """
    instance_id = boto.config.get('Instance', 'instance-id', None)
    if instance_id == server.instance_id:
        return LocalClient(server)

@@ -223,19 +387,19 @@ def sshclient_from_instance(instance, ssh_key_file,
    :type instance: :class:`boto.ec2.instance.Instance` object
    :param instance: The instance object.

-    :type ssh_key_file: str
-    :param ssh_key_file: A path to the private key file used
-        to log into instance.
+    :type ssh_key_file: string
+    :param ssh_key_file: A path to the private key file that is
+        used to log into the instance.

-    :type host_key_file: str
+    :type host_key_file: string
    :param host_key_file: A path to the known_hosts file used
        by the SSH client.
        Defaults to ~/.ssh/known_hosts
-    :type user_name: str
+    :type user_name: string
    :param user_name: The username to use when logging into
        the instance. Defaults to root.

-    :type ssh_pwd: str
+    :type ssh_pwd: string
    :param ssh_pwd: The passphrase, if any, associated with
        private key.
    """
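The cmdshell hunks are mostly Python 3 compatibility churn plus new docstrings, so a short usage sketch helps anchor them. This assumes a running instance you own, a matching private key at an illustrative path, and paramiko installed:

    import boto.ec2
    from boto.manage.cmdshell import sshclient_from_instance

    conn = boto.ec2.connect_to_region('us-east-1')
    instance = conn.get_only_instances()[0]  # any running instance

    ssh = sshclient_from_instance(instance, '~/.ssh/mykey.pem',
                                  user_name='ec2-user')
    status, stdout, stderr = ssh.run('uptime')  # run() returns a 3-tuple
    print(stdout)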
awx/lib/site-packages/boto/manage/propget.py
@@ -19,7 +19,6 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

def get(prop, choices=None):
    prompt = prop.verbose_name
    if not prompt:

@@ -38,7 +37,7 @@ def get(prop, choices=None):
            value = choices[i-1]
            if isinstance(value, tuple):
                value = value[0]
-            print '[%d] %s' % (i, value)
+            print('[%d] %s' % (i, value))
        value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
        try:
            int_value = int(value)

@@ -47,18 +46,18 @@ def get(prop, choices=None):
                value = value[1]
            valid = True
        except ValueError:
-            print '%s is not a valid choice' % value
+            print('%s is not a valid choice' % value)
        except IndexError:
-            print '%s is not within the range[%d-%d]' % (min, max)
+            print('%s is not within the range[%d-%d]' % (min, max))
    else:
        value = raw_input('%s: ' % prompt)
        try:
            value = prop.validate(value)
            if prop.empty(value) and prop.required:
-                print 'A value is required'
+                print('A value is required')
            else:
                valid = True
        except:
-            print 'Invalid value: %s' % value
+            print('Invalid value: %s' % value)
    return value
awx/lib/site-packages/boto/manage/server.py
@@ -19,11 +19,10 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

"""
High-level abstraction of an EC2 server
"""
-from __future__ import with_statement

import boto.ec2
from boto.mashups.iobject import IObject
from boto.pyami.config import BotoConfigPath, Config

@@ -32,9 +31,10 @@ from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanPropert
from boto.manage import propget
from boto.ec2.zone import Zone
from boto.ec2.keypair import KeyPair
-import os, time, StringIO
+import os, time
from contextlib import closing
from boto.exception import EC2ResponseError
+from boto.compat import six, StringIO

InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
                 'c1.medium', 'c1.xlarge',

@@ -49,7 +49,7 @@ class Bundler(object):
        self.ssh_client = SSHClient(server, uname=uname)

    def copy_x509(self, key_file, cert_file):
-        print '\tcopying cert and pk over to /mnt directory on server'
+        print('\tcopying cert and pk over to /mnt directory on server')
        self.ssh_client.open_sftp()
        path, name = os.path.split(key_file)
        self.remote_key_file = '/mnt/%s' % name

@@ -57,7 +57,7 @@ class Bundler(object):
        path, name = os.path.split(cert_file)
        self.remote_cert_file = '/mnt/%s' % name
        self.ssh_client.put_file(cert_file, self.remote_cert_file)
-        print '...complete!'
+        print('...complete!')

    def bundle_image(self, prefix, size, ssh_key):
        command = ""

@@ -103,7 +103,7 @@ class Bundler(object):
            ssh_key = self.server.get_ssh_key_file()
        self.copy_x509(key_file, cert_file)
        if not fp:
-            fp = StringIO.StringIO()
+            fp = StringIO()
        fp.write('sudo mv %s /mnt/boto.cfg; ' % BotoConfigPath)
        fp.write('mv ~/.ssh/authorized_keys /mnt/authorized_keys; ')
        if clear_history:

@@ -115,13 +115,13 @@ class Bundler(object):
        fp.write('sudo mv /mnt/boto.cfg %s; ' % BotoConfigPath)
        fp.write('mv /mnt/authorized_keys ~/.ssh/authorized_keys')
        command = fp.getvalue()
-        print 'running the following command on the remote server:'
-        print command
+        print('running the following command on the remote server:')
+        print(command)
        t = self.ssh_client.run(command)
-        print '\t%s' % t[0]
-        print '\t%s' % t[1]
-        print '...complete!'
-        print 'registering image...'
+        print('\t%s' % t[0])
+        print('\t%s' % t[1])
+        print('...complete!')
+        print('registering image...')
        self.image_id = self.server.ec2.register_image(name=prefix, image_location='%s/%s.manifest.xml' % (bucket, prefix))
        return self.image_id

@@ -250,7 +250,7 @@ class Server(Model):
    instance_type = CalculatedProperty(verbose_name="Instance Type", calculated_type=str, use_method=True)
    status = CalculatedProperty(verbose_name="Current Status", calculated_type=str, use_method=True)
    launch_time = CalculatedProperty(verbose_name="Server Launch Time", calculated_type=str, use_method=True)
-    console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=file, use_method=True)
+    console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=open, use_method=True)

    packages = []
    plugins = []

@@ -305,7 +305,7 @@ class Server(Model):
        # deal with possibly passed in logical volume:
        if logical_volume != None:
            cfg.set('EBS', 'logical_volume_name', logical_volume.name)
-        cfg_fp = StringIO.StringIO()
+        cfg_fp = StringIO()
        cfg.write(cfg_fp)
        # deal with the possibility that zone and/or keypair are strings read from the config file:
        if isinstance(zone, Zone):

@@ -325,14 +325,14 @@ class Server(Model):
        instances = reservation.instances
        if elastic_ip is not None and instances.__len__() > 0:
            instance = instances[0]
-            print 'Waiting for instance to start so we can set its elastic IP address...'
+            print('Waiting for instance to start so we can set its elastic IP address...')
            # Sometimes we get a message from ec2 that says that the instance does not exist.
            # Hopefully the following delay will give ec2 enough time to get to a stable state:
            time.sleep(5)
            while instance.update() != 'running':
                time.sleep(1)
            instance.use_ip(elastic_ip)
-            print 'set the elastic IP of the first instance to %s' % elastic_ip
+            print('set the elastic IP of the first instance to %s' % elastic_ip)
        for instance in instances:
            s = cls()
            s.ec2 = ec2

@@ -381,7 +381,7 @@ class Server(Model):
        for reservation in rs:
            for instance in reservation.instances:
                try:
-                    Server.find(instance_id=instance.id).next()
+                    next(Server.find(instance_id=instance.id))
                    boto.log.info('Server for %s already exists' % instance.id)
                except StopIteration:
                    s = cls()

@@ -527,7 +527,7 @@ class Server(Model):

    def get_cmdshell(self):
        if not self._cmdshell:
-            import cmdshell
+            from boto.manage import cmdshell
            self.get_ssh_key_file()
            self._cmdshell = cmdshell.start(self)
        return self._cmdshell
awx/lib/site-packages/boto/manage/task.py
@@ -23,7 +23,8 @@
import boto
from boto.sdb.db.property import StringProperty, DateTimeProperty, IntegerProperty
from boto.sdb.db.model import Model
-import datetime, subprocess, StringIO, time
+import datetime, subprocess, time
+from boto.compat import StringIO

def check_hour(val):
    if val == '*':

@@ -100,7 +101,7 @@ class Task(Model):

    def _run(self, msg, vtimeout):
        boto.log.info('Task[%s] - running:%s' % (self.name, self.command))
-        log_fp = StringIO.StringIO()
+        log_fp = StringIO()
        process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        nsecs = 5
Some files were not shown because too many files have changed in this diff.