diff --git a/awx/lib/site-packages/azure/__init__.py b/awx/lib/site-packages/azure/__init__.py new file mode 100644 index 0000000000..d3228953f5 --- /dev/null +++ b/awx/lib/site-packages/azure/__init__.py @@ -0,0 +1,905 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +import ast +import base64 +import sys +import types +import warnings +if sys.version_info < (3,): + from urllib2 import quote as url_quote + from urllib2 import unquote as url_unquote + _strtype = basestring +else: + from urllib.parse import quote as url_quote + from urllib.parse import unquote as url_unquote + _strtype = str + +from datetime import datetime +from xml.dom import minidom +from xml.sax.saxutils import escape as xml_escape + +#-------------------------------------------------------------------------- +# constants + +__author__ = 'Microsoft Corp. ' +__version__ = '0.8.1' + +# Live ServiceClient URLs +BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net' +QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net' +TABLE_SERVICE_HOST_BASE = '.table.core.windows.net' +SERVICE_BUS_HOST_BASE = '.servicebus.windows.net' +MANAGEMENT_HOST = 'management.core.windows.net' + +# Development ServiceClient URLs +DEV_BLOB_HOST = '127.0.0.1:10000' +DEV_QUEUE_HOST = '127.0.0.1:10001' +DEV_TABLE_HOST = '127.0.0.1:10002' + +# Default credentials for Development Storage Service +DEV_ACCOUNT_NAME = 'devstoreaccount1' +DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==' + +# All of our error messages +_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.' +_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.' +_ERROR_INCORRECT_TABLE_IN_BATCH = \ + 'Table should be the same in a batch operations' +_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \ + 'Partition Key should be the same in a batch operations' +_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \ + 'Row Keys should not be the same in a batch operations' +_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail' +_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \ + 'Message is not peek locked and cannot be deleted.' +_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \ + 'Message is not peek locked and cannot be unlocked.' +_ERROR_QUEUE_NOT_FOUND = 'Queue was not found' +_ERROR_TOPIC_NOT_FOUND = 'Topic was not found' +_ERROR_CONFLICT = 'Conflict ({0})' +_ERROR_NOT_FOUND = 'Not found ({0})' +_ERROR_UNKNOWN = 'Unknown error ({0})' +_ERROR_SERVICEBUS_MISSING_INFO = \ + 'You need to provide servicebus namespace, access key and Issuer' +_ERROR_STORAGE_MISSING_INFO = \ + 'You need to provide both account name and access key' +_ERROR_ACCESS_POLICY = \ + 'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \ + 'instance' +_WARNING_VALUE_SHOULD_BE_BYTES = \ + 'Warning: {0} must be bytes data type. 
It will be converted ' + \ + 'automatically, with utf-8 text encoding.' +_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.' +_ERROR_VALUE_NONE = '{0} should not be None.' +_ERROR_VALUE_NEGATIVE = '{0} should not be negative.' +_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \ + 'Cannot serialize the specified value ({0}) to an entity. Please use ' + \ + 'an EntityProperty (which can specify custom types), int, str, bool, ' + \ + 'or datetime.' +_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \ + 'Invalid page blob size: {0}. ' + \ + 'The size must be aligned to a 512-byte boundary.' + +_USER_AGENT_STRING = 'pyazure/' + __version__ + +METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata' + + +class WindowsAzureData(object): + + ''' This is the base of data class. + It is only used to check whether it is instance or not. ''' + pass + + +class WindowsAzureError(Exception): + + ''' WindowsAzure Excpetion base class. ''' + + def __init__(self, message): + super(WindowsAzureError, self).__init__(message) + + +class WindowsAzureConflictError(WindowsAzureError): + + '''Indicates that the resource could not be created because it already + exists''' + + def __init__(self, message): + super(WindowsAzureConflictError, self).__init__(message) + + +class WindowsAzureMissingResourceError(WindowsAzureError): + + '''Indicates that a request for a request for a resource (queue, table, + container, etc...) failed because the specified resource does not exist''' + + def __init__(self, message): + super(WindowsAzureMissingResourceError, self).__init__(message) + + +class WindowsAzureBatchOperationError(WindowsAzureError): + + '''Indicates that a batch operation failed''' + + def __init__(self, message, code): + super(WindowsAzureBatchOperationError, self).__init__(message) + self.code = code + + +class Feed(object): + pass + + +class _Base64String(str): + pass + + +class HeaderDict(dict): + + def __getitem__(self, index): + return super(HeaderDict, self).__getitem__(index.lower()) + + +def _encode_base64(data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def _decode_base64_to_bytes(data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def _decode_base64_to_text(data): + decoded_bytes = _decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def _get_readable_id(id_name, id_prefix_to_skip): + """simplified an id to be more friendly for us people""" + # id_name is in the form 'https://namespace.host.suffix/name' + # where name may contain a forward slash! 
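+    # Illustrative behavior (hypothetical ids): with id_prefix_to_skip=None,
+    # 'https://myns.servicebus.windows.net/myqueue' simplifies to 'myqueue'.
+    # When id_prefix_to_skip is given, the search for the final '/' resumes
+    # after that prefix, so only the trailing name is returned.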
+ pos = id_name.find('//') + if pos != -1: + pos += 2 + if id_prefix_to_skip: + pos = id_name.find(id_prefix_to_skip, pos) + if pos != -1: + pos += len(id_prefix_to_skip) + pos = id_name.find('/', pos) + if pos != -1: + return id_name[pos + 1:] + return id_name + + +def _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None): + ''' get properties from entry xml ''' + xmldoc = minidom.parseString(xmlstr) + properties = {} + + for entry in _get_child_nodes(xmldoc, 'entry'): + etag = entry.getAttributeNS(METADATA_NS, 'etag') + if etag: + properties['etag'] = etag + for updated in _get_child_nodes(entry, 'updated'): + properties['updated'] = updated.firstChild.nodeValue + for name in _get_children_from_path(entry, 'author', 'name'): + if name.firstChild is not None: + properties['author'] = name.firstChild.nodeValue + + if include_id: + for id in _get_child_nodes(entry, 'id'): + properties['name'] = _get_readable_id( + id.firstChild.nodeValue, id_prefix_to_skip) + + return properties + + +def _get_first_child_node_value(parent_node, node_name): + xml_attrs = _get_child_nodes(parent_node, node_name) + if xml_attrs: + xml_attr = xml_attrs[0] + if xml_attr.firstChild: + value = xml_attr.firstChild.nodeValue + return value + + +def _get_child_nodes(node, tagName): + return [childNode for childNode in node.getElementsByTagName(tagName) + if childNode.parentNode == node] + + +def _get_children_from_path(node, *path): + '''descends through a hierarchy of nodes returning the list of children + at the inner most level. Only returns children who share a common parent, + not cousins.''' + cur = node + for index, child in enumerate(path): + if isinstance(child, _strtype): + next = _get_child_nodes(cur, child) + else: + next = _get_child_nodesNS(cur, *child) + if index == len(path) - 1: + return next + elif not next: + break + + cur = next[0] + return [] + + +def _get_child_nodesNS(node, ns, tagName): + return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName) + if childNode.parentNode == node] + + +def _create_entry(entry_body): + ''' Adds common part of entry to a given entry body and return the whole + xml. 
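+    Illustrative result (hypothetical body): entry_body='<QueueDescription/>'
+    yields an Atom <entry> whose <content> element wraps that body and whose
+    <updated> element carries the current UTC time.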
''' + updated_str = datetime.utcnow().isoformat() + if datetime.utcnow().utcoffset() is None: + updated_str += '+00:00' + + entry_start = ''' + +<updated>{updated}</updated><author><name /></author><id /> +<content type="application/xml"> + {body}</content></entry>''' + return entry_start.format(updated=updated_str, body=entry_body) + + +def _to_datetime(strtime): + return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f") + +_KNOWN_SERIALIZATION_XFORMS = { + 'include_apis': 'IncludeAPIs', + 'message_id': 'MessageId', + 'content_md5': 'Content-MD5', + 'last_modified': 'Last-Modified', + 'cache_control': 'Cache-Control', + 'account_admin_live_email_id': 'AccountAdminLiveEmailId', + 'service_admin_live_email_id': 'ServiceAdminLiveEmailId', + 'subscription_id': 'SubscriptionID', + 'fqdn': 'FQDN', + 'private_id': 'PrivateID', + 'os_virtual_hard_disk': 'OSVirtualHardDisk', + 'logical_disk_size_in_gb': 'LogicalDiskSizeInGB', + 'logical_size_in_gb': 'LogicalSizeInGB', + 'os': 'OS', + 'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo', + 'copy_id': 'CopyId', + } + + +def _get_serialization_name(element_name): + """converts a Python name into a serializable name""" + known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) + if known is not None: + return known + + if element_name.startswith('x_ms_'): + return element_name.replace('_', '-') + if element_name.endswith('_id'): + element_name = element_name.replace('_id', 'ID') + for name in ['content_', 'last_modified', 'if_', 'cache_control']: + if element_name.startswith(name): + element_name = element_name.replace('_', '-_') + + return ''.join(name.capitalize() for name in element_name.split('_')) + +if sys.version_info < (3,): + _unicode_type = unicode + + def _str(value): + if isinstance(value, unicode): + return value.encode('utf-8') + + return str(value) +else: + _str = str + _unicode_type = str + + +def _str_or_none(value): + if value is None: + return None + + return _str(value) + + +def _int_or_none(value): + if value is None: + return None + + return str(int(value)) + + +def _bool_or_none(value): + if value is None: + return None + + if isinstance(value, bool): + if value: + return 'true' + else: + return 'false' + + return str(value) + + +def _convert_class_to_xml(source, xml_prefix=True): + if source is None: + return '' + + xmlstr = '' + if xml_prefix: + xmlstr = '<?xml version="1.0" encoding="utf-8"?>' + + if isinstance(source, list): + for value in source: + xmlstr += _convert_class_to_xml(value, False) + elif isinstance(source, WindowsAzureData): + class_name = source.__class__.__name__ + xmlstr += '<' + class_name + '>' + for name, value in vars(source).items(): + if value is not None: + if isinstance(value, list) or \ + isinstance(value, WindowsAzureData): + xmlstr += _convert_class_to_xml(value, False) + else: + xmlstr += ('<' + _get_serialization_name(name) + '>' + + xml_escape(str(value)) + '</' + + _get_serialization_name(name) + '>') + xmlstr += '</' + class_name + '>' + return xmlstr + + +def _find_namespaces_from_child(parent, child, namespaces): + """Recursively searches from the parent to the child, + gathering all the applicable namespaces along the way""" + for cur_child in parent.childNodes: + if cur_child is child: + return True + if _find_namespaces_from_child(cur_child, child, namespaces): + # we are the parent node + for key in cur_child.attributes.keys(): + if key.startswith('xmlns:') or key == 'xmlns': + namespaces[key] = cur_child.attributes[key] + break + return False + + +def _find_namespaces(parent, 
child): + res = {} + for key in parent.documentElement.attributes.keys(): + if key.startswith('xmlns:') or key == 'xmlns': + res[key] = parent.documentElement.attributes[key] + _find_namespaces_from_child(parent, child, res) + return res + + +def _clone_node_with_namespaces(node_to_clone, original_doc): + clone = node_to_clone.cloneNode(True) + + for key, value in _find_namespaces(original_doc, node_to_clone).items(): + clone.attributes[key] = value + + return clone + + +def _convert_response_to_feeds(response, convert_func): + if response is None: + return None + + feeds = _list_of(Feed) + + x_ms_continuation = HeaderDict() + for name, value in response.headers: + if 'x-ms-continuation' in name: + x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value + if x_ms_continuation: + setattr(feeds, 'x_ms_continuation', x_ms_continuation) + + xmldoc = minidom.parseString(response.body) + xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry') + if not xml_entries: + # in some cases, response contains only entry but no feed + xml_entries = _get_children_from_path(xmldoc, 'entry') + for xml_entry in xml_entries: + new_node = _clone_node_with_namespaces(xml_entry, xmldoc) + feeds.append(convert_func(new_node.toxml('utf-8'))) + + return feeds + + +def _validate_type_bytes(param_name, param): + if not isinstance(param, bytes): + raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) + + +def _validate_not_none(param_name, param): + if param is None: + raise TypeError(_ERROR_VALUE_NONE.format(param_name)) + + +def _fill_list_of(xmldoc, element_type, xml_element_name): + xmlelements = _get_child_nodes(xmldoc, xml_element_name) + return [_parse_response_body_from_xml_node(xmlelement, element_type) \ + for xmlelement in xmlelements] + + +def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name, + xml_element_name): + '''Converts an xml fragment into a list of scalar types. The parent xml + element contains a flat list of xml elements which are converted into the + specified scalar type and added to the list. + Example: + xmldoc= +<Endpoints> + <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint> + <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint> + <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint> +</Endpoints> + element_type=str + parent_xml_element_name='Endpoints' + xml_element_name='Endpoint' + ''' + xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: + xmlelements = _get_child_nodes(xmlelements[0], xml_element_name) + return [_get_node_value(xmlelement, element_type) \ + for xmlelement in xmlelements] + + +def _fill_dict(xmldoc, element_name): + xmlelements = _get_child_nodes(xmldoc, element_name) + if xmlelements: + return_obj = {} + for child in xmlelements[0].childNodes: + if child.firstChild: + return_obj[child.nodeName] = child.firstChild.nodeValue + return return_obj + + +def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name, + key_xml_element_name, value_xml_element_name): + '''Converts an xml fragment into a dictionary. The parent xml element + contains a list of xml elements where each element has a child element for + the key, and another for the value. 
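+    For the example below, the return value is
+    {'Ext1': 'Val1', 'Ext2': 'Val2'}.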
+ Example: + xmldoc= +<ExtendedProperties> + <ExtendedProperty> + <Name>Ext1</Name> + <Value>Val1</Value> + </ExtendedProperty> + <ExtendedProperty> + <Name>Ext2</Name> + <Value>Val2</Value> + </ExtendedProperty> +</ExtendedProperties> + element_type=str + parent_xml_element_name='ExtendedProperties' + pair_xml_element_name='ExtendedProperty' + key_xml_element_name='Name' + value_xml_element_name='Value' + ''' + return_obj = {} + + xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: + xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name) + for pair in xmlelements: + keys = _get_child_nodes(pair, key_xml_element_name) + values = _get_child_nodes(pair, value_xml_element_name) + if keys and values: + key = keys[0].firstChild.nodeValue + value = values[0].firstChild.nodeValue + return_obj[key] = value + + return return_obj + + +def _fill_instance_child(xmldoc, element_name, return_type): + '''Converts a child of the current dom element to the specified type. + ''' + xmlelements = _get_child_nodes( + xmldoc, _get_serialization_name(element_name)) + + if not xmlelements: + return None + + return_obj = return_type() + _fill_data_to_return_object(xmlelements[0], return_obj) + + return return_obj + + +def _fill_instance_element(element, return_type): + """Converts a DOM element into the specified object""" + return _parse_response_body_from_xml_node(element, return_type) + + +def _fill_data_minidom(xmldoc, element_name, data_member): + xmlelements = _get_child_nodes( + xmldoc, _get_serialization_name(element_name)) + + if not xmlelements or not xmlelements[0].childNodes: + return None + + value = xmlelements[0].firstChild.nodeValue + + if data_member is None: + return value + elif isinstance(data_member, datetime): + return _to_datetime(value) + elif type(data_member) is bool: + return value.lower() != 'false' + else: + return type(data_member)(value) + + +def _get_node_value(xmlelement, data_type): + value = xmlelement.firstChild.nodeValue + if data_type is datetime: + return _to_datetime(value) + elif data_type is bool: + return value.lower() != 'false' + else: + return data_type(value) + + +def _get_request_body_bytes_only(param_name, param_value): + '''Validates the request body passed in and converts it to bytes + if our policy allows it.''' + if param_value is None: + return b'' + + if isinstance(param_value, bytes): + return param_value + + # Previous versions of the SDK allowed data types other than bytes to be + # passed in, and they would be auto-converted to bytes. We preserve this + # behavior when running under 2.7, but issue a warning. + # Python 3 support is new, so we reject anything that's not bytes. + if sys.version_info < (3,): + warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name)) + return _get_request_body(param_value) + + raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) + + +def _get_request_body(request_body): + '''Converts an object into a request body. If it's None + we'll return an empty string, if it's one of our objects it'll + convert it to XML and return it. 
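+    (For instance, None becomes b'', u'abc' becomes b'abc', and an int
+    such as 42 becomes b'42' via str().)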
Otherwise we just use the object + directly''' + if request_body is None: + return b'' + + if isinstance(request_body, WindowsAzureData): + request_body = _convert_class_to_xml(request_body) + + if isinstance(request_body, bytes): + return request_body + + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') + + request_body = str(request_body) + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') + + return request_body + + +def _parse_enum_results_list(response, return_type, resp_type, item_type): + """resp_body is the XML we received +resp_type is a string, such as Containers, +return_type is the type we're constructing, such as ContainerEnumResults +item_type is the type object of the item to be created, such as Container + +This function then returns a ContainerEnumResults object with the +containers member populated with the results. +""" + + # parsing something like: + # <EnumerationResults ... > + # <Queues> + # <Queue> + # <Something /> + # <SomethingElse /> + # </Queue> + # </Queues> + # </EnumerationResults> + respbody = response.body + return_obj = return_type() + doc = minidom.parseString(respbody) + + items = [] + for enum_results in _get_child_nodes(doc, 'EnumerationResults'): + # path is something like Queues, Queue + for child in _get_children_from_path(enum_results, + resp_type, + resp_type[:-1]): + items.append(_fill_instance_element(child, item_type)) + + for name, value in vars(return_obj).items(): + # queues, Queues, this is the list its self which we populated + # above + if name == resp_type.lower(): + # the list its self. + continue + value = _fill_data_minidom(enum_results, name, value) + if value is not None: + setattr(return_obj, name, value) + + setattr(return_obj, resp_type.lower(), items) + return return_obj + + +def _parse_simple_list(response, type, item_type, list_name): + respbody = response.body + res = type() + res_items = [] + doc = minidom.parseString(respbody) + type_name = type.__name__ + item_name = item_type.__name__ + for item in _get_children_from_path(doc, type_name, item_name): + res_items.append(_fill_instance_element(item, item_type)) + + setattr(res, list_name, res_items) + return res + + +def _parse_response(response, return_type): + ''' + Parse the HTTPResponse's body and fill all the data into a class of + return_type. 
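+    Illustrative use (hypothetical model class):
+    _parse_response(response, QueueDescription) fills a new QueueDescription
+    from the XML in response.body.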
+ ''' + return _parse_response_body_from_xml_text(response.body, return_type) + + +def _fill_data_to_return_object(node, return_obj): + members = dict(vars(return_obj)) + for name, value in members.items(): + if isinstance(value, _list_of): + setattr(return_obj, + name, + _fill_list_of(node, + value.list_type, + value.xml_element_name)) + elif isinstance(value, _scalar_list_of): + setattr(return_obj, + name, + _fill_scalar_list_of(node, + value.list_type, + _get_serialization_name(name), + value.xml_element_name)) + elif isinstance(value, _dict_of): + setattr(return_obj, + name, + _fill_dict_of(node, + _get_serialization_name(name), + value.pair_xml_element_name, + value.key_xml_element_name, + value.value_xml_element_name)) + elif isinstance(value, WindowsAzureData): + setattr(return_obj, + name, + _fill_instance_child(node, name, value.__class__)) + elif isinstance(value, dict): + setattr(return_obj, + name, + _fill_dict(node, _get_serialization_name(name))) + elif isinstance(value, _Base64String): + value = _fill_data_minidom(node, name, '') + if value is not None: + value = _decode_base64_to_text(value) + # always set the attribute, so we don't end up returning an object + # with type _Base64String + setattr(return_obj, name, value) + else: + value = _fill_data_minidom(node, name, value) + if value is not None: + setattr(return_obj, name, value) + + +def _parse_response_body_from_xml_node(node, return_type): + ''' + parse the xml and fill all the data into a class of return_type + ''' + return_obj = return_type() + _fill_data_to_return_object(node, return_obj) + + return return_obj + + +def _parse_response_body_from_xml_text(respbody, return_type): + ''' + parse the xml and fill all the data into a class of return_type + ''' + doc = minidom.parseString(respbody) + return_obj = return_type() + for node in _get_child_nodes(doc, return_type.__name__): + _fill_data_to_return_object(node, return_obj) + + return return_obj + + +class _dict_of(dict): + + """a dict which carries with it the xml element names for key,val. + Used for deserializaion and construction of the lists""" + + def __init__(self, pair_xml_element_name, key_xml_element_name, + value_xml_element_name): + self.pair_xml_element_name = pair_xml_element_name + self.key_xml_element_name = key_xml_element_name + self.value_xml_element_name = value_xml_element_name + super(_dict_of, self).__init__() + + +class _list_of(list): + + """a list which carries with it the type that's expected to go in it. + Used for deserializaion and construction of the lists""" + + def __init__(self, list_type, xml_element_name=None): + self.list_type = list_type + if xml_element_name is None: + self.xml_element_name = list_type.__name__ + else: + self.xml_element_name = xml_element_name + super(_list_of, self).__init__() + + +class _scalar_list_of(list): + + """a list of scalar types which carries with it the type that's + expected to go in it along with its xml element name. 
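+    For instance, _scalar_list_of(str, 'Endpoint') matches the flat
+    <Endpoints>/<Endpoint> fragment shown in _fill_scalar_list_of above.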
+ Used for deserializaion and construction of the lists""" + + def __init__(self, list_type, xml_element_name): + self.list_type = list_type + self.xml_element_name = xml_element_name + super(_scalar_list_of, self).__init__() + + +def _update_request_uri_query_local_storage(request, use_local_storage): + ''' create correct uri and query for the request ''' + uri, query = _update_request_uri_query(request) + if use_local_storage: + return '/' + DEV_ACCOUNT_NAME + uri, query + return uri, query + + +def _update_request_uri_query(request): + '''pulls the query string out of the URI and moves it into + the query portion of the request object. If there are already + query parameters on the request the parameters in the URI will + appear after the existing parameters''' + + if '?' in request.path: + request.path, _, query_string = request.path.partition('?') + if query_string: + query_params = query_string.split('&') + for query in query_params: + if '=' in query: + name, _, value = query.partition('=') + request.query.append((name, value)) + + request.path = url_quote(request.path, '/()$=\',') + + # add encoded queries to request.path. + if request.query: + request.path += '?' + for name, value in request.query: + if value is not None: + request.path += name + '=' + url_quote(value, '/()$=\',') + '&' + request.path = request.path[:-1] + + return request.path, request.query + + +def _dont_fail_on_exist(error): + ''' don't throw exception if the resource exists. + This is called by create_* APIs with fail_on_exist=False''' + if isinstance(error, WindowsAzureConflictError): + return False + else: + raise error + + +def _dont_fail_not_exist(error): + ''' don't throw exception if the resource doesn't exist. + This is called by create_* APIs with fail_on_exist=False''' + if isinstance(error, WindowsAzureMissingResourceError): + return False + else: + raise error + + +def _general_error_handler(http_error): + ''' Simple error handler for azure.''' + if http_error.status == 409: + raise WindowsAzureConflictError( + _ERROR_CONFLICT.format(str(http_error))) + elif http_error.status == 404: + raise WindowsAzureMissingResourceError( + _ERROR_NOT_FOUND.format(str(http_error))) + else: + if http_error.respbody is not None: + raise WindowsAzureError( + _ERROR_UNKNOWN.format(str(http_error)) + '\n' + \ + http_error.respbody.decode('utf-8')) + else: + raise WindowsAzureError(_ERROR_UNKNOWN.format(str(http_error))) + + +def _parse_response_for_dict(response): + ''' Extracts name-values from response header. Filter out the standard + http headers.''' + + if response is None: + return None + http_headers = ['server', 'date', 'location', 'host', + 'via', 'proxy-connection', 'connection'] + return_dict = HeaderDict() + if response.headers: + for name, value in response.headers: + if not name.lower() in http_headers: + return_dict[name] = value + + return return_dict + + +def _parse_response_for_dict_prefix(response, prefixes): + ''' Extracts name-values for names starting with prefix from response + header. Filter out the standard http headers.''' + + if response is None: + return None + return_dict = {} + orig_dict = _parse_response_for_dict(response) + if orig_dict: + for name, value in orig_dict.items(): + for prefix_value in prefixes: + if name.lower().startswith(prefix_value.lower()): + return_dict[name] = value + break + return return_dict + else: + return None + + +def _parse_response_for_dict_filter(response, filter): + ''' Extracts name-values for names in filter from response header. 
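+    For example, filter=['etag', 'last-modified'] keeps only those two
+    headers; names are compared lowercase.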
Filter + out the standard http headers.''' + if response is None: + return None + return_dict = {} + orig_dict = _parse_response_for_dict(response) + if orig_dict: + for name, value in orig_dict.items(): + if name.lower() in filter: + return_dict[name] = value + return return_dict + else: + return None diff --git a/awx/lib/site-packages/azure/http/__init__.py b/awx/lib/site-packages/azure/http/__init__.py new file mode 100644 index 0000000000..3bc1e258db --- /dev/null +++ b/awx/lib/site-packages/azure/http/__init__.py @@ -0,0 +1,73 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- + +HTTP_RESPONSE_NO_CONTENT = 204 + + +class HTTPError(Exception): + + ''' HTTP Exception when response status code >= 300 ''' + + def __init__(self, status, message, respheader, respbody): + '''Creates a new HTTPError with the specified status, message, + response headers and body''' + self.status = status + self.respheader = respheader + self.respbody = respbody + Exception.__init__(self, message) + + +class HTTPResponse(object): + + """Represents a response from an HTTP request. An HTTPResponse has the + following attributes: + + status: the status code of the response + message: the message + headers: the returned headers, as a list of (name, value) pairs + body: the body of the response + """ + + def __init__(self, status, message, headers, body): + self.status = status + self.message = message + self.headers = headers + self.body = body + + +class HTTPRequest(object): + + '''Represents an HTTP Request. An HTTP Request consists of the following + attributes: + + host: the host name to connect to + method: the method to use to connect (string such as GET, POST, PUT, etc.) + path: the uri fragment + query: query parameters specified as a list of (name, value) pairs + headers: header values specified as (name, value) pairs + body: the body of the request. + protocol_override: + specify to use this protocol instead of the global one stored in + _HTTPClient. + ''' + + def __init__(self): + self.host = '' + self.method = '' + self.path = '' + self.query = [] # list of (name, value) + self.headers = [] # list of (header name, header value) + self.body = '' + self.protocol_override = None diff --git a/awx/lib/site-packages/azure/http/batchclient.py b/awx/lib/site-packages/azure/http/batchclient.py new file mode 100644 index 0000000000..0e6d60d3bb --- /dev/null +++ b/awx/lib/site-packages/azure/http/batchclient.py @@ -0,0 +1,339 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +import sys +import uuid + +from azure import ( + _update_request_uri_query, + WindowsAzureError, + WindowsAzureBatchOperationError, + _get_children_from_path, + url_unquote, + _ERROR_CANNOT_FIND_PARTITION_KEY, + _ERROR_CANNOT_FIND_ROW_KEY, + _ERROR_INCORRECT_TABLE_IN_BATCH, + _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH, + _ERROR_DUPLICATE_ROW_KEY_IN_BATCH, + _ERROR_BATCH_COMMIT_FAIL, + ) +from azure.http import HTTPError, HTTPRequest, HTTPResponse +from azure.http.httpclient import _HTTPClient +from azure.storage import ( + _update_storage_table_header, + METADATA_NS, + _sign_storage_table_request, + ) +from xml.dom import minidom + +_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices' + +if sys.version_info < (3,): + def _new_boundary(): + return str(uuid.uuid1()) +else: + def _new_boundary(): + return str(uuid.uuid1()).encode('utf-8') + + +class _BatchClient(_HTTPClient): + + ''' + This is the class that is used for batch operation for storage table + service. It only supports one changeset. + ''' + + def __init__(self, service_instance, account_key, account_name, + protocol='http'): + _HTTPClient.__init__(self, service_instance, account_name=account_name, + account_key=account_key, protocol=protocol) + self.is_batch = False + self.batch_requests = [] + self.batch_table = '' + self.batch_partition_key = '' + self.batch_row_keys = [] + + def get_request_table(self, request): + ''' + Extracts table name from request.uri. The request.uri has either + "/mytable(...)" or "/mytable" format. + + request: the request to insert, update or delete entity + ''' + if '(' in request.path: + pos = request.path.find('(') + return request.path[1:pos] + else: + return request.path[1:] + + def get_request_partition_key(self, request): + ''' + Extracts PartitionKey from request.body if it is a POST request or from + request.path if it is not a POST request. Only insert operation request + is a POST request and the PartitionKey is in the request body. + + request: the request to insert, update or delete entity + ''' + if request.method == 'POST': + doc = minidom.parseString(request.body) + part_key = _get_children_from_path( + doc, 'entry', 'content', (METADATA_NS, 'properties'), + (_DATASERVICES_NS, 'PartitionKey')) + if not part_key: + raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY) + return part_key[0].firstChild.nodeValue + else: + uri = url_unquote(request.path) + pos1 = uri.find('PartitionKey=\'') + pos2 = uri.find('\',', pos1) + if pos1 == -1 or pos2 == -1: + raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY) + return uri[pos1 + len('PartitionKey=\''):pos2] + + def get_request_row_key(self, request): + ''' + Extracts RowKey from request.body if it is a POST request or from + request.path if it is not a POST request. Only insert operation request + is a POST request and the Rowkey is in the request body. 
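+        For a non-POST request, the RowKey is parsed out of a path such as
+        /mytable(PartitionKey='pk',RowKey='rk') (hypothetical values).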
+ + request: the request to insert, update or delete entity + ''' + if request.method == 'POST': + doc = minidom.parseString(request.body) + row_key = _get_children_from_path( + doc, 'entry', 'content', (METADATA_NS, 'properties'), + (_DATASERVICES_NS, 'RowKey')) + if not row_key: + raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY) + return row_key[0].firstChild.nodeValue + else: + uri = url_unquote(request.path) + pos1 = uri.find('RowKey=\'') + pos2 = uri.find('\')', pos1) + if pos1 == -1 or pos2 == -1: + raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY) + row_key = uri[pos1 + len('RowKey=\''):pos2] + return row_key + + def validate_request_table(self, request): + ''' + Validates that all requests have the same table name. Set the table + name if it is the first request for the batch operation. + + request: the request to insert, update or delete entity + ''' + if self.batch_table: + if self.get_request_table(request) != self.batch_table: + raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH) + else: + self.batch_table = self.get_request_table(request) + + def validate_request_partition_key(self, request): + ''' + Validates that all requests have the same PartitiionKey. Set the + PartitionKey if it is the first request for the batch operation. + + request: the request to insert, update or delete entity + ''' + if self.batch_partition_key: + if self.get_request_partition_key(request) != \ + self.batch_partition_key: + raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH) + else: + self.batch_partition_key = self.get_request_partition_key(request) + + def validate_request_row_key(self, request): + ''' + Validates that all requests have the different RowKey and adds RowKey + to existing RowKey list. + + request: the request to insert, update or delete entity + ''' + if self.batch_row_keys: + if self.get_request_row_key(request) in self.batch_row_keys: + raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH) + else: + self.batch_row_keys.append(self.get_request_row_key(request)) + + def begin_batch(self): + ''' + Starts the batch operation. Intializes the batch variables + + is_batch: batch operation flag. + batch_table: the table name of the batch operation + batch_partition_key: the PartitionKey of the batch requests. + batch_row_keys: the RowKey list of adding requests. + batch_requests: the list of the requests. + ''' + self.is_batch = True + self.batch_table = '' + self.batch_partition_key = '' + self.batch_row_keys = [] + self.batch_requests = [] + + def insert_request_to_batch(self, request): + ''' + Adds request to batch operation. + + request: the request to insert, update or delete entity + ''' + self.validate_request_table(request) + self.validate_request_partition_key(request) + self.validate_request_row_key(request) + self.batch_requests.append(request) + + def commit_batch(self): + ''' Resets batch flag and commits the batch requests. ''' + if self.is_batch: + self.is_batch = False + self.commit_batch_requests() + + def commit_batch_requests(self): + ''' Commits the batch requests. ''' + + batch_boundary = b'batch_' + _new_boundary() + changeset_boundary = b'changeset_' + _new_boundary() + + # Commits batch only the requests list is not empty. 
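+        # Rough shape of the multipart payload assembled below (boundary
+        # values are illustrative):
+        #   --batch_<uuid>
+        #   Content-Type: multipart/mixed; boundary=changeset_<uuid>
+        #
+        #   --changeset_<uuid>
+        #   Content-Type: application/http
+        #   ...one embedded HTTP request per batched operation...
+        #   --changeset_<uuid>--
+        #   --batch_<uuid>--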
+ if self.batch_requests: + request = HTTPRequest() + request.method = 'POST' + request.host = self.batch_requests[0].host + request.path = '/$batch' + request.headers = [ + ('Content-Type', 'multipart/mixed; boundary=' + \ + batch_boundary.decode('utf-8')), + ('Accept', 'application/atom+xml,application/xml'), + ('Accept-Charset', 'UTF-8')] + + request.body = b'--' + batch_boundary + b'\n' + request.body += b'Content-Type: multipart/mixed; boundary=' + request.body += changeset_boundary + b'\n\n' + + content_id = 1 + + # Adds each request body to the POST data. + for batch_request in self.batch_requests: + request.body += b'--' + changeset_boundary + b'\n' + request.body += b'Content-Type: application/http\n' + request.body += b'Content-Transfer-Encoding: binary\n\n' + request.body += batch_request.method.encode('utf-8') + request.body += b' http://' + request.body += batch_request.host.encode('utf-8') + request.body += batch_request.path.encode('utf-8') + request.body += b' HTTP/1.1\n' + request.body += b'Content-ID: ' + request.body += str(content_id).encode('utf-8') + b'\n' + content_id += 1 + + # Add different headers for different type requests. + if not batch_request.method == 'DELETE': + request.body += \ + b'Content-Type: application/atom+xml;type=entry\n' + for name, value in batch_request.headers: + if name == 'If-Match': + request.body += name.encode('utf-8') + b': ' + request.body += value.encode('utf-8') + b'\n' + break + request.body += b'Content-Length: ' + request.body += str(len(batch_request.body)).encode('utf-8') + request.body += b'\n\n' + request.body += batch_request.body + b'\n' + else: + for name, value in batch_request.headers: + # If-Match should be already included in + # batch_request.headers, but in case it is missing, + # just add it. + if name == 'If-Match': + request.body += name.encode('utf-8') + b': ' + request.body += value.encode('utf-8') + b'\n\n' + break + else: + request.body += b'If-Match: *\n\n' + + request.body += b'--' + changeset_boundary + b'--' + b'\n' + request.body += b'--' + batch_boundary + b'--' + + request.path, request.query = _update_request_uri_query(request) + request.headers = _update_storage_table_header(request) + auth = _sign_storage_table_request(request, + self.account_name, + self.account_key) + request.headers.append(('Authorization', auth)) + + # Submit the whole request as batch request. + response = self.perform_request(request) + if response.status >= 300: + raise HTTPError(response.status, + _ERROR_BATCH_COMMIT_FAIL, + self.respheader, + response.body) + + # http://www.odata.org/documentation/odata-version-2-0/batch-processing/ + # The body of a ChangeSet response is either a response for all the + # successfully processed change request within the ChangeSet, + # formatted exactly as it would have appeared outside of a batch, + # or a single response indicating a failure of the entire ChangeSet. + responses = self._parse_batch_response(response.body) + if responses and responses[0].status >= 300: + self._report_batch_error(responses[0]) + + def cancel_batch(self): + ''' Resets the batch flag. 
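+        Requests collected since begin_batch() are simply never committed.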
''' + self.is_batch = False + + def _parse_batch_response(self, body): + parts = body.split(b'--changesetresponse_') + + responses = [] + for part in parts: + httpLocation = part.find(b'HTTP/') + if httpLocation > 0: + response = self._parse_batch_response_part(part[httpLocation:]) + responses.append(response) + + return responses + + def _parse_batch_response_part(self, part): + lines = part.splitlines(); + + # First line is the HTTP status/reason + status, _, reason = lines[0].partition(b' ')[2].partition(b' ') + + # Followed by headers and body + headers = [] + body = b'' + isBody = False + for line in lines[1:]: + if line == b'' and not isBody: + isBody = True + elif isBody: + body += line + else: + headerName, _, headerVal = line.partition(b':') + headers.append((headerName.lower(), headerVal)) + + return HTTPResponse(int(status), reason.strip(), headers, body) + + def _report_batch_error(self, response): + xml = response.body.decode('utf-8') + doc = minidom.parseString(xml) + + n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'code') + code = n[0].firstChild.nodeValue if n and n[0].firstChild else '' + + n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'message') + message = n[0].firstChild.nodeValue if n and n[0].firstChild else xml + + raise WindowsAzureBatchOperationError(message, code) diff --git a/awx/lib/site-packages/azure/http/httpclient.py b/awx/lib/site-packages/azure/http/httpclient.py new file mode 100644 index 0000000000..662d60e92c --- /dev/null +++ b/awx/lib/site-packages/azure/http/httpclient.py @@ -0,0 +1,223 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +import base64 +import os +import sys + +if sys.version_info < (3,): + from httplib import ( + HTTPSConnection, + HTTPConnection, + HTTP_PORT, + HTTPS_PORT, + ) + from urlparse import urlparse +else: + from http.client import ( + HTTPSConnection, + HTTPConnection, + HTTP_PORT, + HTTPS_PORT, + ) + from urllib.parse import urlparse + +from azure.http import HTTPError, HTTPResponse +from azure import _USER_AGENT_STRING, _update_request_uri_query + + +class _HTTPClient(object): + + ''' + Takes the request and sends it to cloud service and returns the response. + ''' + + def __init__(self, service_instance, cert_file=None, account_name=None, + account_key=None, service_namespace=None, issuer=None, + protocol='https'): + ''' + service_instance: service client instance. + cert_file: + certificate file name/location. This is only used in hosted + service management. + account_name: the storage account. + account_key: + the storage account access key for storage services or servicebus + access key for service bus service. + service_namespace: the service namespace for service bus. + issuer: the issuer for service bus service. 
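+        Illustrative construction (development-storage values):
+        _HTTPClient(None, account_name='devstoreaccount1',
+                    account_key='<key>', protocol='http')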
+ ''' + self.service_instance = service_instance + self.status = None + self.respheader = None + self.message = None + self.cert_file = cert_file + self.account_name = account_name + self.account_key = account_key + self.service_namespace = service_namespace + self.issuer = issuer + self.protocol = protocol + self.proxy_host = None + self.proxy_port = None + self.proxy_user = None + self.proxy_password = None + self.use_httplib = self.should_use_httplib() + + def should_use_httplib(self): + if sys.platform.lower().startswith('win') and self.cert_file: + # On Windows, auto-detect between Windows Store Certificate + # (winhttp) and OpenSSL .pem certificate file (httplib). + # + # We used to only support certificates installed in the Windows + # Certificate Store. + # cert_file example: CURRENT_USER\my\CertificateName + # + # We now support using an OpenSSL .pem certificate file, + # for a consistent experience across all platforms. + # cert_file example: account\certificate.pem + # + # When using OpenSSL .pem certificate file on Windows, make sure + # you are on CPython 2.7.4 or later. + + # If it's not an existing file on disk, then treat it as a path in + # the Windows Certificate Store, which means we can't use httplib. + if not os.path.isfile(self.cert_file): + return False + + return True + + def set_proxy(self, host, port, user, password): + ''' + Sets the proxy server host and port for the HTTP CONNECT Tunnelling. + + host: Address of the proxy. Ex: '192.168.0.100' + port: Port of the proxy. Ex: 6000 + user: User for proxy authorization. + password: Password for proxy authorization. + ''' + self.proxy_host = host + self.proxy_port = port + self.proxy_user = user + self.proxy_password = password + + def get_connection(self, request): + ''' Create connection for the request. 
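+        A winhttp-based connection is used when cert_file names a Windows
+        certificate store path; otherwise httplib's HTTP(S)Connection is
+        used, with an optional CONNECT tunnel through the configured proxy.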
''' + protocol = request.protocol_override \ + if request.protocol_override else self.protocol + target_host = request.host + target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT + + if not self.use_httplib: + import azure.http.winhttp + connection = azure.http.winhttp._HTTPConnection( + target_host, cert_file=self.cert_file, protocol=protocol) + proxy_host = self.proxy_host + proxy_port = self.proxy_port + else: + if ':' in target_host: + target_host, _, target_port = target_host.rpartition(':') + if self.proxy_host: + proxy_host = target_host + proxy_port = target_port + host = self.proxy_host + port = self.proxy_port + else: + host = target_host + port = target_port + + if protocol == 'http': + connection = HTTPConnection(host, int(port)) + else: + connection = HTTPSConnection( + host, int(port), cert_file=self.cert_file) + + if self.proxy_host: + headers = None + if self.proxy_user and self.proxy_password: + auth = base64.encodestring( + "{0}:{1}".format(self.proxy_user, self.proxy_password)) + headers = {'Proxy-Authorization': 'Basic {0}'.format(auth)} + connection.set_tunnel(proxy_host, int(proxy_port), headers) + + return connection + + def send_request_headers(self, connection, request_headers): + if self.use_httplib: + if self.proxy_host: + for i in connection._buffer: + if i.startswith("Host: "): + connection._buffer.remove(i) + connection.putheader( + 'Host', "{0}:{1}".format(connection._tunnel_host, + connection._tunnel_port)) + + for name, value in request_headers: + if value: + connection.putheader(name, value) + + connection.putheader('User-Agent', _USER_AGENT_STRING) + connection.endheaders() + + def send_request_body(self, connection, request_body): + if request_body: + assert isinstance(request_body, bytes) + connection.send(request_body) + elif (not isinstance(connection, HTTPSConnection) and + not isinstance(connection, HTTPConnection)): + connection.send(None) + + def perform_request(self, request): + ''' Sends request to cloud service server and return the response. 
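+        Follows 307 redirects by re-issuing the request against the new
+        location, and raises HTTPError for any status >= 300.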
''' + connection = self.get_connection(request) + try: + connection.putrequest(request.method, request.path) + + if not self.use_httplib: + if self.proxy_host and self.proxy_user: + connection.set_proxy_credentials( + self.proxy_user, self.proxy_password) + + self.send_request_headers(connection, request.headers) + self.send_request_body(connection, request.body) + + resp = connection.getresponse() + self.status = int(resp.status) + self.message = resp.reason + self.respheader = headers = resp.getheaders() + + # for consistency across platforms, make header names lowercase + for i, value in enumerate(headers): + headers[i] = (value[0].lower(), value[1]) + + respbody = None + if resp.length is None: + respbody = resp.read() + elif resp.length > 0: + respbody = resp.read(resp.length) + + response = HTTPResponse( + int(resp.status), resp.reason, headers, respbody) + if self.status == 307: + new_url = urlparse(dict(headers)['location']) + request.host = new_url.hostname + request.path = new_url.path + request.path, request.query = _update_request_uri_query(request) + return self.perform_request(request) + if self.status >= 300: + raise HTTPError(self.status, self.message, + self.respheader, respbody) + + return response + finally: + connection.close() diff --git a/awx/lib/site-packages/azure/http/winhttp.py b/awx/lib/site-packages/azure/http/winhttp.py new file mode 100644 index 0000000000..86790333fc --- /dev/null +++ b/awx/lib/site-packages/azure/http/winhttp.py @@ -0,0 +1,471 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +from ctypes import ( + c_void_p, + c_long, + c_ulong, + c_longlong, + c_ulonglong, + c_short, + c_ushort, + c_wchar_p, + c_byte, + byref, + Structure, + Union, + POINTER, + WINFUNCTYPE, + HRESULT, + oledll, + WinDLL, + ) +import ctypes +import sys + +if sys.version_info >= (3,): + def unicode(text): + return text + +#------------------------------------------------------------------------------ +# Constants that are used in COM operations +VT_EMPTY = 0 +VT_NULL = 1 +VT_I2 = 2 +VT_I4 = 3 +VT_BSTR = 8 +VT_BOOL = 11 +VT_I1 = 16 +VT_UI1 = 17 +VT_UI2 = 18 +VT_UI4 = 19 +VT_I8 = 20 +VT_UI8 = 21 +VT_ARRAY = 8192 + +HTTPREQUEST_PROXYSETTING_PROXY = 2 +HTTPREQUEST_SETCREDENTIALS_FOR_PROXY = 1 + +HTTPREQUEST_PROXY_SETTING = c_long +HTTPREQUEST_SETCREDENTIALS_FLAGS = c_long +#------------------------------------------------------------------------------ +# Com related APIs that are used. 
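+# The ctypes declarations below pin argument/return types for the small set
+# of ole32/oleaut32 entry points this module calls (CLSIDFromString,
+# CoCreateInstance, BSTR allocation and the SAFEARRAY helpers).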
+_ole32 = oledll.ole32 +_oleaut32 = WinDLL('oleaut32') +_CLSIDFromString = _ole32.CLSIDFromString +_CoInitialize = _ole32.CoInitialize +_CoInitialize.argtypes = [c_void_p] + +_CoCreateInstance = _ole32.CoCreateInstance + +_SysAllocString = _oleaut32.SysAllocString +_SysAllocString.restype = c_void_p +_SysAllocString.argtypes = [c_wchar_p] + +_SysFreeString = _oleaut32.SysFreeString +_SysFreeString.argtypes = [c_void_p] + +# SAFEARRAY* +# SafeArrayCreateVector(_In_ VARTYPE vt,_In_ LONG lLbound,_In_ ULONG +# cElements); +_SafeArrayCreateVector = _oleaut32.SafeArrayCreateVector +_SafeArrayCreateVector.restype = c_void_p +_SafeArrayCreateVector.argtypes = [c_ushort, c_long, c_ulong] + +# HRESULT +# SafeArrayAccessData(_In_ SAFEARRAY *psa, _Out_ void **ppvData); +_SafeArrayAccessData = _oleaut32.SafeArrayAccessData +_SafeArrayAccessData.argtypes = [c_void_p, POINTER(c_void_p)] + +# HRESULT +# SafeArrayUnaccessData(_In_ SAFEARRAY *psa); +_SafeArrayUnaccessData = _oleaut32.SafeArrayUnaccessData +_SafeArrayUnaccessData.argtypes = [c_void_p] + +# HRESULT +# SafeArrayGetUBound(_In_ SAFEARRAY *psa, _In_ UINT nDim, _Out_ LONG +# *plUbound); +_SafeArrayGetUBound = _oleaut32.SafeArrayGetUBound +_SafeArrayGetUBound.argtypes = [c_void_p, c_ulong, POINTER(c_long)] + + +#------------------------------------------------------------------------------ + +class BSTR(c_wchar_p): + + ''' BSTR class in python. ''' + + def __init__(self, value): + super(BSTR, self).__init__(_SysAllocString(value)) + + def __del__(self): + _SysFreeString(self) + + +class VARIANT(Structure): + + ''' + VARIANT structure in python. Does not match the definition in + MSDN exactly & it is only mapping the used fields. Field names are also + slighty different. + ''' + + class _tagData(Union): + + class _tagRecord(Structure): + _fields_ = [('pvoid', c_void_p), ('precord', c_void_p)] + + _fields_ = [('llval', c_longlong), + ('ullval', c_ulonglong), + ('lval', c_long), + ('ulval', c_ulong), + ('ival', c_short), + ('boolval', c_ushort), + ('bstrval', BSTR), + ('parray', c_void_p), + ('record', _tagRecord)] + + _fields_ = [('vt', c_ushort), + ('wReserved1', c_ushort), + ('wReserved2', c_ushort), + ('wReserved3', c_ushort), + ('vdata', _tagData)] + + @staticmethod + def create_empty(): + variant = VARIANT() + variant.vt = VT_EMPTY + variant.vdata.llval = 0 + return variant + + @staticmethod + def create_safearray_from_str(text): + variant = VARIANT() + variant.vt = VT_ARRAY | VT_UI1 + + length = len(text) + variant.vdata.parray = _SafeArrayCreateVector(VT_UI1, 0, length) + pvdata = c_void_p() + _SafeArrayAccessData(variant.vdata.parray, byref(pvdata)) + ctypes.memmove(pvdata, text, length) + _SafeArrayUnaccessData(variant.vdata.parray) + + return variant + + @staticmethod + def create_bstr_from_str(text): + variant = VARIANT() + variant.vt = VT_BSTR + variant.vdata.bstrval = BSTR(text) + return variant + + @staticmethod + def create_bool_false(): + variant = VARIANT() + variant.vt = VT_BOOL + variant.vdata.boolval = 0 + return variant + + def is_safearray_of_bytes(self): + return self.vt == VT_ARRAY | VT_UI1 + + def str_from_safearray(self): + assert self.vt == VT_ARRAY | VT_UI1 + pvdata = c_void_p() + count = c_long() + _SafeArrayGetUBound(self.vdata.parray, 1, byref(count)) + count = c_long(count.value + 1) + _SafeArrayAccessData(self.vdata.parray, byref(pvdata)) + text = ctypes.string_at(pvdata, count) + _SafeArrayUnaccessData(self.vdata.parray) + return text + + def __del__(self): + _VariantClear(self) + +# HRESULT VariantClear(_Inout_ 
VARIANTARG *pvarg); +_VariantClear = _oleaut32.VariantClear +_VariantClear.argtypes = [POINTER(VARIANT)] + + +class GUID(Structure): + + ''' GUID structure in python. ''' + + _fields_ = [("data1", c_ulong), + ("data2", c_ushort), + ("data3", c_ushort), + ("data4", c_byte * 8)] + + def __init__(self, name=None): + if name is not None: + _CLSIDFromString(unicode(name), byref(self)) + + +class _WinHttpRequest(c_void_p): + + ''' + Maps the Com API to Python class functions. Not all methods in + IWinHttpWebRequest are mapped - only the methods we use. + ''' + _AddRef = WINFUNCTYPE(c_long) \ + (1, 'AddRef') + _Release = WINFUNCTYPE(c_long) \ + (2, 'Release') + _SetProxy = WINFUNCTYPE(HRESULT, + HTTPREQUEST_PROXY_SETTING, + VARIANT, + VARIANT) \ + (7, 'SetProxy') + _SetCredentials = WINFUNCTYPE(HRESULT, + BSTR, + BSTR, + HTTPREQUEST_SETCREDENTIALS_FLAGS) \ + (8, 'SetCredentials') + _Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT) \ + (9, 'Open') + _SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR) \ + (10, 'SetRequestHeader') + _GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p)) \ + (11, 'GetResponseHeader') + _GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \ + (12, 'GetAllResponseHeaders') + _Send = WINFUNCTYPE(HRESULT, VARIANT) \ + (13, 'Send') + _Status = WINFUNCTYPE(HRESULT, POINTER(c_long)) \ + (14, 'Status') + _StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \ + (15, 'StatusText') + _ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \ + (16, 'ResponseText') + _ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \ + (17, 'ResponseBody') + _ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \ + (18, 'ResponseStream') + _WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort)) \ + (21, 'WaitForResponse') + _Abort = WINFUNCTYPE(HRESULT) \ + (22, 'Abort') + _SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long) \ + (23, 'SetTimeouts') + _SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR) \ + (24, 'SetClientCertificate') + + def open(self, method, url): + ''' + Opens the request. + + method: the request VERB 'GET', 'POST', etc. + url: the url to connect + ''' + _WinHttpRequest._SetTimeouts(self, 0, 65000, 65000, 65000) + + flag = VARIANT.create_bool_false() + _method = BSTR(method) + _url = BSTR(url) + _WinHttpRequest._Open(self, _method, _url, flag) + + def set_request_header(self, name, value): + ''' Sets the request header. ''' + + _name = BSTR(name) + _value = BSTR(value) + _WinHttpRequest._SetRequestHeader(self, _name, _value) + + def get_all_response_headers(self): + ''' Gets back all response headers. ''' + + bstr_headers = c_void_p() + _WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers)) + bstr_headers = ctypes.cast(bstr_headers, c_wchar_p) + headers = bstr_headers.value + _SysFreeString(bstr_headers) + return headers + + def send(self, request=None): + ''' Sends the request body. ''' + + # Sends VT_EMPTY if it is GET, HEAD request. + if request is None: + var_empty = VARIANT.create_empty() + _WinHttpRequest._Send(self, var_empty) + else: # Sends request body as SAFEArray. + _request = VARIANT.create_safearray_from_str(request) + _WinHttpRequest._Send(self, _request) + + def status(self): + ''' Gets status of response. ''' + + status = c_long() + _WinHttpRequest._Status(self, byref(status)) + return int(status.value) + + def status_text(self): + ''' Gets status text of response. 
'''
+
+        bstr_status_text = c_void_p()
+        _WinHttpRequest._StatusText(self, byref(bstr_status_text))
+        bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)
+        status_text = bstr_status_text.value
+        _SysFreeString(bstr_status_text)
+        return status_text
+
+    def response_body(self):
+        '''
+        Gets the response body as a SAFEARRAY and converts the SAFEARRAY to
+        str. XML responses may be prefixed with a 3-byte UTF-8 byte order
+        mark before <?xml, which is stripped here.
+        '''
+        var_respbody = VARIANT()
+        _WinHttpRequest._ResponseBody(self, byref(var_respbody))
+        if var_respbody.is_safearray_of_bytes():
+            respbody = var_respbody.str_from_safearray()
+            if respbody[3:].startswith(b'<?xml') and\
+               respbody.startswith(b'\xef\xbb\xbf'):
+                respbody = respbody[3:]
+            return respbody
+        else:
+            return ''
+
+    def set_client_certificate(self, certificate):
+        '''Sets client certificate for the request. '''
+        _certificate = BSTR(certificate)
+        _WinHttpRequest._SetClientCertificate(self, _certificate)
+
+    def set_tunnel(self, host, port):
+        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''
+        url = host
+        if port:
+            url = url + u':' + port
+
+        var_host = VARIANT.create_bstr_from_str(url)
+        var_empty = VARIANT.create_empty()
+
+        _WinHttpRequest._SetProxy(
+            self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)
+
+    def set_proxy_credentials(self, user, password):
+        _WinHttpRequest._SetCredentials(
+            self, BSTR(user), BSTR(password),
+            HTTPREQUEST_SETCREDENTIALS_FOR_PROXY)
+
+    def __del__(self):
+        if self.value is not None:
+            _WinHttpRequest._Release(self)
+
+
+class _Response(object):
+
+    ''' Response class corresponding to the response returned from httplib
+    HTTPConnection. '''
+
+    def __init__(self, _status, _status_text, _length, _headers, _respbody):
+        self.status = _status
+        self.reason = _status_text
+        self.length = _length
+        self.headers = _headers
+        self.respbody = _respbody
+
+    def getheaders(self):
+        '''Returns response headers.'''
+        return self.headers
+
+    def read(self, _length):
+        '''Returns the response body. '''
+        return self.respbody[:_length]
+
+
+class _HTTPConnection(object):
+
+    ''' Class corresponding to httplib HTTPConnection class. '''
+
+    def __init__(self, host, cert_file=None, key_file=None, protocol='http'):
+        ''' Initializes the IWinHttpWebRequest COM object. '''
+        self.host = unicode(host)
+        self.cert_file = cert_file
+        self._httprequest = _WinHttpRequest()
+        self.protocol = protocol
+        clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')
+        iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')
+        _CoInitialize(None)
+        _CoCreateInstance(byref(clsid), 0, 1, byref(iid),
+                          byref(self._httprequest))
+
+    def close(self):
+        pass
+
+    def set_tunnel(self, host, port=None, headers=None):
+        ''' Sets up the host and the port for the HTTP CONNECT Tunnelling. '''
+        self._httprequest.set_tunnel(unicode(host), unicode(str(port)))
+
+    def set_proxy_credentials(self, user, password):
+        self._httprequest.set_proxy_credentials(
+            unicode(user), unicode(password))
+
+    def putrequest(self, method, uri):
+        ''' Connects to host and sends the request. '''
+
+        protocol = unicode(self.protocol + '://')
+        url = protocol + self.host + unicode(uri)
+        self._httprequest.open(unicode(method), url)
+
+        # Sets the certificate for the connection if cert_file is set.
+        if self.cert_file is not None:
+            self._httprequest.set_client_certificate(unicode(self.cert_file))
+
+    def putheader(self, name, value):
+        ''' Sends a single request header.
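+
+        A hypothetical call, mirroring how the Azure services version header
+        is typically set:
+
+            conn.putheader('x-ms-version', '2011-06-01')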
''' + if sys.version_info < (3,): + name = str(name).decode('utf-8') + value = str(value).decode('utf-8') + self._httprequest.set_request_header(name, value) + + def endheaders(self): + ''' No operation. Exists only to provide the same interface of httplib + HTTPConnection.''' + pass + + def send(self, request_body): + ''' Sends request body. ''' + if not request_body: + self._httprequest.send() + else: + self._httprequest.send(request_body) + + def getresponse(self): + ''' Gets the response and generates the _Response object''' + status = self._httprequest.status() + status_text = self._httprequest.status_text() + + resp_headers = self._httprequest.get_all_response_headers() + fixed_headers = [] + for resp_header in resp_headers.split('\n'): + if (resp_header.startswith('\t') or\ + resp_header.startswith(' ')) and fixed_headers: + # append to previous header + fixed_headers[-1] += resp_header + else: + fixed_headers.append(resp_header) + + headers = [] + for resp_header in fixed_headers: + if ':' in resp_header: + pos = resp_header.find(':') + headers.append( + (resp_header[:pos].lower(), resp_header[pos + 1:].strip())) + + body = self._httprequest.response_body() + length = len(body) + + return _Response(status, status_text, length, headers, body) diff --git a/awx/lib/site-packages/azure/servicebus/__init__.py b/awx/lib/site-packages/azure/servicebus/__init__.py new file mode 100644 index 0000000000..bee85f4efd --- /dev/null +++ b/awx/lib/site-packages/azure/servicebus/__init__.py @@ -0,0 +1,851 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +import ast +import sys + +from datetime import datetime +from xml.dom import minidom +from azure import ( + WindowsAzureData, + WindowsAzureError, + xml_escape, + _create_entry, + _general_error_handler, + _get_entry_properties, + _get_child_nodes, + _get_children_from_path, + _get_first_child_node_value, + _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE, + _ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK, + _ERROR_QUEUE_NOT_FOUND, + _ERROR_TOPIC_NOT_FOUND, + ) +from azure.http import HTTPError + +# default rule name for subscription +DEFAULT_RULE_NAME = '$Default' + +#----------------------------------------------------------------------------- +# Constants for Azure app environment settings. 
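+# These variables are read by ServiceBusService when credentials are not
+# passed to its constructor, so a deployment can be configured without code
+# changes. A hypothetical shell setup (values are placeholders):
+#
+#     export AZURE_SERVICEBUS_NAMESPACE=mynamespace
+#     export AZURE_SERVICEBUS_ACCESS_KEY=<access key>
+#     export AZURE_SERVICEBUS_ISSUER=owner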
+AZURE_SERVICEBUS_NAMESPACE = 'AZURE_SERVICEBUS_NAMESPACE' +AZURE_SERVICEBUS_ACCESS_KEY = 'AZURE_SERVICEBUS_ACCESS_KEY' +AZURE_SERVICEBUS_ISSUER = 'AZURE_SERVICEBUS_ISSUER' + +# namespace used for converting rules to objects +XML_SCHEMA_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance' + + +class Queue(WindowsAzureData): + + ''' Queue class corresponding to Queue Description: + http://msdn.microsoft.com/en-us/library/windowsazure/hh780773''' + + def __init__(self, lock_duration=None, max_size_in_megabytes=None, + requires_duplicate_detection=None, requires_session=None, + default_message_time_to_live=None, + dead_lettering_on_message_expiration=None, + duplicate_detection_history_time_window=None, + max_delivery_count=None, enable_batched_operations=None, + size_in_bytes=None, message_count=None): + + self.lock_duration = lock_duration + self.max_size_in_megabytes = max_size_in_megabytes + self.requires_duplicate_detection = requires_duplicate_detection + self.requires_session = requires_session + self.default_message_time_to_live = default_message_time_to_live + self.dead_lettering_on_message_expiration = \ + dead_lettering_on_message_expiration + self.duplicate_detection_history_time_window = \ + duplicate_detection_history_time_window + self.max_delivery_count = max_delivery_count + self.enable_batched_operations = enable_batched_operations + self.size_in_bytes = size_in_bytes + self.message_count = message_count + + +class Topic(WindowsAzureData): + + ''' Topic class corresponding to Topic Description: + http://msdn.microsoft.com/en-us/library/windowsazure/hh780749. ''' + + def __init__(self, default_message_time_to_live=None, + max_size_in_megabytes=None, requires_duplicate_detection=None, + duplicate_detection_history_time_window=None, + enable_batched_operations=None, size_in_bytes=None): + + self.default_message_time_to_live = default_message_time_to_live + self.max_size_in_megabytes = max_size_in_megabytes + self.requires_duplicate_detection = requires_duplicate_detection + self.duplicate_detection_history_time_window = \ + duplicate_detection_history_time_window + self.enable_batched_operations = enable_batched_operations + self.size_in_bytes = size_in_bytes + + @property + def max_size_in_mega_bytes(self): + import warnings + warnings.warn( + 'This attribute has been changed to max_size_in_megabytes.') + return self.max_size_in_megabytes + + @max_size_in_mega_bytes.setter + def max_size_in_mega_bytes(self, value): + self.max_size_in_megabytes = value + + +class Subscription(WindowsAzureData): + + ''' Subscription class corresponding to Subscription Description: + http://msdn.microsoft.com/en-us/library/windowsazure/hh780763. 
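+
+    A minimal usage sketch (names and values are illustrative):
+
+        sub = Subscription(lock_duration='PT1M', max_delivery_count=10)
+        bus_service.create_subscription('mytopic', 'mysub', sub)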
'''
+
+    def __init__(self, lock_duration=None, requires_session=None,
+                 default_message_time_to_live=None,
+                 dead_lettering_on_message_expiration=None,
+                 dead_lettering_on_filter_evaluation_exceptions=None,
+                 enable_batched_operations=None, max_delivery_count=None,
+                 message_count=None):
+
+        self.lock_duration = lock_duration
+        self.requires_session = requires_session
+        self.default_message_time_to_live = default_message_time_to_live
+        self.dead_lettering_on_message_expiration = \
+            dead_lettering_on_message_expiration
+        self.dead_lettering_on_filter_evaluation_exceptions = \
+            dead_lettering_on_filter_evaluation_exceptions
+        self.enable_batched_operations = enable_batched_operations
+        self.max_delivery_count = max_delivery_count
+        self.message_count = message_count
+
+
+class Rule(WindowsAzureData):
+
+    ''' Rule class corresponding to Rule Description:
+    http://msdn.microsoft.com/en-us/library/windowsazure/hh780753. '''
+
+    def __init__(self, filter_type=None, filter_expression=None,
+                 action_type=None, action_expression=None):
+        self.filter_type = filter_type
+        self.filter_expression = filter_expression
+        self.action_type = action_type
+        self.action_expression = action_expression
+
+
+class Message(WindowsAzureData):
+
+    ''' Message class used in the send message/get message APIs. '''
+
+    def __init__(self, body=None, service_bus_service=None, location=None,
+                 custom_properties=None,
+                 type='application/atom+xml;type=entry;charset=utf-8',
+                 broker_properties=None):
+        self.body = body
+        self.location = location
+        self.broker_properties = broker_properties
+        self.custom_properties = custom_properties
+        self.type = type
+        self.service_bus_service = service_bus_service
+        self._topic_name = None
+        self._subscription_name = None
+        self._queue_name = None
+
+        if not service_bus_service:
+            return
+
+        # If location is set, extract the queue name for a queue message,
+        # or the topic and subscription names for a topic message.
+        if location:
+            if '/subscriptions/' in location:
+                pos = location.find('/subscriptions/')
+                pos1 = location.rfind('/', 0, pos - 1)
+                self._topic_name = location[pos1 + 1:pos]
+                pos += len('/subscriptions/')
+                pos1 = location.find('/', pos)
+                self._subscription_name = location[pos:pos1]
+            elif '/messages/' in location:
+                pos = location.find('/messages/')
+                pos1 = location.rfind('/', 0, pos - 1)
+                self._queue_name = location[pos1 + 1:pos]
+
+    def delete(self):
+        ''' Deletes the message, provided the queue name, or the topic and
+        subscription names, are known. '''
+        if self._queue_name:
+            self.service_bus_service.delete_queue_message(
+                self._queue_name,
+                self.broker_properties['SequenceNumber'],
+                self.broker_properties['LockToken'])
+        elif self._topic_name and self._subscription_name:
+            self.service_bus_service.delete_subscription_message(
+                self._topic_name,
+                self._subscription_name,
+                self.broker_properties['SequenceNumber'],
+                self.broker_properties['LockToken'])
+        else:
+            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)
+
+    def unlock(self):
+        ''' Unlocks the message, provided the queue name, or the topic and
+        subscription names, are known.
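+
+        A sketch of the peek-lock flow this supports (names are
+        illustrative):
+
+            msg = bus_service.receive_queue_message('myqueue', peek_lock=True)
+            try:
+                handle(msg.body)  # application-defined processing
+                msg.delete()
+            except Exception:
+                msg.unlock()      # make the message visible again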
'''
+        if self._queue_name:
+            self.service_bus_service.unlock_queue_message(
+                self._queue_name,
+                self.broker_properties['SequenceNumber'],
+                self.broker_properties['LockToken'])
+        elif self._topic_name and self._subscription_name:
+            self.service_bus_service.unlock_subscription_message(
+                self._topic_name,
+                self._subscription_name,
+                self.broker_properties['SequenceNumber'],
+                self.broker_properties['LockToken'])
+        else:
+            raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)
+
+    def add_headers(self, request):
+        ''' Adds additional headers to the request for a message request. '''
+
+        # Adds custom properties
+        if self.custom_properties:
+            for name, value in self.custom_properties.items():
+                if sys.version_info < (3,) and isinstance(value, unicode):
+                    request.headers.append(
+                        (name, '"' + value.encode('utf-8') + '"'))
+                elif isinstance(value, str):
+                    request.headers.append((name, '"' + str(value) + '"'))
+                elif isinstance(value, datetime):
+                    request.headers.append(
+                        (name, '"' + value.strftime('%a, %d %b %Y %H:%M:%S GMT') + '"'))
+                else:
+                    request.headers.append((name, str(value).lower()))
+
+        # Adds content-type
+        request.headers.append(('Content-Type', self.type))
+
+        # Adds BrokerProperties
+        if self.broker_properties:
+            request.headers.append(
+                ('BrokerProperties', str(self.broker_properties)))
+
+        return request.headers
+
+
+def _create_message(response, service_instance):
+    ''' Creates a message from a response.
+
+    response: response from the service bus cloud server.
+    service_instance: the service bus client.
+    '''
+    respbody = response.body
+    custom_properties = {}
+    broker_properties = None
+    message_type = None
+    message_location = None
+
+    # Extracts all information from the response headers.
+    for name, value in response.headers:
+        if name.lower() == 'brokerproperties':
+            broker_properties = ast.literal_eval(value)
+        elif name.lower() == 'content-type':
+            message_type = value
+        elif name.lower() == 'location':
+            message_location = value
+        elif name.lower() not in ['content-type',
+                                  'brokerproperties',
+                                  'transfer-encoding',
+                                  'server',
+                                  'location',
+                                  'date']:
+            if '"' in value:
+                value = value[1:-1]
+                try:
+                    custom_properties[name] = datetime.strptime(
+                        value, '%a, %d %b %Y %H:%M:%S GMT')
+                except ValueError:
+                    custom_properties[name] = value
+            else:  # only int, float or boolean
+                if value.lower() == 'true':
+                    custom_properties[name] = True
+                elif value.lower() == 'false':
+                    custom_properties[name] = False
+                # int('3.1') raises ValueError, so go through float() first
+                # to detect integral values.
+                elif str(int(float(value))) == value:
+                    custom_properties[name] = int(value)
+                else:
+                    custom_properties[name] = float(value)
+
+    if message_type is None:
+        message = Message(
+            respbody, service_instance, message_location, custom_properties,
+            'application/atom+xml;type=entry;charset=utf-8', broker_properties)
+    else:
+        message = Message(respbody, service_instance, message_location,
+                          custom_properties, message_type, broker_properties)
+    return message
+
+# convert functions
+
+
+def _convert_response_to_rule(response):
+    return _convert_xml_to_rule(response.body)
+
+
+def _convert_xml_to_rule(xmlstr):
+    ''' Converts response XML to a rule object.
+ + The format of xml for rule: +<entry xmlns='http://www.w3.org/2005/Atom'> +<content type='application/xml'> +<RuleDescription + xmlns:i="http://www.w3.org/2001/XMLSchema-instance" + xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> + <Filter i:type="SqlFilterExpression"> + <SqlExpression>MyProperty='XYZ'</SqlExpression> + </Filter> + <Action i:type="SqlFilterAction"> + <SqlExpression>set MyProperty2 = 'ABC'</SqlExpression> + </Action> +</RuleDescription> +</content> +</entry> + ''' + xmldoc = minidom.parseString(xmlstr) + rule = Rule() + + for rule_desc in _get_children_from_path(xmldoc, + 'entry', + 'content', + 'RuleDescription'): + for xml_filter in _get_child_nodes(rule_desc, 'Filter'): + filter_type = xml_filter.getAttributeNS( + XML_SCHEMA_NAMESPACE, 'type') + setattr(rule, 'filter_type', str(filter_type)) + if xml_filter.childNodes: + + for expr in _get_child_nodes(xml_filter, 'SqlExpression'): + setattr(rule, 'filter_expression', + expr.firstChild.nodeValue) + + for xml_action in _get_child_nodes(rule_desc, 'Action'): + action_type = xml_action.getAttributeNS( + XML_SCHEMA_NAMESPACE, 'type') + setattr(rule, 'action_type', str(action_type)) + if xml_action.childNodes: + action_expression = xml_action.childNodes[0].firstChild + if action_expression: + setattr(rule, 'action_expression', + action_expression.nodeValue) + + # extract id, updated and name value from feed entry and set them of rule. + for name, value in _get_entry_properties(xmlstr, True, '/rules').items(): + setattr(rule, name, value) + + return rule + + +def _convert_response_to_queue(response): + return _convert_xml_to_queue(response.body) + + +def _parse_bool(value): + if value.lower() == 'true': + return True + return False + + +def _convert_xml_to_queue(xmlstr): + ''' Converts xml response to queue object. + + The format of xml response for queue: +<QueueDescription + xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\"> + <MaxSizeInBytes>10000</MaxSizeInBytes> + <DefaultMessageTimeToLive>PT5M</DefaultMessageTimeToLive> + <LockDuration>PT2M</LockDuration> + <RequiresGroupedReceives>False</RequiresGroupedReceives> + <SupportsDuplicateDetection>False</SupportsDuplicateDetection> + ... +</QueueDescription> + + ''' + xmldoc = minidom.parseString(xmlstr) + queue = Queue() + + invalid_queue = True + # get node for each attribute in Queue class, if nothing found then the + # response is not valid xml for Queue. 
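+    # The invalid_queue flag implements that check: it starts as True and is
+    # cleared the first time any known QueueDescription element is found, so
+    # a response containing none of them raises _ERROR_QUEUE_NOT_FOUND.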
+ for desc in _get_children_from_path(xmldoc, + 'entry', + 'content', + 'QueueDescription'): + node_value = _get_first_child_node_value(desc, 'LockDuration') + if node_value is not None: + queue.lock_duration = node_value + invalid_queue = False + + node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes') + if node_value is not None: + queue.max_size_in_megabytes = int(node_value) + invalid_queue = False + + node_value = _get_first_child_node_value( + desc, 'RequiresDuplicateDetection') + if node_value is not None: + queue.requires_duplicate_detection = _parse_bool(node_value) + invalid_queue = False + + node_value = _get_first_child_node_value(desc, 'RequiresSession') + if node_value is not None: + queue.requires_session = _parse_bool(node_value) + invalid_queue = False + + node_value = _get_first_child_node_value( + desc, 'DefaultMessageTimeToLive') + if node_value is not None: + queue.default_message_time_to_live = node_value + invalid_queue = False + + node_value = _get_first_child_node_value( + desc, 'DeadLetteringOnMessageExpiration') + if node_value is not None: + queue.dead_lettering_on_message_expiration = _parse_bool(node_value) + invalid_queue = False + + node_value = _get_first_child_node_value( + desc, 'DuplicateDetectionHistoryTimeWindow') + if node_value is not None: + queue.duplicate_detection_history_time_window = node_value + invalid_queue = False + + node_value = _get_first_child_node_value( + desc, 'EnableBatchedOperations') + if node_value is not None: + queue.enable_batched_operations = _parse_bool(node_value) + invalid_queue = False + + node_value = _get_first_child_node_value(desc, 'MaxDeliveryCount') + if node_value is not None: + queue.max_delivery_count = int(node_value) + invalid_queue = False + + node_value = _get_first_child_node_value(desc, 'MessageCount') + if node_value is not None: + queue.message_count = int(node_value) + invalid_queue = False + + node_value = _get_first_child_node_value(desc, 'SizeInBytes') + if node_value is not None: + queue.size_in_bytes = int(node_value) + invalid_queue = False + + if invalid_queue: + raise WindowsAzureError(_ERROR_QUEUE_NOT_FOUND) + + # extract id, updated and name value from feed entry and set them of queue. + for name, value in _get_entry_properties(xmlstr, True).items(): + setattr(queue, name, value) + + return queue + + +def _convert_response_to_topic(response): + return _convert_xml_to_topic(response.body) + + +def _convert_xml_to_topic(xmlstr): + '''Converts xml response to topic + + The xml format for topic: +<entry xmlns='http://www.w3.org/2005/Atom'> + <content type='application/xml'> + <TopicDescription + xmlns:i="http://www.w3.org/2001/XMLSchema-instance" + xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> + <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive> + <MaxSizeInMegabytes>1024</MaxSizeInMegabytes> + <RequiresDuplicateDetection>false</RequiresDuplicateDetection> + <DuplicateDetectionHistoryTimeWindow>P7D</DuplicateDetectionHistoryTimeWindow> + <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions> + </TopicDescription> + </content> +</entry> + ''' + xmldoc = minidom.parseString(xmlstr) + topic = Topic() + + invalid_topic = True + + # get node for each attribute in Topic class, if nothing found then the + # response is not valid xml for Topic. 
+ for desc in _get_children_from_path(xmldoc, + 'entry', + 'content', + 'TopicDescription'): + invalid_topic = True + node_value = _get_first_child_node_value( + desc, 'DefaultMessageTimeToLive') + if node_value is not None: + topic.default_message_time_to_live = node_value + invalid_topic = False + node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes') + if node_value is not None: + topic.max_size_in_megabytes = int(node_value) + invalid_topic = False + node_value = _get_first_child_node_value( + desc, 'RequiresDuplicateDetection') + if node_value is not None: + topic.requires_duplicate_detection = _parse_bool(node_value) + invalid_topic = False + node_value = _get_first_child_node_value( + desc, 'DuplicateDetectionHistoryTimeWindow') + if node_value is not None: + topic.duplicate_detection_history_time_window = node_value + invalid_topic = False + node_value = _get_first_child_node_value( + desc, 'EnableBatchedOperations') + if node_value is not None: + topic.enable_batched_operations = _parse_bool(node_value) + invalid_topic = False + node_value = _get_first_child_node_value(desc, 'SizeInBytes') + if node_value is not None: + topic.size_in_bytes = int(node_value) + invalid_topic = False + + if invalid_topic: + raise WindowsAzureError(_ERROR_TOPIC_NOT_FOUND) + + # extract id, updated and name value from feed entry and set them of topic. + for name, value in _get_entry_properties(xmlstr, True).items(): + setattr(topic, name, value) + return topic + + +def _convert_response_to_subscription(response): + return _convert_xml_to_subscription(response.body) + + +def _convert_xml_to_subscription(xmlstr): + '''Converts xml response to subscription + + The xml format for subscription: +<entry xmlns='http://www.w3.org/2005/Atom'> + <content type='application/xml'> + <SubscriptionDescription + xmlns:i="http://www.w3.org/2001/XMLSchema-instance" + xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> + <LockDuration>PT5M</LockDuration> + <RequiresSession>false</RequiresSession> + <DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive> + <DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration> + <DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions> + </SubscriptionDescription> + </content> +</entry> + ''' + xmldoc = minidom.parseString(xmlstr) + subscription = Subscription() + + for desc in _get_children_from_path(xmldoc, + 'entry', + 'content', + 'SubscriptionDescription'): + node_value = _get_first_child_node_value(desc, 'LockDuration') + if node_value is not None: + subscription.lock_duration = node_value + + node_value = _get_first_child_node_value( + desc, 'RequiresSession') + if node_value is not None: + subscription.requires_session = _parse_bool(node_value) + + node_value = _get_first_child_node_value( + desc, 'DefaultMessageTimeToLive') + if node_value is not None: + subscription.default_message_time_to_live = node_value + + node_value = _get_first_child_node_value( + desc, 'DeadLetteringOnFilterEvaluationExceptions') + if node_value is not None: + subscription.dead_lettering_on_filter_evaluation_exceptions = \ + _parse_bool(node_value) + + node_value = _get_first_child_node_value( + desc, 'DeadLetteringOnMessageExpiration') + if node_value is not None: + subscription.dead_lettering_on_message_expiration = \ + _parse_bool(node_value) + + node_value = _get_first_child_node_value( + desc, 'EnableBatchedOperations') + if node_value is not None: + 
subscription.enable_batched_operations = _parse_bool(node_value)
+
+        node_value = _get_first_child_node_value(
+            desc, 'MaxDeliveryCount')
+        if node_value is not None:
+            subscription.max_delivery_count = int(node_value)
+
+        node_value = _get_first_child_node_value(
+            desc, 'MessageCount')
+        if node_value is not None:
+            subscription.message_count = int(node_value)
+
+    for name, value in _get_entry_properties(xmlstr,
+                                             True,
+                                             '/subscriptions').items():
+        setattr(subscription, name, value)
+
+    return subscription
+
+
+def _convert_subscription_to_xml(subscription):
+    '''
+    Converts a subscription object to xml to send. The order of each field of
+    subscription in xml is very important so we can't simply call
+    convert_class_to_xml.
+
+    subscription: the subscription object to be converted.
+    '''
+
+    subscription_body = '<SubscriptionDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
+    if subscription:
+        if subscription.lock_duration is not None:
+            subscription_body += ''.join(
+                ['<LockDuration>',
+                 str(subscription.lock_duration),
+                 '</LockDuration>'])
+
+        if subscription.requires_session is not None:
+            subscription_body += ''.join(
+                ['<RequiresSession>',
+                 str(subscription.requires_session).lower(),
+                 '</RequiresSession>'])
+
+        if subscription.default_message_time_to_live is not None:
+            subscription_body += ''.join(
+                ['<DefaultMessageTimeToLive>',
+                 str(subscription.default_message_time_to_live),
+                 '</DefaultMessageTimeToLive>'])
+
+        if subscription.dead_lettering_on_message_expiration is not None:
+            subscription_body += ''.join(
+                ['<DeadLetteringOnMessageExpiration>',
+                 str(subscription.dead_lettering_on_message_expiration).lower(),
+                 '</DeadLetteringOnMessageExpiration>'])
+
+        if subscription.dead_lettering_on_filter_evaluation_exceptions is not None:
+            subscription_body += ''.join(
+                ['<DeadLetteringOnFilterEvaluationExceptions>',
+                 str(subscription.dead_lettering_on_filter_evaluation_exceptions).lower(),
+                 '</DeadLetteringOnFilterEvaluationExceptions>'])
+
+        if subscription.enable_batched_operations is not None:
+            subscription_body += ''.join(
+                ['<EnableBatchedOperations>',
+                 str(subscription.enable_batched_operations).lower(),
+                 '</EnableBatchedOperations>'])
+
+        if subscription.max_delivery_count is not None:
+            subscription_body += ''.join(
+                ['<MaxDeliveryCount>',
+                 str(subscription.max_delivery_count),
+                 '</MaxDeliveryCount>'])
+
+        if subscription.message_count is not None:
+            subscription_body += ''.join(
+                ['<MessageCount>',
+                 str(subscription.message_count),
+                 '</MessageCount>'])
+
+    subscription_body += '</SubscriptionDescription>'
+    return _create_entry(subscription_body)
+
+
+def _convert_rule_to_xml(rule):
+    '''
+    Converts a rule object to xml to send. The order of each field of rule
+    in xml is very important so we can't simply call convert_class_to_xml.
+
+    rule: the rule object to be converted.
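+
+    A sketch of a rule this can serialize, reusing the sample expressions
+    from _convert_xml_to_rule (the type names are illustrative):
+
+        rule = Rule(filter_type='SqlFilter',
+                    filter_expression="MyProperty='XYZ'",
+                    action_type='SqlRuleAction',
+                    action_expression="set MyProperty2 = 'ABC'")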
+    '''
+    rule_body = '<RuleDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
+    if rule:
+        if rule.filter_type:
+            rule_body += ''.join(
+                ['<Filter i:type="',
+                 xml_escape(rule.filter_type),
+                 '">'])
+            if rule.filter_type == 'CorrelationFilter':
+                rule_body += ''.join(
+                    ['<CorrelationId>',
+                     xml_escape(rule.filter_expression),
+                     '</CorrelationId>'])
+            else:
+                rule_body += ''.join(
+                    ['<SqlExpression>',
+                     xml_escape(rule.filter_expression),
+                     '</SqlExpression>'])
+                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'
+            rule_body += '</Filter>'
+        if rule.action_type:
+            rule_body += ''.join(
+                ['<Action i:type="',
+                 xml_escape(rule.action_type),
+                 '">'])
+            if rule.action_type == 'SqlRuleAction':
+                rule_body += ''.join(
+                    ['<SqlExpression>',
+                     xml_escape(rule.action_expression),
+                     '</SqlExpression>'])
+                rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'
+            rule_body += '</Action>'
+    rule_body += '</RuleDescription>'
+
+    return _create_entry(rule_body)
+
+
+def _convert_topic_to_xml(topic):
+    '''
+    Converts a topic object to xml to send. The order of each field of topic
+    in xml is very important so we can't simply call convert_class_to_xml.
+
+    topic: the topic object to be converted.
+    '''
+
+    topic_body = '<TopicDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
+    if topic:
+        if topic.default_message_time_to_live is not None:
+            topic_body += ''.join(
+                ['<DefaultMessageTimeToLive>',
+                 str(topic.default_message_time_to_live),
+                 '</DefaultMessageTimeToLive>'])
+
+        if topic.max_size_in_megabytes is not None:
+            topic_body += ''.join(
+                ['<MaxSizeInMegabytes>',
+                 str(topic.max_size_in_megabytes),
+                 '</MaxSizeInMegabytes>'])
+
+        if topic.requires_duplicate_detection is not None:
+            topic_body += ''.join(
+                ['<RequiresDuplicateDetection>',
+                 str(topic.requires_duplicate_detection).lower(),
+                 '</RequiresDuplicateDetection>'])
+
+        if topic.duplicate_detection_history_time_window is not None:
+            topic_body += ''.join(
+                ['<DuplicateDetectionHistoryTimeWindow>',
+                 str(topic.duplicate_detection_history_time_window),
+                 '</DuplicateDetectionHistoryTimeWindow>'])
+
+        if topic.enable_batched_operations is not None:
+            topic_body += ''.join(
+                ['<EnableBatchedOperations>',
+                 str(topic.enable_batched_operations).lower(),
+                 '</EnableBatchedOperations>'])
+
+        if topic.size_in_bytes is not None:
+            topic_body += ''.join(
+                ['<SizeInBytes>',
+                 str(topic.size_in_bytes),
+                 '</SizeInBytes>'])
+
+    topic_body += '</TopicDescription>'
+
+    return _create_entry(topic_body)
+
+
+def _convert_queue_to_xml(queue):
+    '''
+    Converts a queue object to xml to send. The order of each field of queue
+    in xml is very important so we can't simply call convert_class_to_xml.
+
+    queue: the queue object to be converted.
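+
+    For example (values illustrative), a queue created as
+
+        queue = Queue(lock_duration='PT2M', max_size_in_megabytes=1024)
+
+    serializes only the LockDuration and MaxSizeInMegabytes elements, in
+    that order.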
+ ''' + queue_body = '<QueueDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">' + if queue: + if queue.lock_duration: + queue_body += ''.join( + ['<LockDuration>', + str(queue.lock_duration), + '</LockDuration>']) + + if queue.max_size_in_megabytes is not None: + queue_body += ''.join( + ['<MaxSizeInMegabytes>', + str(queue.max_size_in_megabytes), + '</MaxSizeInMegabytes>']) + + if queue.requires_duplicate_detection is not None: + queue_body += ''.join( + ['<RequiresDuplicateDetection>', + str(queue.requires_duplicate_detection).lower(), + '</RequiresDuplicateDetection>']) + + if queue.requires_session is not None: + queue_body += ''.join( + ['<RequiresSession>', + str(queue.requires_session).lower(), + '</RequiresSession>']) + + if queue.default_message_time_to_live is not None: + queue_body += ''.join( + ['<DefaultMessageTimeToLive>', + str(queue.default_message_time_to_live), + '</DefaultMessageTimeToLive>']) + + if queue.dead_lettering_on_message_expiration is not None: + queue_body += ''.join( + ['<DeadLetteringOnMessageExpiration>', + str(queue.dead_lettering_on_message_expiration).lower(), + '</DeadLetteringOnMessageExpiration>']) + + if queue.duplicate_detection_history_time_window is not None: + queue_body += ''.join( + ['<DuplicateDetectionHistoryTimeWindow>', + str(queue.duplicate_detection_history_time_window), + '</DuplicateDetectionHistoryTimeWindow>']) + + if queue.max_delivery_count is not None: + queue_body += ''.join( + ['<MaxDeliveryCount>', + str(queue.max_delivery_count), + '</MaxDeliveryCount>']) + + if queue.enable_batched_operations is not None: + queue_body += ''.join( + ['<EnableBatchedOperations>', + str(queue.enable_batched_operations).lower(), + '</EnableBatchedOperations>']) + + if queue.size_in_bytes is not None: + queue_body += ''.join( + ['<SizeInBytes>', + str(queue.size_in_bytes), + '</SizeInBytes>']) + + if queue.message_count is not None: + queue_body += ''.join( + ['<MessageCount>', + str(queue.message_count), + '</MessageCount>']) + + queue_body += '</QueueDescription>' + return _create_entry(queue_body) + + +def _service_bus_error_handler(http_error): + ''' Simple error handler for service bus service. ''' + return _general_error_handler(http_error) + +from azure.servicebus.servicebusservice import ServiceBusService diff --git a/awx/lib/site-packages/azure/servicebus/servicebusservice.py b/awx/lib/site-packages/azure/servicebus/servicebusservice.py new file mode 100644 index 0000000000..894f018ba2 --- /dev/null +++ b/awx/lib/site-packages/azure/servicebus/servicebusservice.py @@ -0,0 +1,914 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#-------------------------------------------------------------------------- +import os +import time + +from azure import ( + WindowsAzureError, + SERVICE_BUS_HOST_BASE, + _convert_response_to_feeds, + _dont_fail_not_exist, + _dont_fail_on_exist, + _get_request_body, + _get_request_body_bytes_only, + _int_or_none, + _str, + _update_request_uri_query, + url_quote, + url_unquote, + _validate_not_none, + ) +from azure.http import ( + HTTPError, + HTTPRequest, + ) +from azure.http.httpclient import _HTTPClient +from azure.servicebus import ( + AZURE_SERVICEBUS_NAMESPACE, + AZURE_SERVICEBUS_ACCESS_KEY, + AZURE_SERVICEBUS_ISSUER, + _convert_topic_to_xml, + _convert_response_to_topic, + _convert_queue_to_xml, + _convert_response_to_queue, + _convert_subscription_to_xml, + _convert_response_to_subscription, + _convert_rule_to_xml, + _convert_response_to_rule, + _convert_xml_to_queue, + _convert_xml_to_topic, + _convert_xml_to_subscription, + _convert_xml_to_rule, + _create_message, + _service_bus_error_handler, + ) + +# Token cache for Authentication +# Shared by the different instances of ServiceBusService +_tokens = {} + + +class ServiceBusService(object): + + def __init__(self, service_namespace=None, account_key=None, issuer=None, + x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE): + # x_ms_version is not used, but the parameter is kept for backwards + # compatibility + self.requestid = None + self.service_namespace = service_namespace + self.account_key = account_key + self.issuer = issuer + self.host_base = host_base + + # Get service namespace, account key and issuer. + # If they are set when constructing, then use them, else find them + # from environment variables. + if not self.service_namespace: + self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE) + if not self.account_key: + self.account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY) + if not self.issuer: + self.issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER) + + if not self.service_namespace or \ + not self.account_key or not self.issuer: + raise WindowsAzureError( + 'You need to provide servicebus namespace, access key and Issuer') + + self._httpclient = _HTTPClient(service_instance=self, + service_namespace=self.service_namespace, + account_key=self.account_key, + issuer=self.issuer) + self._filter = self._httpclient.perform_request + + def with_filter(self, filter): + ''' + Returns a new service which will process requests with the specified + filter. Filtering operations can include logging, automatic retrying, + etc... The filter is a lambda which receives the HTTPRequest and + another lambda. The filter can perform any pre-processing on the + request, pass it off to the next lambda, and then perform any + post-processing on the response. + ''' + res = ServiceBusService(self.service_namespace, self.account_key, + self.issuer) + old_filter = self._filter + + def new_filter(request): + return filter(request, old_filter) + + res._filter = new_filter + return res + + def set_proxy(self, host, port, user=None, password=None): + ''' + Sets the proxy server host and port for the HTTP CONNECT Tunnelling. + + host: Address of the proxy. Ex: '192.168.0.100' + port: Port of the proxy. Ex: 6000 + user: User for proxy authorization. + password: Password for proxy authorization. + ''' + self._httpclient.set_proxy(host, port, user, password) + + def create_queue(self, queue_name, queue=None, fail_on_exist=False): + ''' + Creates a new queue. Once created, this queue's resource manifest is + immutable. 
+ + queue_name: Name of the queue to create. + queue: Queue object to create. + fail_on_exist: + Specify whether to throw an exception when the queue exists. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '' + request.body = _get_request_body(_convert_queue_to_xml(queue)) + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_on_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_on_exist(ex) + return False + else: + self._perform_request(request) + return True + + def delete_queue(self, queue_name, fail_not_exist=False): + ''' + Deletes an existing queue. This operation will also remove all + associated state including messages in the queue. + + queue_name: Name of the queue to delete. + fail_not_exist: + Specify whether to throw an exception if the queue doesn't exist. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_not_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_queue(self, queue_name): + ''' + Retrieves an existing queue. + + queue_name: Name of the queue. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_queue(response) + + def list_queues(self): + ''' + Enumerates the queues in the service namespace. + ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/$Resources/Queues' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_feeds(response, _convert_xml_to_queue) + + def create_topic(self, topic_name, topic=None, fail_on_exist=False): + ''' + Creates a new topic. Once created, this topic resource manifest is + immutable. + + topic_name: Name of the topic to create. + topic: Topic object to create. + fail_on_exist: + Specify whether to throw an exception when the topic exists. + ''' + _validate_not_none('topic_name', topic_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '' + request.body = _get_request_body(_convert_topic_to_xml(topic)) + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_on_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_on_exist(ex) + return False + else: + self._perform_request(request) + return True + + def delete_topic(self, topic_name, fail_not_exist=False): + ''' + Deletes an existing topic. 
This operation will also remove all + associated state including associated subscriptions. + + topic_name: Name of the topic to delete. + fail_not_exist: + Specify whether throw exception when topic doesn't exist. + ''' + _validate_not_none('topic_name', topic_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_not_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_topic(self, topic_name): + ''' + Retrieves the description for the specified topic. + + topic_name: Name of the topic. + ''' + _validate_not_none('topic_name', topic_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_topic(response) + + def list_topics(self): + ''' + Retrieves the topics in the service namespace. + ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/$Resources/Topics' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_feeds(response, _convert_xml_to_topic) + + def create_rule(self, topic_name, subscription_name, rule_name, rule=None, + fail_on_exist=False): + ''' + Creates a new rule. Once created, this rule's resource manifest is + immutable. + + topic_name: Name of the topic. + subscription_name: Name of the subscription. + rule_name: Name of the rule. + fail_on_exist: + Specify whether to throw an exception when the rule exists. + ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('subscription_name', subscription_name) + _validate_not_none('rule_name', rule_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '/subscriptions/' + \ + _str(subscription_name) + \ + '/rules/' + _str(rule_name) + '' + request.body = _get_request_body(_convert_rule_to_xml(rule)) + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_on_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_on_exist(ex) + return False + else: + self._perform_request(request) + return True + + def delete_rule(self, topic_name, subscription_name, rule_name, + fail_not_exist=False): + ''' + Deletes an existing rule. + + topic_name: Name of the topic. + subscription_name: Name of the subscription. + rule_name: + Name of the rule to delete. DEFAULT_RULE_NAME=$Default. + Use DEFAULT_RULE_NAME to delete default rule for the subscription. + fail_not_exist: + Specify whether throw exception when rule doesn't exist. 
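+
+        For example, to remove the default rule (topic and subscription
+        names are illustrative):
+
+            bus_service.delete_rule('mytopic', 'mysub', DEFAULT_RULE_NAME)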
+ ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('subscription_name', subscription_name) + _validate_not_none('rule_name', rule_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '/subscriptions/' + \ + _str(subscription_name) + \ + '/rules/' + _str(rule_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_not_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_rule(self, topic_name, subscription_name, rule_name): + ''' + Retrieves the description for the specified rule. + + topic_name: Name of the topic. + subscription_name: Name of the subscription. + rule_name: Name of the rule. + ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('subscription_name', subscription_name) + _validate_not_none('rule_name', rule_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '/subscriptions/' + \ + _str(subscription_name) + \ + '/rules/' + _str(rule_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_rule(response) + + def list_rules(self, topic_name, subscription_name): + ''' + Retrieves the rules that exist under the specified subscription. + + topic_name: Name of the topic. + subscription_name: Name of the subscription. + ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('subscription_name', subscription_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(topic_name) + '/subscriptions/' + \ + _str(subscription_name) + '/rules/' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_feeds(response, _convert_xml_to_rule) + + def create_subscription(self, topic_name, subscription_name, + subscription=None, fail_on_exist=False): + ''' + Creates a new subscription. Once created, this subscription resource + manifest is immutable. + + topic_name: Name of the topic. + subscription_name: Name of the subscription. + fail_on_exist: + Specify whether throw exception when subscription exists. + ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('subscription_name', subscription_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(topic_name) + '/subscriptions/' + _str(subscription_name) + '' + request.body = _get_request_body( + _convert_subscription_to_xml(subscription)) + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_on_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_on_exist(ex) + return False + else: + self._perform_request(request) + return True + + def delete_subscription(self, topic_name, subscription_name, + fail_not_exist=False): + ''' + Deletes an existing subscription. + + topic_name: Name of the topic. 
+ subscription_name: Name of the subscription to delete. + fail_not_exist: + Specify whether to throw an exception when the subscription + doesn't exist. + ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('subscription_name', subscription_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + \ + _str(topic_name) + '/subscriptions/' + _str(subscription_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + if not fail_not_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_subscription(self, topic_name, subscription_name): + ''' + Gets an existing subscription. + + topic_name: Name of the topic. + subscription_name: Name of the subscription. + ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('subscription_name', subscription_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(topic_name) + '/subscriptions/' + _str(subscription_name) + '' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_subscription(response) + + def list_subscriptions(self, topic_name): + ''' + Retrieves the subscriptions in the specified topic. + + topic_name: Name of the topic. + ''' + _validate_not_none('topic_name', topic_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '/subscriptions/' + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _convert_response_to_feeds(response, + _convert_xml_to_subscription) + + def send_topic_message(self, topic_name, message=None): + ''' + Enqueues a message into the specified topic. The limit to the number + of messages which may be present in the topic is governed by the + message size in MaxTopicSizeInBytes. If this message causes the topic + to exceed its quota, a quota exceeded error is returned and the + message will be rejected. + + topic_name: Name of the topic. + message: Message object containing message body and properties. + ''' + _validate_not_none('topic_name', topic_name) + _validate_not_none('message', message) + request = HTTPRequest() + request.method = 'POST' + request.host = self._get_host() + request.path = '/' + _str(topic_name) + '/messages' + request.headers = message.add_headers(request) + request.body = _get_request_body_bytes_only( + 'message.body', message.body) + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + self._perform_request(request) + + def peek_lock_subscription_message(self, topic_name, subscription_name, + timeout='60'): + ''' + This operation is used to atomically retrieve and lock a message for + processing. The message is guaranteed not to be delivered to other + receivers during the lock duration period specified in buffer + description. 
Once the lock expires, the message will be available to
+        other receivers. In order to complete processing of the message,
+        the receiver should issue a delete command with the lock ID received
+        from this operation. To abandon processing of the message and unlock
+        it for other receivers, an Unlock Message command should be issued,
+        or the lock duration period can expire.
+
+        topic_name: Name of the topic.
+        subscription_name: Name of the subscription.
+        timeout: Optional. The timeout parameter is expressed in seconds.
+        '''
+        _validate_not_none('topic_name', topic_name)
+        _validate_not_none('subscription_name', subscription_name)
+        request = HTTPRequest()
+        request.method = 'POST'
+        request.host = self._get_host()
+        request.path = '/' + \
+            _str(topic_name) + '/subscriptions/' + \
+            _str(subscription_name) + '/messages/head'
+        request.query = [('timeout', _int_or_none(timeout))]
+        request.path, request.query = _update_request_uri_query(request)
+        request.headers = self._update_service_bus_header(request)
+        response = self._perform_request(request)
+
+        return _create_message(response, self)
+
+    def unlock_subscription_message(self, topic_name, subscription_name,
+                                    sequence_number, lock_token):
+        '''
+        Unlock a message for processing by other receivers on a given
+        subscription. This operation deletes the lock object, causing the
+        message to be unlocked. A message must have first been locked by a
+        receiver before this operation is called.
+
+        topic_name: Name of the topic.
+        subscription_name: Name of the subscription.
+        sequence_number:
+            The sequence number of the message to be unlocked as returned in
+            BrokerProperties['SequenceNumber'] by the Peek Message operation.
+        lock_token:
+            The ID of the lock as returned by the Peek Message operation in
+            BrokerProperties['LockToken']
+        '''
+        _validate_not_none('topic_name', topic_name)
+        _validate_not_none('subscription_name', subscription_name)
+        _validate_not_none('sequence_number', sequence_number)
+        _validate_not_none('lock_token', lock_token)
+        request = HTTPRequest()
+        request.method = 'PUT'
+        request.host = self._get_host()
+        request.path = '/' + _str(topic_name) + \
+            '/subscriptions/' + _str(subscription_name) + \
+            '/messages/' + _str(sequence_number) + \
+            '/' + _str(lock_token) + ''
+        request.path, request.query = _update_request_uri_query(request)
+        request.headers = self._update_service_bus_header(request)
+        self._perform_request(request)
+
+    def read_delete_subscription_message(self, topic_name, subscription_name,
+                                         timeout='60'):
+        '''
+        Read and delete a message from a subscription as an atomic operation.
+        This operation should be used when a best-effort guarantee is
+        sufficient for an application; that is, using this operation it is
+        possible for messages to be lost if processing fails.
+
+        topic_name: Name of the topic.
+        subscription_name: Name of the subscription.
+        timeout: Optional. The timeout parameter is expressed in seconds.
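+
+        This is the peek_lock=False path of receive_subscription_message;
+        a hypothetical call:
+
+            msg = bus_service.read_delete_subscription_message('t1', 's1')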
+        '''
+        _validate_not_none('topic_name', topic_name)
+        _validate_not_none('subscription_name', subscription_name)
+        request = HTTPRequest()
+        request.method = 'DELETE'
+        request.host = self._get_host()
+        request.path = '/' + _str(topic_name) + \
+            '/subscriptions/' + _str(subscription_name) + \
+            '/messages/head'
+        request.query = [('timeout', _int_or_none(timeout))]
+        request.path, request.query = _update_request_uri_query(request)
+        request.headers = self._update_service_bus_header(request)
+        response = self._perform_request(request)
+
+        return _create_message(response, self)
+
+    def delete_subscription_message(self, topic_name, subscription_name,
+                                    sequence_number, lock_token):
+        '''
+        Completes processing on a locked message and deletes it from the
+        subscription. This operation should only be called after processing
+        a previously locked message is successful, in order to maintain
+        At-Least-Once delivery assurances.
+
+        topic_name: Name of the topic.
+        subscription_name: Name of the subscription.
+        sequence_number:
+            The sequence number of the message to be deleted as returned in
+            BrokerProperties['SequenceNumber'] by the Peek Message operation.
+        lock_token:
+            The ID of the lock as returned by the Peek Message operation in
+            BrokerProperties['LockToken']
+        '''
+        _validate_not_none('topic_name', topic_name)
+        _validate_not_none('subscription_name', subscription_name)
+        _validate_not_none('sequence_number', sequence_number)
+        _validate_not_none('lock_token', lock_token)
+        request = HTTPRequest()
+        request.method = 'DELETE'
+        request.host = self._get_host()
+        request.path = '/' + _str(topic_name) + \
+            '/subscriptions/' + _str(subscription_name) + \
+            '/messages/' + _str(sequence_number) + \
+            '/' + _str(lock_token) + ''
+        request.path, request.query = _update_request_uri_query(request)
+        request.headers = self._update_service_bus_header(request)
+        self._perform_request(request)
+
+    def send_queue_message(self, queue_name, message=None):
+        '''
+        Sends a message into the specified queue. The number of messages
+        that may be present in the queue is governed by the queue size limit
+        (MaxSizeInMegabytes). If this message causes the queue to exceed its
+        quota, a quota exceeded error is returned and the message is
+        rejected.
+
+        queue_name: Name of the queue.
+        message: Message object containing message body and properties.
+        '''
+        _validate_not_none('queue_name', queue_name)
+        _validate_not_none('message', message)
+        request = HTTPRequest()
+        request.method = 'POST'
+        request.host = self._get_host()
+        request.path = '/' + _str(queue_name) + '/messages'
+        request.headers = message.add_headers(request)
+        request.body = _get_request_body_bytes_only('message.body',
+                                                    message.body)
+        request.path, request.query = _update_request_uri_query(request)
+        request.headers = self._update_service_bus_header(request)
+        self._perform_request(request)
+
+    def peek_lock_queue_message(self, queue_name, timeout='60'):
+        '''
+        Atomically retrieves and locks a message from a queue for processing.
+        The message is guaranteed not to be delivered to other receivers
+        during the lock duration period specified in the queue description.
+        Once the lock expires, the message will be available to other
+        receivers. In order to complete processing of the message, the
+        receiver should issue a delete command with the lock ID
+        received from this operation.
+ ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'POST' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '/messages/head' + request.query = [('timeout', _int_or_none(timeout))] + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _create_message(response, self) + + def unlock_queue_message(self, queue_name, sequence_number, lock_token): + ''' + Unlocks a message for processing by other receivers on a given + queue. This operation deletes the lock object, causing the + message to be unlocked. A message must have first been locked by a + receiver before this operation is called. + + queue_name: Name of the queue. + sequence_number: + The sequence number of the message to be unlocked as returned in + BrokerProperties['SequenceNumber'] by the Peek Message operation. + lock_token: + The ID of the lock as returned by the Peek Message operation in + BrokerProperties['LockToken'] + ''' + _validate_not_none('queue_name', queue_name) + _validate_not_none('sequence_number', sequence_number) + _validate_not_none('lock_token', lock_token) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + \ + '/messages/' + _str(sequence_number) + \ + '/' + _str(lock_token) + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + self._perform_request(request) + + def read_delete_queue_message(self, queue_name, timeout='60'): + ''' + Reads and deletes a message from a queue as an atomic operation. This + operation should be used when a best-effort guarantee is sufficient + for an application; that is, using this operation it is possible for + messages to be lost if processing fails. + + queue_name: Name of the queue. + timeout: Optional. The timeout parameter is expressed in seconds. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '/messages/head' + request.query = [('timeout', _int_or_none(timeout))] + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + response = self._perform_request(request) + + return _create_message(response, self) + + def delete_queue_message(self, queue_name, sequence_number, lock_token): + ''' + Completes processing on a locked message and deletes it from the + queue. This operation should only be called after a previously locked + message has been processed successfully, to maintain At-Least-Once + delivery assurances. + + queue_name: Name of the queue. + sequence_number: + The sequence number of the message to be deleted as returned in + BrokerProperties['SequenceNumber'] by the Peek Message operation. + lock_token: + The ID of the lock as returned by the Peek Message operation in + BrokerProperties['LockToken']
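+ + Example (an illustrative sketch, continuing the peek-lock example + above; seq and lock were taken from msg.broker_properties): + + sbs.delete_queue_message('taskqueue', seq, lock)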
+ ''' + _validate_not_none('queue_name', queue_name) + _validate_not_none('sequence_number', sequence_number) + _validate_not_none('lock_token', lock_token) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + \ + '/messages/' + _str(sequence_number) + \ + '/' + _str(lock_token) + request.path, request.query = _update_request_uri_query(request) + request.headers = self._update_service_bus_header(request) + self._perform_request(request) + + def receive_queue_message(self, queue_name, peek_lock=True, timeout=60): + ''' + Receive a message from a queue for processing. + + queue_name: Name of the queue. + peek_lock: + Optional. True to retrieve and lock the message. False to read and + delete the message. Default is True (lock). + timeout: Optional. The timeout parameter is expressed in seconds. + ''' + if peek_lock: + return self.peek_lock_queue_message(queue_name, timeout) + else: + return self.read_delete_queue_message(queue_name, timeout) + + def receive_subscription_message(self, topic_name, subscription_name, + peek_lock=True, timeout=60): + ''' + Receive a message from a subscription for processing. + + topic_name: Name of the topic. + subscription_name: Name of the subscription. + peek_lock: + Optional. True to retrieve and lock the message. False to read and + delete the message. Default is True (lock). + timeout: Optional. The timeout parameter is expressed in seconds. + ''' + if peek_lock: + return self.peek_lock_subscription_message(topic_name, + subscription_name, + timeout) + else: + return self.read_delete_subscription_message(topic_name, + subscription_name, + timeout) + + def _get_host(self): + return self.service_namespace + self.host_base + + def _perform_request(self, request): + try: + resp = self._filter(request) + except HTTPError as ex: + return _service_bus_error_handler(ex) + + return resp + + def _update_service_bus_header(self, request): + ''' Add additional headers for service bus. ''' + + if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: + request.headers.append(('Content-Length', str(len(request.body)))) + + # If it is not a GET or HEAD request, a content-type must be set. + if request.method not in ['GET', 'HEAD']: + for name, _ in request.headers: + if 'content-type' == name.lower(): + break + else: + request.headers.append( + ('Content-Type', + 'application/atom+xml;type=entry;charset=utf-8')) + + # Add an authorization header for authentication. + request.headers.append( + ('Authorization', self._sign_service_bus_request(request))) + + return request.headers + + def _sign_service_bus_request(self, request): + ''' Return the signed authorization string containing the token. ''' + + return 'WRAP access_token="' + \ + self._get_token(request.host, request.path) + '"' + + def _token_is_expired(self, token): + ''' Check whether the token has expired (or is about to). ''' + time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=') + time_pos_end = token.find('&', time_pos_begin) + token_expire_time = int(token[time_pos_begin:time_pos_end]) + time_now = time.mktime(time.localtime()) + + # Allow a 30-second margin so the token has not already expired by + # the time it reaches the server. + return (token_expire_time - time_now) < 30 + + def _get_token(self, host, path): + ''' + Returns the token for the request. + + host: the service bus service host. + path: the service bus request path.
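+ + The token is requested from the namespace's Access Control (ACS) + endpoint and cached in the module-level _tokens dict. Illustrative + form body of the token request (values are placeholders): + + wrap_name=owner&wrap_password=<key>&wrap_scope=http://<host><path>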
+ ''' + wrap_scope = 'http://' + host + path + self.issuer + self.account_key + + # Return the cached token for this scope if it has not expired. + if wrap_scope in _tokens: + token = _tokens[wrap_scope] + if not self._token_is_expired(token): + return token + + # Get a new token from the access control server. + request = HTTPRequest() + request.protocol_override = 'https' + request.host = host.replace('.servicebus.', '-sb.accesscontrol.') + request.method = 'POST' + request.path = '/WRAPv0.9' + request.body = ('wrap_name=' + url_quote(self.issuer) + + '&wrap_password=' + url_quote(self.account_key) + + '&wrap_scope=' + + url_quote('http://' + host + path)).encode('utf-8') + request.headers.append(('Content-Length', str(len(request.body)))) + resp = self._httpclient.perform_request(request) + + token = resp.body.decode('utf-8') + token = url_unquote(token[token.find('=') + 1:token.rfind('&')]) + _tokens[wrap_scope] = token + + return token diff --git a/awx/lib/site-packages/azure/servicemanagement/__init__.py b/awx/lib/site-packages/azure/servicemanagement/__init__.py new file mode 100644 index 0000000000..caca5db4ba --- /dev/null +++ b/awx/lib/site-packages/azure/servicemanagement/__init__.py @@ -0,0 +1,1692 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +from xml.dom import minidom +from azure import ( + WindowsAzureData, + _Base64String, + _create_entry, + _dict_of, + _encode_base64, + _general_error_handler, + _get_children_from_path, + _get_first_child_node_value, + _list_of, + _scalar_list_of, + _str, + ) + +#----------------------------------------------------------------------------- +# Constants for Azure app environment settings. +AZURE_MANAGEMENT_CERTFILE = 'AZURE_MANAGEMENT_CERTFILE' +AZURE_MANAGEMENT_SUBSCRIPTIONID = 'AZURE_MANAGEMENT_SUBSCRIPTIONID' + +# x-ms-version for service management.
+X_MS_VERSION = '2013-06-01' + +#----------------------------------------------------------------------------- +# Data classes + + +class StorageServices(WindowsAzureData): + + def __init__(self): + self.storage_services = _list_of(StorageService) + + def __iter__(self): + return iter(self.storage_services) + + def __len__(self): + return len(self.storage_services) + + def __getitem__(self, index): + return self.storage_services[index] + + +class StorageService(WindowsAzureData): + + def __init__(self): + self.url = '' + self.service_name = '' + self.storage_service_properties = StorageAccountProperties() + self.storage_service_keys = StorageServiceKeys() + self.extended_properties = _dict_of( + 'ExtendedProperty', 'Name', 'Value') + self.capabilities = _scalar_list_of(str, 'Capability') + + +class StorageAccountProperties(WindowsAzureData): + + def __init__(self): + self.description = u'' + self.affinity_group = u'' + self.location = u'' + self.label = _Base64String() + self.status = u'' + self.endpoints = _scalar_list_of(str, 'Endpoint') + self.geo_replication_enabled = False + self.geo_primary_region = u'' + self.status_of_primary = u'' + self.geo_secondary_region = u'' + self.status_of_secondary = u'' + self.last_geo_failover_time = u'' + + +class StorageServiceKeys(WindowsAzureData): + + def __init__(self): + self.primary = u'' + self.secondary = u'' + + +class Locations(WindowsAzureData): + + def __init__(self): + self.locations = _list_of(Location) + + def __iter__(self): + return iter(self.locations) + + def __len__(self): + return len(self.locations) + + def __getitem__(self, index): + return self.locations[index] + + +class Location(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.display_name = u'' + self.available_services = _scalar_list_of(str, 'AvailableService') + + +class AffinityGroup(WindowsAzureData): + + def __init__(self): + self.name = '' + self.label = _Base64String() + self.description = u'' + self.location = u'' + self.hosted_services = HostedServices() + self.storage_services = StorageServices() + self.capabilities = _scalar_list_of(str, 'Capability') + + +class AffinityGroups(WindowsAzureData): + + def __init__(self): + self.affinity_groups = _list_of(AffinityGroup) + + def __iter__(self): + return iter(self.affinity_groups) + + def __len__(self): + return len(self.affinity_groups) + + def __getitem__(self, index): + return self.affinity_groups[index] + + +class HostedServices(WindowsAzureData): + + def __init__(self): + self.hosted_services = _list_of(HostedService) + + def __iter__(self): + return iter(self.hosted_services) + + def __len__(self): + return len(self.hosted_services) + + def __getitem__(self, index): + return self.hosted_services[index] + + +class HostedService(WindowsAzureData): + + def __init__(self): + self.url = u'' + self.service_name = u'' + self.hosted_service_properties = HostedServiceProperties() + self.deployments = Deployments() + + +class HostedServiceProperties(WindowsAzureData): + + def __init__(self): + self.description = u'' + self.location = u'' + self.affinity_group = u'' + self.label = _Base64String() + self.status = u'' + self.date_created = u'' + self.date_last_modified = u'' + self.extended_properties = _dict_of( + 'ExtendedProperty', 'Name', 'Value') + + +class VirtualNetworkSites(WindowsAzureData): + + def __init__(self): + self.virtual_network_sites = _list_of(VirtualNetworkSite) + + def __iter__(self): + return iter(self.virtual_network_sites) + + def __len__(self): + return 
len(self.virtual_network_sites) + + def __getitem__(self, index): + return self.virtual_network_sites[index] + + +class VirtualNetworkSite(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.id = u'' + self.affinity_group = u'' + self.subnets = Subnets() + + +class Subnets(WindowsAzureData): + + def __init__(self): + self.subnets = _list_of(Subnet) + + def __iter__(self): + return iter(self.subnets) + + def __len__(self): + return len(self.subnets) + + def __getitem__(self, index): + return self.subnets[index] + + +class Subnet(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.address_prefix = u'' + + + +class Deployments(WindowsAzureData): + + def __init__(self): + self.deployments = _list_of(Deployment) + + def __iter__(self): + return iter(self.deployments) + + def __len__(self): + return len(self.deployments) + + def __getitem__(self, index): + return self.deployments[index] + + +class Deployment(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.deployment_slot = u'' + self.private_id = u'' + self.status = u'' + self.label = _Base64String() + self.url = u'' + self.configuration = _Base64String() + self.role_instance_list = RoleInstanceList() + self.upgrade_status = UpgradeStatus() + self.upgrade_domain_count = u'' + self.role_list = RoleList() + self.sdk_version = u'' + self.input_endpoint_list = InputEndpoints() + self.locked = False + self.rollback_allowed = False + self.persistent_vm_downtime_info = PersistentVMDowntimeInfo() + self.created_time = u'' + self.virtual_network_name = u'' + self.last_modified_time = u'' + self.extended_properties = _dict_of( + 'ExtendedProperty', 'Name', 'Value') + + +class RoleInstanceList(WindowsAzureData): + + def __init__(self): + self.role_instances = _list_of(RoleInstance) + + def __iter__(self): + return iter(self.role_instances) + + def __len__(self): + return len(self.role_instances) + + def __getitem__(self, index): + return self.role_instances[index] + + +class RoleInstance(WindowsAzureData): + + def __init__(self): + self.role_name = u'' + self.instance_name = u'' + self.instance_status = u'' + self.instance_upgrade_domain = 0 + self.instance_fault_domain = 0 + self.instance_size = u'' + self.instance_state_details = u'' + self.instance_error_code = u'' + self.ip_address = u'' + self.instance_endpoints = InstanceEndpoints() + self.power_state = u'' + self.fqdn = u'' + self.host_name = u'' + + +class InstanceEndpoints(WindowsAzureData): + + def __init__(self): + self.instance_endpoints = _list_of(InstanceEndpoint) + + def __iter__(self): + return iter(self.instance_endpoints) + + def __len__(self): + return len(self.instance_endpoints) + + def __getitem__(self, index): + return self.instance_endpoints[index] + + +class InstanceEndpoint(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.vip = u'' + self.public_port = u'' + self.local_port = u'' + self.protocol = u'' + + +class UpgradeStatus(WindowsAzureData): + + def __init__(self): + self.upgrade_type = u'' + self.current_upgrade_domain_state = u'' + self.current_upgrade_domain = u'' + + +class InputEndpoints(WindowsAzureData): + + def __init__(self): + self.input_endpoints = _list_of(InputEndpoint) + + def __iter__(self): + return iter(self.input_endpoints) + + def __len__(self): + return len(self.input_endpoints) + + def __getitem__(self, index): + return self.input_endpoints[index] + + +class InputEndpoint(WindowsAzureData): + + def __init__(self): + self.role_name = u'' + self.vip = u'' + self.port = u'' + + +class 
RoleList(WindowsAzureData): + + def __init__(self): + self.roles = _list_of(Role) + + def __iter__(self): + return iter(self.roles) + + def __len__(self): + return len(self.roles) + + def __getitem__(self, index): + return self.roles[index] + + +class Role(WindowsAzureData): + + def __init__(self): + self.role_name = u'' + self.role_type = u'' + self.os_version = u'' + self.configuration_sets = ConfigurationSets() + self.availability_set_name = u'' + self.data_virtual_hard_disks = DataVirtualHardDisks() + self.os_virtual_hard_disk = OSVirtualHardDisk() + self.role_size = u'' + self.default_win_rm_certificate_thumbprint = u'' + + +class PersistentVMDowntimeInfo(WindowsAzureData): + + def __init__(self): + self.start_time = u'' + self.end_time = u'' + self.status = u'' + + +class Certificates(WindowsAzureData): + + def __init__(self): + self.certificates = _list_of(Certificate) + + def __iter__(self): + return iter(self.certificates) + + def __len__(self): + return len(self.certificates) + + def __getitem__(self, index): + return self.certificates[index] + + +class Certificate(WindowsAzureData): + + def __init__(self): + self.certificate_url = u'' + self.thumbprint = u'' + self.thumbprint_algorithm = u'' + self.data = u'' + + +class OperationError(WindowsAzureData): + + def __init__(self): + self.code = u'' + self.message = u'' + + +class Operation(WindowsAzureData): + + def __init__(self): + self.id = u'' + self.status = u'' + self.http_status_code = u'' + self.error = OperationError() + + +class OperatingSystem(WindowsAzureData): + + def __init__(self): + self.version = u'' + self.label = _Base64String() + self.is_default = True + self.is_active = True + self.family = 0 + self.family_label = _Base64String() + + +class OperatingSystems(WindowsAzureData): + + def __init__(self): + self.operating_systems = _list_of(OperatingSystem) + + def __iter__(self): + return iter(self.operating_systems) + + def __len__(self): + return len(self.operating_systems) + + def __getitem__(self, index): + return self.operating_systems[index] + + +class OperatingSystemFamily(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.label = _Base64String() + self.operating_systems = OperatingSystems() + + +class OperatingSystemFamilies(WindowsAzureData): + + def __init__(self): + self.operating_system_families = _list_of(OperatingSystemFamily) + + def __iter__(self): + return iter(self.operating_system_families) + + def __len__(self): + return len(self.operating_system_families) + + def __getitem__(self, index): + return self.operating_system_families[index] + + +class Subscription(WindowsAzureData): + + def __init__(self): + self.subscription_id = u'' + self.subscription_name = u'' + self.subscription_status = u'' + self.account_admin_live_email_id = u'' + self.service_admin_live_email_id = u'' + self.max_core_count = 0 + self.max_storage_accounts = 0 + self.max_hosted_services = 0 + self.current_core_count = 0 + self.current_hosted_services = 0 + self.current_storage_accounts = 0 + self.max_virtual_network_sites = 0 + self.max_local_network_sites = 0 + self.max_dns_servers = 0 + + +class AvailabilityResponse(WindowsAzureData): + + def __init__(self): + self.result = False + + +class SubscriptionCertificates(WindowsAzureData): + + def __init__(self): + self.subscription_certificates = _list_of(SubscriptionCertificate) + + def __iter__(self): + return iter(self.subscription_certificates) + + def __len__(self): + return len(self.subscription_certificates) + + def __getitem__(self, index): + return 
self.subscription_certificates[index] + + +class SubscriptionCertificate(WindowsAzureData): + + def __init__(self): + self.subscription_certificate_public_key = u'' + self.subscription_certificate_thumbprint = u'' + self.subscription_certificate_data = u'' + self.created = u'' + + +class Images(WindowsAzureData): + + def __init__(self): + self.images = _list_of(OSImage) + + def __iter__(self): + return iter(self.images) + + def __len__(self): + return len(self.images) + + def __getitem__(self, index): + return self.images[index] + + +class OSImage(WindowsAzureData): + + def __init__(self): + self.affinity_group = u'' + self.category = u'' + self.location = u'' + self.logical_size_in_gb = 0 + self.label = u'' + self.media_link = u'' + self.name = u'' + self.os = u'' + self.eula = u'' + self.description = u'' + + +class Disks(WindowsAzureData): + + def __init__(self): + self.disks = _list_of(Disk) + + def __iter__(self): + return iter(self.disks) + + def __len__(self): + return len(self.disks) + + def __getitem__(self, index): + return self.disks[index] + + +class Disk(WindowsAzureData): + + def __init__(self): + self.affinity_group = u'' + self.attached_to = AttachedTo() + self.has_operating_system = u'' + self.is_corrupted = u'' + self.location = u'' + self.logical_disk_size_in_gb = 0 + self.label = u'' + self.media_link = u'' + self.name = u'' + self.os = u'' + self.source_image_name = u'' + + +class AttachedTo(WindowsAzureData): + + def __init__(self): + self.hosted_service_name = u'' + self.deployment_name = u'' + self.role_name = u'' + + +class PersistentVMRole(WindowsAzureData): + + def __init__(self): + self.role_name = u'' + self.role_type = u'' + self.os_version = u''  # undocumented + self.configuration_sets = ConfigurationSets() + self.availability_set_name = u'' + self.data_virtual_hard_disks = DataVirtualHardDisks() + self.os_virtual_hard_disk = OSVirtualHardDisk() + self.role_size = u'' + self.default_win_rm_certificate_thumbprint = u'' + + +class ConfigurationSets(WindowsAzureData): + + def __init__(self): + self.configuration_sets = _list_of(ConfigurationSet) + + def __iter__(self): + return iter(self.configuration_sets) + + def __len__(self): + return len(self.configuration_sets) + + def __getitem__(self, index): + return self.configuration_sets[index] + + +class ConfigurationSet(WindowsAzureData): + + def __init__(self): + self.configuration_set_type = u'NetworkConfiguration' + self.role_type = u'' + self.input_endpoints = ConfigurationSetInputEndpoints() + self.subnet_names = _scalar_list_of(str, 'SubnetName') + + +class ConfigurationSetInputEndpoints(WindowsAzureData): + + def __init__(self): + self.input_endpoints = _list_of( + ConfigurationSetInputEndpoint, 'InputEndpoint') + + def __iter__(self): + return iter(self.input_endpoints) + + def __len__(self): + return len(self.input_endpoints) + + def __getitem__(self, index): + return self.input_endpoints[index] + + +class ConfigurationSetInputEndpoint(WindowsAzureData): + + ''' + Initializes a network configuration input endpoint. + + name: Specifies the name for the external endpoint. + protocol: + Specifies the transport protocol for the endpoint. Possible values + are: TCP, UDP. + port: Specifies the external port to use for the endpoint. + local_port: + Specifies the internal port on which the virtual machine is listening + to serve the endpoint. + load_balanced_endpoint_set_name: + Specifies a name for a set of load-balanced endpoints. Specifying this + element for a given endpoint adds it to the set. If you are setting an + endpoint to use to connect to the virtual machine via Remote Desktop, + do not set this property. + enable_direct_server_return: + Specifies whether direct server return load balancing is enabled.
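+ + Example (an illustrative sketch; the name and ports are placeholders): + + endpoint = ConfigurationSetInputEndpoint( + name='web', protocol='tcp', port='80', local_port='80')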
+ ''' + + def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'', + load_balanced_endpoint_set_name=u'', + enable_direct_server_return=False): + self.enable_direct_server_return = enable_direct_server_return + self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name + self.local_port = local_port + self.name = name + self.port = port + self.load_balancer_probe = LoadBalancerProbe() + self.protocol = protocol + + +class WindowsConfigurationSet(WindowsAzureData): + + def __init__(self, computer_name=None, admin_password=None, + reset_password_on_first_logon=None, + enable_automatic_updates=None, time_zone=None, + admin_username=None): + self.configuration_set_type = u'WindowsProvisioningConfiguration' + self.computer_name = computer_name + self.admin_password = admin_password + self.admin_username = admin_username + self.reset_password_on_first_logon = reset_password_on_first_logon + self.enable_automatic_updates = enable_automatic_updates + self.time_zone = time_zone + self.domain_join = DomainJoin() + self.stored_certificate_settings = StoredCertificateSettings() + self.win_rm = WinRM() + + +class DomainJoin(WindowsAzureData): + + def __init__(self): + self.credentials = Credentials() + self.join_domain = u'' + self.machine_object_ou = u'' + + +class Credentials(WindowsAzureData): + + def __init__(self): + self.domain = u'' + self.username = u'' + self.password = u'' + + +class StoredCertificateSettings(WindowsAzureData): + + def __init__(self): + self.stored_certificate_settings = _list_of(CertificateSetting) + + def __iter__(self): + return iter(self.stored_certificate_settings) + + def __len__(self): + return len(self.stored_certificate_settings) + + def __getitem__(self, index): + return self.stored_certificate_settings[index] + + +class CertificateSetting(WindowsAzureData): + + ''' + Initializes a certificate setting. + + thumbprint: + Specifies the thumbprint of the certificate to be provisioned. The + thumbprint must specify an existing service certificate. + store_name: + Specifies the name of the certificate store from which to retrieve the + certificate. + store_location: + Specifies the target certificate store location on the virtual machine. + The only supported value is LocalMachine. + ''' + + def __init__(self, thumbprint=u'', store_name=u'', store_location=u''): + self.thumbprint = thumbprint + self.store_name = store_name + self.store_location = store_location + + +class WinRM(WindowsAzureData): + + ''' + Contains configuration settings for the Windows Remote Management service on + the Virtual Machine. + ''' + + def __init__(self): + self.listeners = Listeners() + + +class Listeners(WindowsAzureData): + + def __init__(self): + self.listeners = _list_of(Listener) + + def __iter__(self): + return iter(self.listeners) + + def __len__(self): + return len(self.listeners) + + def __getitem__(self, index): + return self.listeners[index] + + +class Listener(WindowsAzureData): + + ''' + Specifies the protocol and certificate information for the listener. + + protocol: + Specifies the protocol of the listener. Possible values are: Http, + Https. The value is case sensitive. + certificate_thumbprint: + Optional. Specifies the certificate thumbprint for the secure + connection.
If this value is not specified, a self-signed certificate is + generated and used for the Virtual Machine. + ''' + + def __init__(self, protocol=u'', certificate_thumbprint=u''): + self.protocol = protocol + self.certificate_thumbprint = certificate_thumbprint + + +class LinuxConfigurationSet(WindowsAzureData): + + def __init__(self, host_name=None, user_name=None, user_password=None, + disable_ssh_password_authentication=None): + self.configuration_set_type = u'LinuxProvisioningConfiguration' + self.host_name = host_name + self.user_name = user_name + self.user_password = user_password + self.disable_ssh_password_authentication =\ + disable_ssh_password_authentication + self.ssh = SSH() + + +class SSH(WindowsAzureData): + + def __init__(self): + self.public_keys = PublicKeys() + self.key_pairs = KeyPairs() + + +class PublicKeys(WindowsAzureData): + + def __init__(self): + self.public_keys = _list_of(PublicKey) + + def __iter__(self): + return iter(self.public_keys) + + def __len__(self): + return len(self.public_keys) + + def __getitem__(self, index): + return self.public_keys[index] + + +class PublicKey(WindowsAzureData): + + def __init__(self, fingerprint=u'', path=u''): + self.fingerprint = fingerprint + self.path = path + + +class KeyPairs(WindowsAzureData): + + def __init__(self): + self.key_pairs = _list_of(KeyPair) + + def __iter__(self): + return iter(self.key_pairs) + + def __len__(self): + return len(self.key_pairs) + + def __getitem__(self, index): + return self.key_pairs[index] + + +class KeyPair(WindowsAzureData): + + def __init__(self, fingerprint=u'', path=u''): + self.fingerprint = fingerprint + self.path = path + + +class LoadBalancerProbe(WindowsAzureData): + + def __init__(self): + self.path = u'' + self.port = u'' + self.protocol = u'' + + +class DataVirtualHardDisks(WindowsAzureData): + + def __init__(self): + self.data_virtual_hard_disks = _list_of(DataVirtualHardDisk) + + def __iter__(self): + return iter(self.data_virtual_hard_disks) + + def __len__(self): + return len(self.data_virtual_hard_disks) + + def __getitem__(self, index): + return self.data_virtual_hard_disks[index] + + +class DataVirtualHardDisk(WindowsAzureData): + + def __init__(self): + self.host_caching = u'' + self.disk_label = u'' + self.disk_name = u'' + self.lun = 0 + self.logical_disk_size_in_gb = 0 + self.media_link = u'' + + +class OSVirtualHardDisk(WindowsAzureData): + + def __init__(self, source_image_name=None, media_link=None, + host_caching=None, disk_label=None, disk_name=None): + self.source_image_name = source_image_name + self.media_link = media_link + self.host_caching = host_caching + self.disk_label = disk_label + self.disk_name = disk_name + self.os = u'' # undocumented, not used when adding a role + + +class AsynchronousOperationResult(WindowsAzureData): + + def __init__(self, request_id=None): + self.request_id = request_id + + +class ServiceBusRegion(WindowsAzureData): + + def __init__(self): + self.code = u'' + self.fullname = u'' + + +class ServiceBusNamespace(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.region = u'' + self.default_key = u'' + self.status = u'' + self.created_at = u'' + self.acs_management_endpoint = u'' + self.servicebus_endpoint = u'' + self.connection_string = u'' + self.subscription_id = u'' + self.enabled = False + + +def _update_management_header(request): + ''' Add additional headers for management. 
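+ + Illustratively, this appends: x-ms-version (always), Content-Length + (for PUT, POST, MERGE and DELETE requests), and a default atom+xml + Content-Type (for anything other than GET and HEAD, unless a + content-type header is already present): + + x-ms-version: 2013-06-01 + Content-Type: application/atom+xml;type=entry;charset=utf-8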
''' + + if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: + request.headers.append(('Content-Length', str(len(request.body)))) + + # Append additional headers based on the service. + request.headers.append(('x-ms-version', X_MS_VERSION)) + + # If it is not a GET or HEAD request, a content-type must be set. + if request.method not in ['GET', 'HEAD']: + for name, _ in request.headers: + if 'content-type' == name.lower(): + break + else: + request.headers.append( + ('Content-Type', + 'application/atom+xml;type=entry;charset=utf-8')) + + return request.headers + + +def _parse_response_for_async_op(response): + ''' Extracts request id from response header. ''' + + if response is None: + return None + + result = AsynchronousOperationResult() + if response.headers: + for name, value in response.headers: + if name.lower() == 'x-ms-request-id': + result.request_id = value + + return result + + +def _management_error_handler(http_error): + ''' Simple error handler for management service. ''' + return _general_error_handler(http_error) + + +def _lower(text): + return text.lower() + + +class _XmlSerializer(object): + + @staticmethod + def create_storage_service_input_to_xml(service_name, description, label, + affinity_group, location, + geo_replication_enabled, + extended_properties): + return _XmlSerializer.doc_from_data( + 'CreateStorageServiceInput', + [('ServiceName', service_name), + ('Description', description), + ('Label', label, _encode_base64), + ('AffinityGroup', affinity_group), + ('Location', location), + ('GeoReplicationEnabled', geo_replication_enabled, _lower)], + extended_properties) + + @staticmethod + def update_storage_service_input_to_xml(description, label, + geo_replication_enabled, + extended_properties): + return _XmlSerializer.doc_from_data( + 'UpdateStorageServiceInput', + [('Description', description), + ('Label', label, _encode_base64), + ('GeoReplicationEnabled', geo_replication_enabled, _lower)], + extended_properties) + + @staticmethod + def regenerate_keys_to_xml(key_type): + return _XmlSerializer.doc_from_data('RegenerateKeys', + [('KeyType', key_type)]) + + @staticmethod + def update_hosted_service_to_xml(label, description, extended_properties): + return _XmlSerializer.doc_from_data('UpdateHostedService', + [('Label', label, _encode_base64), + ('Description', description)], + extended_properties) + + @staticmethod + def create_hosted_service_to_xml(service_name, label, description, + location, affinity_group, + extended_properties): + return _XmlSerializer.doc_from_data( + 'CreateHostedService', + [('ServiceName', service_name), + ('Label', label, _encode_base64), + ('Description', description), + ('Location', location), + ('AffinityGroup', affinity_group)], + extended_properties) + + @staticmethod + def create_deployment_to_xml(name, package_url, label, configuration, + start_deployment, treat_warnings_as_error, + extended_properties): + return _XmlSerializer.doc_from_data( + 'CreateDeployment', + [('Name', name), + ('PackageUrl', package_url), + ('Label', label, _encode_base64), + ('Configuration', configuration), + ('StartDeployment', + start_deployment, _lower), + ('TreatWarningsAsError', treat_warnings_as_error, _lower)], + extended_properties) + + @staticmethod + def swap_deployment_to_xml(production, source_deployment): + return _XmlSerializer.doc_from_data( + 'Swap', + [('Production', production), + ('SourceDeployment', source_deployment)]) + + @staticmethod + def update_deployment_status_to_xml(status): + return _XmlSerializer.doc_from_data(
'UpdateDeploymentStatus', + [('Status', status)]) + + @staticmethod + def change_deployment_to_xml(configuration, treat_warnings_as_error, mode, + extended_properties): + return _XmlSerializer.doc_from_data( + 'ChangeConfiguration', + [('Configuration', configuration), + ('TreatWarningsAsError', treat_warnings_as_error, _lower), + ('Mode', mode)], + extended_properties) + + @staticmethod + def upgrade_deployment_to_xml(mode, package_url, configuration, label, + role_to_upgrade, force, extended_properties): + return _XmlSerializer.doc_from_data( + 'UpgradeDeployment', + [('Mode', mode), + ('PackageUrl', package_url), + ('Configuration', configuration), + ('Label', label, _encode_base64), + ('RoleToUpgrade', role_to_upgrade), + ('Force', force, _lower)], + extended_properties) + + @staticmethod + def rollback_upgrade_to_xml(mode, force): + return _XmlSerializer.doc_from_data( + 'RollbackUpdateOrUpgrade', + [('Mode', mode), + ('Force', force, _lower)]) + + @staticmethod + def walk_upgrade_domain_to_xml(upgrade_domain): + return _XmlSerializer.doc_from_data( + 'WalkUpgradeDomain', + [('UpgradeDomain', upgrade_domain)]) + + @staticmethod + def certificate_file_to_xml(data, certificate_format, password): + return _XmlSerializer.doc_from_data( + 'CertificateFile', + [('Data', data), + ('CertificateFormat', certificate_format), + ('Password', password)]) + + @staticmethod + def create_affinity_group_to_xml(name, label, description, location): + return _XmlSerializer.doc_from_data( + 'CreateAffinityGroup', + [('Name', name), + ('Label', label, _encode_base64), + ('Description', description), + ('Location', location)]) + + @staticmethod + def update_affinity_group_to_xml(label, description): + return _XmlSerializer.doc_from_data( + 'UpdateAffinityGroup', + [('Label', label, _encode_base64), + ('Description', description)]) + + @staticmethod + def subscription_certificate_to_xml(public_key, thumbprint, data): + return _XmlSerializer.doc_from_data( + 'SubscriptionCertificate', + [('SubscriptionCertificatePublicKey', public_key), + ('SubscriptionCertificateThumbprint', thumbprint), + ('SubscriptionCertificateData', data)]) + + @staticmethod + def os_image_to_xml(label, media_link, name, os): + return _XmlSerializer.doc_from_data( + 'OSImage', + [('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os)]) + + @staticmethod + def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, + logical_disk_size_in_gb, media_link, + source_media_link): + return _XmlSerializer.doc_from_data( + 'DataVirtualHardDisk', + [('HostCaching', host_caching), + ('DiskLabel', disk_label), + ('DiskName', disk_name), + ('Lun', lun), + ('LogicalDiskSizeInGB', logical_disk_size_in_gb), + ('MediaLink', media_link), + ('SourceMediaLink', source_media_link)]) + + @staticmethod + def disk_to_xml(has_operating_system, label, media_link, name, os): + return _XmlSerializer.doc_from_data( + 'Disk', + [('HasOperatingSystem', has_operating_system, _lower), + ('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os)]) + + @staticmethod + def restart_role_operation_to_xml(): + return _XmlSerializer.doc_from_xml( + 'RestartRoleOperation', + '<OperationType>RestartRoleOperation</OperationType>') + + @staticmethod + def shutdown_role_operation_to_xml(post_shutdown_action): + xml = _XmlSerializer.data_to_xml( + [('OperationType', 'ShutdownRoleOperation'), + ('PostShutdownAction', post_shutdown_action)]) + return _XmlSerializer.doc_from_xml('ShutdownRoleOperation', xml) + + @staticmethod + 
def shutdown_roles_operation_to_xml(role_names, post_shutdown_action): + xml = _XmlSerializer.data_to_xml( + [('OperationType', 'ShutdownRolesOperation')]) + xml += '<Roles>' + for role_name in role_names: + xml += _XmlSerializer.data_to_xml([('Name', role_name)]) + xml += '</Roles>' + xml += _XmlSerializer.data_to_xml( + [('PostShutdownAction', post_shutdown_action)]) + return _XmlSerializer.doc_from_xml('ShutdownRolesOperation', xml) + + @staticmethod + def start_role_operation_to_xml(): + return _XmlSerializer.doc_from_xml( + 'StartRoleOperation', + '<OperationType>StartRoleOperation</OperationType>') + + @staticmethod + def start_roles_operation_to_xml(role_names): + xml = _XmlSerializer.data_to_xml( + [('OperationType', 'StartRolesOperation')]) + xml += '<Roles>' + for role_name in role_names: + xml += _XmlSerializer.data_to_xml([('Name', role_name)]) + xml += '</Roles>' + return _XmlSerializer.doc_from_xml('StartRolesOperation', xml) + + @staticmethod + def windows_configuration_to_xml(configuration): + xml = _XmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type), + ('ComputerName', configuration.computer_name), + ('AdminPassword', configuration.admin_password), + ('ResetPasswordOnFirstLogon', + configuration.reset_password_on_first_logon, + _lower), + ('EnableAutomaticUpdates', + configuration.enable_automatic_updates, + _lower), + ('TimeZone', configuration.time_zone)]) + + if configuration.domain_join is not None: + xml += '<DomainJoin>' + xml += '<Credentials>' + xml += _XmlSerializer.data_to_xml( + [('Domain', configuration.domain_join.credentials.domain), + ('Username', configuration.domain_join.credentials.username), + ('Password', configuration.domain_join.credentials.password)]) + xml += '</Credentials>' + xml += _XmlSerializer.data_to_xml( + [('JoinDomain', configuration.domain_join.join_domain), + ('MachineObjectOU', + configuration.domain_join.machine_object_ou)]) + xml += '</DomainJoin>' + if configuration.stored_certificate_settings is not None: + xml += '<StoredCertificateSettings>' + for cert in configuration.stored_certificate_settings: + xml += '<CertificateSetting>' + xml += _XmlSerializer.data_to_xml( + [('StoreLocation', cert.store_location), + ('StoreName', cert.store_name), + ('Thumbprint', cert.thumbprint)]) + xml += '</CertificateSetting>' + xml += '</StoredCertificateSettings>' + if configuration.win_rm is not None: + xml += '<WinRM><Listeners>' + for listener in configuration.win_rm.listeners: + xml += '<Listener>' + xml += _XmlSerializer.data_to_xml( + [('Protocol', listener.protocol), + ('CertificateThumbprint', listener.certificate_thumbprint)]) + xml += '</Listener>' + xml += '</Listeners></WinRM>' + xml += _XmlSerializer.data_to_xml( + [('AdminUsername', configuration.admin_username)]) + return xml + + @staticmethod + def linux_configuration_to_xml(configuration): + xml = _XmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type), + ('HostName', configuration.host_name), + ('UserName', configuration.user_name), + ('UserPassword', configuration.user_password), + ('DisableSshPasswordAuthentication', + configuration.disable_ssh_password_authentication, + _lower)]) + + if configuration.ssh is not None: + xml += '<SSH>' + xml += '<PublicKeys>' + for key in configuration.ssh.public_keys: + xml += '<PublicKey>' + xml += _XmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint), + ('Path', key.path)]) + xml += '</PublicKey>' + xml += '</PublicKeys>' + xml += '<KeyPairs>' + for key 
in configuration.ssh.key_pairs: + xml += '<KeyPair>' + xml += _XmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint), + ('Path', key.path)]) + xml += '</KeyPair>' + xml += '</KeyPairs>' + xml += '</SSH>' + return xml + + @staticmethod + def network_configuration_to_xml(configuration): + xml = _XmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type)]) + xml += '<InputEndpoints>' + for endpoint in configuration.input_endpoints: + xml += '<InputEndpoint>' + xml += _XmlSerializer.data_to_xml( + [('LoadBalancedEndpointSetName', + endpoint.load_balanced_endpoint_set_name), + ('LocalPort', endpoint.local_port), + ('Name', endpoint.name), + ('Port', endpoint.port)]) + + if endpoint.load_balancer_probe.path or\ + endpoint.load_balancer_probe.port or\ + endpoint.load_balancer_probe.protocol: + xml += '<LoadBalancerProbe>' + xml += _XmlSerializer.data_to_xml( + [('Path', endpoint.load_balancer_probe.path), + ('Port', endpoint.load_balancer_probe.port), + ('Protocol', endpoint.load_balancer_probe.protocol)]) + xml += '</LoadBalancerProbe>' + + xml += _XmlSerializer.data_to_xml( + [('Protocol', endpoint.protocol), + ('EnableDirectServerReturn', + endpoint.enable_direct_server_return, + _lower)]) + + xml += '</InputEndpoint>' + xml += '</InputEndpoints>' + xml += '<SubnetNames>' + for name in configuration.subnet_names: + xml += _XmlSerializer.data_to_xml([('SubnetName', name)]) + xml += '</SubnetNames>' + return xml + + @staticmethod + def role_to_xml(availability_set_name, data_virtual_hard_disks, + network_configuration_set, os_virtual_hard_disk, role_name, + role_size, role_type, system_configuration_set): + xml = _XmlSerializer.data_to_xml([('RoleName', role_name), + ('RoleType', role_type)]) + + xml += '<ConfigurationSets>' + + if system_configuration_set is not None: + xml += '<ConfigurationSet>' + if isinstance(system_configuration_set, WindowsConfigurationSet): + xml += _XmlSerializer.windows_configuration_to_xml( + system_configuration_set) + elif isinstance(system_configuration_set, LinuxConfigurationSet): + xml += _XmlSerializer.linux_configuration_to_xml( + system_configuration_set) + xml += '</ConfigurationSet>' + + if network_configuration_set is not None: + xml += '<ConfigurationSet>' + xml += _XmlSerializer.network_configuration_to_xml( + network_configuration_set) + xml += '</ConfigurationSet>' + + xml += '</ConfigurationSets>' + + if availability_set_name is not None: + xml += _XmlSerializer.data_to_xml( + [('AvailabilitySetName', availability_set_name)]) + + if data_virtual_hard_disks is not None: + xml += '<DataVirtualHardDisks>' + for hd in data_virtual_hard_disks: + xml += '<DataVirtualHardDisk>' + xml += _XmlSerializer.data_to_xml( + [('HostCaching', hd.host_caching), + ('DiskLabel', hd.disk_label), + ('DiskName', hd.disk_name), + ('Lun', hd.lun), + ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb), + ('MediaLink', hd.media_link)]) + xml += '</DataVirtualHardDisk>' + xml += '</DataVirtualHardDisks>' + + if os_virtual_hard_disk is not None: + xml += '<OSVirtualHardDisk>' + xml += _XmlSerializer.data_to_xml( + [('HostCaching', os_virtual_hard_disk.host_caching), + ('DiskLabel', os_virtual_hard_disk.disk_label), + ('DiskName', os_virtual_hard_disk.disk_name), + ('MediaLink', os_virtual_hard_disk.media_link), + ('SourceImageName', os_virtual_hard_disk.source_image_name)]) + xml += '</OSVirtualHardDisk>' + + if role_size is not None: + xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)]) + + return xml + + @staticmethod + def 
add_role_to_xml(role_name, system_configuration_set, + os_virtual_hard_disk, role_type, + network_configuration_set, availability_set_name, + data_virtual_hard_disks, role_size): + xml = _XmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + system_configuration_set) + return _XmlSerializer.doc_from_xml('PersistentVMRole', xml) + + @staticmethod + def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, + network_configuration_set, availability_set_name, + data_virtual_hard_disks, role_size): + xml = _XmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + None) + return _XmlSerializer.doc_from_xml('PersistentVMRole', xml) + + @staticmethod + def capture_role_to_xml(post_capture_action, target_image_name, + target_image_label, provisioning_configuration): + xml = _XmlSerializer.data_to_xml( + [('OperationType', 'CaptureRoleOperation'), + ('PostCaptureAction', post_capture_action)]) + + if provisioning_configuration is not None: + xml += '<ProvisioningConfiguration>' + if isinstance(provisioning_configuration, WindowsConfigurationSet): + xml += _XmlSerializer.windows_configuration_to_xml( + provisioning_configuration) + elif isinstance(provisioning_configuration, LinuxConfigurationSet): + xml += _XmlSerializer.linux_configuration_to_xml( + provisioning_configuration) + xml += '</ProvisioningConfiguration>' + + xml += _XmlSerializer.data_to_xml( + [('TargetImageLabel', target_image_label), + ('TargetImageName', target_image_name)]) + + return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml) + + @staticmethod + def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, + label, role_name, + system_configuration_set, + os_virtual_hard_disk, role_type, + network_configuration_set, + availability_set_name, + data_virtual_hard_disks, role_size, + virtual_network_name): + xml = _XmlSerializer.data_to_xml([('Name', deployment_name), + ('DeploymentSlot', deployment_slot), + ('Label', label)]) + xml += '<RoleList>' + xml += '<Role>' + xml += _XmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + system_configuration_set) + xml += '</Role>' + xml += '</RoleList>' + + if virtual_network_name is not None: + xml += _XmlSerializer.data_to_xml( + [('VirtualNetworkName', virtual_network_name)]) + + return _XmlSerializer.doc_from_xml('Deployment', xml) + + @staticmethod + def data_to_xml(data): + '''Creates an xml fragment from the specified data. 
+ data: + A list of tuples, where the first element is the xml element name, + the second is the element text, and the optional third is a + conversion function applied to the value before serialization. + ''' + xml = '' + for element in data: + name = element[0] + val = element[1] + if len(element) > 2: + converter = element[2] + else: + converter = None + + if val is not None: + if converter is not None: + text = _str(converter(_str(val))) + else: + text = _str(val) + + xml += ''.join(['<', name, '>', text, '</', name, '>']) + return xml + + @staticmethod + def doc_from_xml(document_element_name, inner_xml): + '''Wraps the specified xml in an xml root element with default azure + namespaces''' + xml = ''.join(['<', document_element_name, + ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"', + ' xmlns="http://schemas.microsoft.com/windowsazure">']) + xml += inner_xml + xml += ''.join(['</', document_element_name, '>']) + return xml + + @staticmethod + def doc_from_data(document_element_name, data, extended_properties=None): + xml = _XmlSerializer.data_to_xml(data) + if extended_properties is not None: + xml += _XmlSerializer.extended_properties_dict_to_xml_fragment( + extended_properties) + return _XmlSerializer.doc_from_xml(document_element_name, xml) + + @staticmethod + def extended_properties_dict_to_xml_fragment(extended_properties): + xml = '' + if extended_properties is not None and len(extended_properties) > 0: + xml += '<ExtendedProperties>' + for key, val in extended_properties.items(): + xml += ''.join(['<ExtendedProperty>', + '<Name>', + _str(key), + '</Name>', + '<Value>', + _str(val), + '</Value>', + '</ExtendedProperty>']) + xml += '</ExtendedProperties>' + return xml + + +def _parse_bool(value): + if value.lower() == 'true': + return True + return False + + +class _ServiceBusManagementXmlSerializer(object): + + @staticmethod + def namespace_to_xml(region): + '''Converts a service bus namespace description to xml + + The xml format: +<?xml version="1.0" encoding="utf-8" standalone="yes"?> +<entry xmlns="http://www.w3.org/2005/Atom"> + <content type="application/xml"> + <NamespaceDescription + xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> + <Region>West US</Region> + </NamespaceDescription> + </content> +</entry> + ''' + body = '<NamespaceDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">' + body += ''.join(['<Region>', region, '</Region>']) + body += '</NamespaceDescription>' + + return _create_entry(body) + + @staticmethod + def xml_to_namespace(xmlstr): + '''Converts xml response to service bus namespace + + The xml format for namespace: +<entry> +<id>uuid:00000000-0000-0000-0000-000000000000;id=0000000</id> +<title type="text">myunittests</title> +<updated>2012-08-22T16:48:10Z</updated> +<content type="application/xml"> + <NamespaceDescription + xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> + <Name>myunittests</Name> + <Region>West US</Region> + <DefaultKey>0000000000000000000000000000000000000000000=</DefaultKey> + <Status>Active</Status> + <CreatedAt>2012-08-22T16:48:10.217Z</CreatedAt> + <AcsManagementEndpoint>https://myunittests-sb.accesscontrol.windows.net/</AcsManagementEndpoint> + <ServiceBusEndpoint>https://myunittests.servicebus.windows.net/</ServiceBusEndpoint> + <ConnectionString>Endpoint=sb://myunittests.servicebus.windows.net/;SharedSecretIssuer=owner;SharedSecretValue=0000000000000000000000000000000000000000000=</ConnectionString> + <SubscriptionId>00000000000000000000000000000000</SubscriptionId> + <Enabled>true</Enabled> + </NamespaceDescription> +</content> +</entry> + ''' + xmldoc = minidom.parseString(xmlstr) + namespace = ServiceBusNamespace() + + mappings = ( + ('Name', 'name', None), + ('Region', 'region', None), + ('DefaultKey', 'default_key', None), + ('Status', 'status', None), + ('CreatedAt', 'created_at', None), + ('AcsManagementEndpoint', 'acs_management_endpoint', None), + ('ServiceBusEndpoint', 'servicebus_endpoint', None), + ('ConnectionString', 'connection_string', None), + ('SubscriptionId',
'subscription_id', None), + ('Enabled', 'enabled', _parse_bool), + ) + + for desc in _get_children_from_path(xmldoc, + 'entry', + 'content', + 'NamespaceDescription'): + for xml_name, field_name, conversion_func in mappings: + node_value = _get_first_child_node_value(desc, xml_name) + if node_value is not None: + if conversion_func is not None: + node_value = conversion_func(node_value) + setattr(namespace, field_name, node_value) + + return namespace + + @staticmethod + def xml_to_region(xmlstr): + '''Converts xml response to service bus region + + The xml format for region: +<entry> +<id>uuid:157c311f-081f-4b4a-a0ba-a8f990ffd2a3;id=1756759</id> +<title type="text"></title> +<updated>2013-04-10T18:25:29Z</updated> +<content type="application/xml"> + <RegionCodeDescription + xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> + <Code>East Asia</Code> + <FullName>East Asia</FullName> + </RegionCodeDescription> +</content> +</entry> + ''' + xmldoc = minidom.parseString(xmlstr) + region = ServiceBusRegion() + + for desc in _get_children_from_path(xmldoc, 'entry', 'content', + 'RegionCodeDescription'): + node_value = _get_first_child_node_value(desc, 'Code') + if node_value is not None: + region.code = node_value + node_value = _get_first_child_node_value(desc, 'FullName') + if node_value is not None: + region.fullname = node_value + + return region + + @staticmethod + def xml_to_namespace_availability(xmlstr): + '''Converts xml response to service bus namespace availability + + The xml format: +<entry xmlns="http://www.w3.org/2005/Atom"> + <id>uuid:9fc7c652-1856-47ab-8d74-cd31502ea8e6;id=3683292</id> + <title type="text"></title> + <updated>2013-04-16T03:03:37Z</updated> + <content type="application/xml"> + <NamespaceAvailability + xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"> + <Result>false</Result> + </NamespaceAvailability> + </content> +</entry> + ''' + xmldoc = minidom.parseString(xmlstr) + availability = AvailabilityResponse() + + for desc in _get_children_from_path(xmldoc, 'entry', 'content', + 'NamespaceAvailability'): + node_value = _get_first_child_node_value(desc, 'Result') + if node_value is not None: + availability.result = _parse_bool(node_value) + + return availability + +from azure.servicemanagement.servicemanagementservice import ( + ServiceManagementService) +from azure.servicemanagement.servicebusmanagementservice import ( + ServiceBusManagementService) diff --git a/awx/lib/site-packages/azure/servicemanagement/servicebusmanagementservice.py b/awx/lib/site-packages/azure/servicemanagement/servicebusmanagementservice.py new file mode 100644 index 0000000000..51d734e367 --- /dev/null +++ b/awx/lib/site-packages/azure/servicemanagement/servicebusmanagementservice.py @@ -0,0 +1,113 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+#-------------------------------------------------------------------------- +from azure import ( + MANAGEMENT_HOST, + _convert_response_to_feeds, + _str, + _validate_not_none, + ) +from azure.servicemanagement import ( + _ServiceBusManagementXmlSerializer, + ) +from azure.servicemanagement.servicemanagementclient import ( + _ServiceManagementClient, + ) + + +class ServiceBusManagementService(_ServiceManagementClient): + + def __init__(self, subscription_id=None, cert_file=None, + host=MANAGEMENT_HOST): + super(ServiceBusManagementService, self).__init__( + subscription_id, cert_file, host) + + #--Operations for service bus ---------------------------------------- + def get_regions(self): + ''' + Get list of available service bus regions. + ''' + response = self._perform_get( + self._get_path('services/serviceBus/Regions/', None), + None) + + return _convert_response_to_feeds( + response, + _ServiceBusManagementXmlSerializer.xml_to_region) + + def list_namespaces(self): + ''' + List the service bus namespaces defined on the account. + ''' + response = self._perform_get( + self._get_path('services/serviceBus/Namespaces/', None), + None) + + return _convert_response_to_feeds( + response, + _ServiceBusManagementXmlSerializer.xml_to_namespace) + + def get_namespace(self, name): + ''' + Get details about a specific namespace. + + name: Name of the service bus namespace. + ''' + response = self._perform_get( + self._get_path('services/serviceBus/Namespaces', name), + None) + + return _ServiceBusManagementXmlSerializer.xml_to_namespace( + response.body) + + def create_namespace(self, name, region): + ''' + Create a new service bus namespace. + + name: Name of the service bus namespace to create. + region: Region to create the namespace in. + ''' + _validate_not_none('name', name) + + return self._perform_put( + self._get_path('services/serviceBus/Namespaces', name), + _ServiceBusManagementXmlSerializer.namespace_to_xml(region)) + + def delete_namespace(self, name): + ''' + Delete a service bus namespace. + + name: Name of the service bus namespace to delete. + ''' + _validate_not_none('name', name) + + return self._perform_delete( + self._get_path('services/serviceBus/Namespaces', name), + None) + + def check_namespace_availability(self, name): + ''' + Checks to see if the specified service bus namespace is available, or + if it has already been taken. + + name: Name of the service bus namespace to validate. + ''' + _validate_not_none('name', name) + + response = self._perform_get( + self._get_path('services/serviceBus/CheckNamespaceAvailability', + None) + '/?namespace=' + _str(name), None) + + return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability( + response.body) diff --git a/awx/lib/site-packages/azure/servicemanagement/servicemanagementclient.py b/awx/lib/site-packages/azure/servicemanagement/servicemanagementclient.py new file mode 100644 index 0000000000..53ab03e508 --- /dev/null +++ b/awx/lib/site-packages/azure/servicemanagement/servicemanagementclient.py @@ -0,0 +1,166 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +import os + +from azure import ( + WindowsAzureError, + MANAGEMENT_HOST, + _get_request_body, + _parse_response, + _str, + _update_request_uri_query, + ) +from azure.http import ( + HTTPError, + HTTPRequest, + ) +from azure.http.httpclient import _HTTPClient +from azure.servicemanagement import ( + AZURE_MANAGEMENT_CERTFILE, + AZURE_MANAGEMENT_SUBSCRIPTIONID, + _management_error_handler, + _parse_response_for_async_op, + _update_management_header, + ) + + +class _ServiceManagementClient(object): + + def __init__(self, subscription_id=None, cert_file=None, + host=MANAGEMENT_HOST): + self.requestid = None + self.subscription_id = subscription_id + self.cert_file = cert_file + self.host = host + + if not self.cert_file: + if AZURE_MANAGEMENT_CERTFILE in os.environ: + self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE] + + if not self.subscription_id: + if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ: + self.subscription_id = os.environ[ + AZURE_MANAGEMENT_SUBSCRIPTIONID] + + if not self.cert_file or not self.subscription_id: + raise WindowsAzureError( + 'You need to provide subscription id and certificate file') + + self._httpclient = _HTTPClient( + service_instance=self, cert_file=self.cert_file) + self._filter = self._httpclient.perform_request + + def with_filter(self, filter): + '''Returns a new service which will process requests with the + specified filter. Filtering operations can include logging, automatic + retrying, etc... The filter is a lambda which receives the HTTPRequest + and another lambda. The filter can perform any pre-processing on the + request, pass it off to the next lambda, and then perform any + post-processing on the response.''' + res = type(self)(self.subscription_id, self.cert_file, self.host) + old_filter = self._filter + + def new_filter(request): + return filter(request, old_filter) + + res._filter = new_filter + return res + + def set_proxy(self, host, port, user=None, password=None): + ''' + Sets the proxy server host and port for the HTTP CONNECT Tunnelling. + + host: Address of the proxy. Ex: '192.168.0.100' + port: Port of the proxy. Ex: 6000 + user: User for proxy authorization. + password: Password for proxy authorization. 
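Because with_filter chains callables rather than requiring a subclass, a pass-through logging filter is the clearest illustration; a sketch, assuming the response object exposes status the way azure.http's HTTPResponse does:

```python
def trace_filter(request, next_filter):
    # Pre-process: inspect the outgoing request, then delegate to the
    # next filter in the chain (ultimately the HTTP client itself).
    print('>>>', request.method, request.path)
    response = next_filter(request)
    # Post-process: inspect whatever came back.
    print('<<<', response.status)
    return response

sms = ServiceManagementService('<subscription-id>', 'mycert.pem')
traced = sms.with_filter(trace_filter)   # new client; sms is untouched
traced.set_proxy('192.168.0.100', 6000)  # optional CONNECT tunnelling
```

ServiceManagementService, added later in this patch, inherits both hooks from this base class.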
+ ''' + self._httpclient.set_proxy(host, port, user, password) + + #--Helper functions -------------------------------------------------- + def _perform_request(self, request): + try: + resp = self._filter(request) + except HTTPError as ex: + return _management_error_handler(ex) + + return resp + + def _perform_get(self, path, response_type): + request = HTTPRequest() + request.method = 'GET' + request.host = self.host + request.path = path + request.path, request.query = _update_request_uri_query(request) + request.headers = _update_management_header(request) + response = self._perform_request(request) + + if response_type is not None: + return _parse_response(response, response_type) + + return response + + def _perform_put(self, path, body, async=False): + request = HTTPRequest() + request.method = 'PUT' + request.host = self.host + request.path = path + request.body = _get_request_body(body) + request.path, request.query = _update_request_uri_query(request) + request.headers = _update_management_header(request) + response = self._perform_request(request) + + if async: + return _parse_response_for_async_op(response) + + return None + + def _perform_post(self, path, body, response_type=None, async=False): + request = HTTPRequest() + request.method = 'POST' + request.host = self.host + request.path = path + request.body = _get_request_body(body) + request.path, request.query = _update_request_uri_query(request) + request.headers = _update_management_header(request) + response = self._perform_request(request) + + if response_type is not None: + return _parse_response(response, response_type) + + if async: + return _parse_response_for_async_op(response) + + return None + + def _perform_delete(self, path, async=False): + request = HTTPRequest() + request.method = 'DELETE' + request.host = self.host + request.path = path + request.path, request.query = _update_request_uri_query(request) + request.headers = _update_management_header(request) + response = self._perform_request(request) + + if async: + return _parse_response_for_async_op(response) + + return None + + def _get_path(self, resource, name): + path = '/' + self.subscription_id + '/' + resource + if name is not None: + path += '/' + _str(name) + return path diff --git a/awx/lib/site-packages/azure/servicemanagement/servicemanagementservice.py b/awx/lib/site-packages/azure/servicemanagement/servicemanagementservice.py new file mode 100644 index 0000000000..13fcf3d76b --- /dev/null +++ b/awx/lib/site-packages/azure/servicemanagement/servicemanagementservice.py @@ -0,0 +1,1754 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#-------------------------------------------------------------------------- +from azure import ( + WindowsAzureError, + MANAGEMENT_HOST, + _str, + _validate_not_none, + ) +from azure.servicemanagement import ( + AffinityGroups, + AffinityGroup, + AvailabilityResponse, + Certificate, + Certificates, + DataVirtualHardDisk, + Deployment, + Disk, + Disks, + Locations, + Operation, + HostedService, + HostedServices, + Images, + OperatingSystems, + OperatingSystemFamilies, + OSImage, + PersistentVMRole, + StorageService, + StorageServices, + Subscription, + SubscriptionCertificate, + SubscriptionCertificates, + VirtualNetworkSites, + _XmlSerializer, + ) +from azure.servicemanagement.servicemanagementclient import ( + _ServiceManagementClient, + ) + +class ServiceManagementService(_ServiceManagementClient): + + def __init__(self, subscription_id=None, cert_file=None, + host=MANAGEMENT_HOST): + super(ServiceManagementService, self).__init__( + subscription_id, cert_file, host) + + #--Operations for storage accounts ----------------------------------- + def list_storage_accounts(self): + ''' + Lists the storage accounts available under the current subscription. + ''' + return self._perform_get(self._get_storage_service_path(), + StorageServices) + + def get_storage_account_properties(self, service_name): + ''' + Returns system properties for the specified storage account. + + service_name: Name of the storage service account. + ''' + _validate_not_none('service_name', service_name) + return self._perform_get(self._get_storage_service_path(service_name), + StorageService) + + def get_storage_account_keys(self, service_name): + ''' + Returns the primary and secondary access keys for the specified + storage account. + + service_name: Name of the storage service account. + ''' + _validate_not_none('service_name', service_name) + return self._perform_get( + self._get_storage_service_path(service_name) + '/keys', + StorageService) + + def regenerate_storage_account_keys(self, service_name, key_type): + ''' + Regenerates the primary or secondary access key for the specified + storage account. + + service_name: Name of the storage service account. + key_type: + Specifies which key to regenerate. Valid values are: + Primary, Secondary + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('key_type', key_type) + return self._perform_post( + self._get_storage_service_path( + service_name) + '/keys?action=regenerate', + _XmlSerializer.regenerate_keys_to_xml( + key_type), + StorageService) + + def create_storage_account(self, service_name, description, label, + affinity_group=None, location=None, + geo_replication_enabled=True, + extended_properties=None): + ''' + Creates a new storage account in Windows Azure. + + service_name: + A name for the storage account that is unique within Windows Azure. + Storage account names must be between 3 and 24 characters in length + and use numbers and lower-case letters only. + description: + A description for the storage account. The description may be up + to 1024 characters in length. + label: + A name for the storage account. The name may be up to 100 + characters in length. The name can be used to identify the storage + account for your tracking purposes. + affinity_group: + The name of an existing affinity group in the specified + subscription. You can specify either a location or affinity_group, + but not both. + location: + The location where the storage account is created. You can specify + either a location or affinity_group, but not both. 
+ geo_replication_enabled: + Specifies whether the storage account is created with the + geo-replication enabled. If the element is not included in the + request body, the default value is true. If set to true, the data + in the storage account is replicated across more than one + geographic location so as to enable resilience in the face of + catastrophic service loss. + extended_properties: + Dictionary containing name/value pairs of storage account + properties. You can have a maximum of 50 extended property + name/value pairs. The maximum length of the Name element is 64 + characters, only alphanumeric characters and underscores are valid + in the Name, and the name must start with a letter. The value has + a maximum length of 255 characters. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('description', description) + _validate_not_none('label', label) + if affinity_group is None and location is None: + raise WindowsAzureError( + 'location or affinity_group must be specified') + if affinity_group is not None and location is not None: + raise WindowsAzureError( + 'Only one of location or affinity_group needs to be specified') + return self._perform_post( + self._get_storage_service_path(), + _XmlSerializer.create_storage_service_input_to_xml( + service_name, + description, + label, + affinity_group, + location, + geo_replication_enabled, + extended_properties), + async=True) + + def update_storage_account(self, service_name, description=None, + label=None, geo_replication_enabled=None, + extended_properties=None): + ''' + Updates the label, the description, and enables or disables the + geo-replication status for a storage account in Windows Azure. + + service_name: Name of the storage service account. + description: + A description for the storage account. The description may be up + to 1024 characters in length. + label: + A name for the storage account. The name may be up to 100 + characters in length. The name can be used to identify the storage + account for your tracking purposes. + geo_replication_enabled: + Specifies whether the storage account is created with the + geo-replication enabled. If the element is not included in the + request body, the default value is true. If set to true, the data + in the storage account is replicated across more than one + geographic location so as to enable resilience in the face of + catastrophic service loss. + extended_properties: + Dictionary containing name/value pairs of storage account + properties. You can have a maximum of 50 extended property + name/value pairs. The maximum length of the Name element is 64 + characters, only alphanumeric characters and underscores are valid + in the Name, and the name must start with a letter. The value has + a maximum length of 255 characters. + ''' + _validate_not_none('service_name', service_name) + return self._perform_put( + self._get_storage_service_path(service_name), + _XmlSerializer.update_storage_service_input_to_xml( + description, + label, + geo_replication_enabled, + extended_properties)) + + def delete_storage_account(self, service_name): + ''' + Deletes the specified storage account from Windows Azure. + + service_name: Name of the storage service account. 
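A sketch of the storage-account lifecycle these methods cover. create_storage_account is issued with async=True, so it returns as soon as the management service accepts the request; the account and location names are placeholders, and reading keys off storage_service_keys assumes the StorageService model class used elsewhere in this SDK:

```python
sms = ServiceManagementService('<subscription-id>', 'mycert.pem')

# Exactly one of location or affinity_group may be given.
sms.create_storage_account('mystorageacct',
                           'storage for build artifacts',
                           'mystorageacct',
                           location='West US')
# In real use you would poll the returned async operation before
# touching the new account.

keys = sms.get_storage_account_keys('mystorageacct')
print(keys.storage_service_keys.primary)

# Rotate the secondary key; key_type must be 'Primary' or 'Secondary'.
sms.regenerate_storage_account_keys('mystorageacct', 'Secondary')
```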
+ ''' + _validate_not_none('service_name', service_name) + return self._perform_delete( + self._get_storage_service_path(service_name)) + + def check_storage_account_name_availability(self, service_name): + ''' + Checks to see if the specified storage account name is available, or + if it has already been taken. + + service_name: Name of the storage service account. + ''' + _validate_not_none('service_name', service_name) + return self._perform_get( + self._get_storage_service_path() + + '/operations/isavailable/' + + _str(service_name) + '', + AvailabilityResponse) + + #--Operations for hosted services ------------------------------------ + def list_hosted_services(self): + ''' + Lists the hosted services available under the current subscription. + ''' + return self._perform_get(self._get_hosted_service_path(), + HostedServices) + + def get_hosted_service_properties(self, service_name, embed_detail=False): + ''' + Retrieves system properties for the specified hosted service. These + properties include the service name and service type; the name of the + affinity group to which the service belongs, or its location if it is + not part of an affinity group; and optionally, information on the + service's deployments. + + service_name: Name of the hosted service. + embed_detail: + When True, the management service returns properties for all + deployments of the service, as well as for the service itself. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('embed_detail', embed_detail) + return self._perform_get( + self._get_hosted_service_path(service_name) + + '?embed-detail=' + + _str(embed_detail).lower(), + HostedService) + + def create_hosted_service(self, service_name, label, description=None, + location=None, affinity_group=None, + extended_properties=None): + ''' + Creates a new hosted service in Windows Azure. + + service_name: + A name for the hosted service that is unique within Windows Azure. + This name is the DNS prefix name and can be used to access the + hosted service. + label: + A name for the hosted service. The name can be up to 100 characters + in length. The name can be used to identify the storage account for + your tracking purposes. + description: + A description for the hosted service. The description can be up to + 1024 characters in length. + location: + The location where the hosted service will be created. You can + specify either a location or affinity_group, but not both. + affinity_group: + The name of an existing affinity group associated with this + subscription. This name is a GUID and can be retrieved by examining + the name element of the response body returned by + list_affinity_groups. You can specify either a location or + affinity_group, but not both. + extended_properties: + Dictionary containing name/value pairs of storage account + properties. You can have a maximum of 50 extended property + name/value pairs. The maximum length of the Name element is 64 + characters, only alphanumeric characters and underscores are valid + in the Name, and the name must start with a letter. The value has + a maximum length of 255 characters. 
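Hosted-service names double as global DNS prefixes, so checking availability first (via check_hosted_service_name_availability, defined further down in this file) avoids a conflict error; the names and location here are placeholders:

```python
if sms.check_hosted_service_name_availability('myservice').result:
    sms.create_hosted_service('myservice', 'my service label',
                              description='demo service',
                              location='West US')

# embed_detail=True also pulls back every deployment of the service.
props = sms.get_hosted_service_properties('myservice', embed_detail=True)
```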
+ ''' + _validate_not_none('service_name', service_name) + _validate_not_none('label', label) + if affinity_group is None and location is None: + raise WindowsAzureError( + 'location or affinity_group must be specified') + if affinity_group is not None and location is not None: + raise WindowsAzureError( + 'Only one of location or affinity_group needs to be specified') + return self._perform_post(self._get_hosted_service_path(), + _XmlSerializer.create_hosted_service_to_xml( + service_name, + label, + description, + location, + affinity_group, + extended_properties)) + + def update_hosted_service(self, service_name, label=None, description=None, + extended_properties=None): + ''' + Updates the label and/or the description for a hosted service in + Windows Azure. + + service_name: Name of the hosted service. + label: + A name for the hosted service. The name may be up to 100 characters + in length. You must specify a value for either Label or + Description, or for both. It is recommended that the label be + unique within the subscription. The name can be used to + identify the hosted service for your tracking purposes. + description: + A description for the hosted service. The description may be up to + 1024 characters in length. You must specify a value for either + Label or Description, or for both. + extended_properties: + Dictionary containing name/value pairs of storage account + properties. You can have a maximum of 50 extended property + name/value pairs. The maximum length of the Name element is 64 + characters, only alphanumeric characters and underscores are valid + in the Name, and the name must start with a letter. The value has + a maximum length of 255 characters. + ''' + _validate_not_none('service_name', service_name) + return self._perform_put(self._get_hosted_service_path(service_name), + _XmlSerializer.update_hosted_service_to_xml( + label, + description, + extended_properties)) + + def delete_hosted_service(self, service_name): + ''' + Deletes the specified hosted service from Windows Azure. + + service_name: Name of the hosted service. + ''' + _validate_not_none('service_name', service_name) + return self._perform_delete(self._get_hosted_service_path(service_name)) + + def get_deployment_by_slot(self, service_name, deployment_slot): + ''' + Returns configuration information, status, and system properties for + a deployment. + + service_name: Name of the hosted service. + deployment_slot: + The environment to which the hosted service is deployed. Valid + values are: staging, production + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_slot', deployment_slot) + return self._perform_get( + self._get_deployment_path_using_slot( + service_name, deployment_slot), + Deployment) + + def get_deployment_by_name(self, service_name, deployment_name): + ''' + Returns configuration information, status, and system properties for a + deployment. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + return self._perform_get( + self._get_deployment_path_using_name( + service_name, deployment_name), + Deployment) + + def create_deployment(self, service_name, deployment_slot, name, + package_url, label, configuration, + start_deployment=False, + treat_warnings_as_error=False, + extended_properties=None): + ''' + Uploads a new service package and creates a new deployment on staging + or production.
+ + service_name: Name of the hosted service. + deployment_slot: + The environment to which the hosted service is deployed. Valid + values are: staging, production + name: + The name for the deployment. The deployment name must be unique + among other deployments for the hosted service. + package_url: + A URL that refers to the location of the service package in the + Blob service. The service package can be located either in a + storage account beneath the same subscription or a Shared Access + Signature (SAS) URI from any storage account. + label: + A name for the hosted service. The name can be up to 100 characters + in length. It is recommended that the label be unique within the + subscription. The name can be used to identify the hosted service + for your tracking purposes. + configuration: + The base-64 encoded service configuration file for the deployment. + start_deployment: + Indicates whether to start the deployment immediately after it is + created. If false, the service model is still deployed to the + virtual machines but the code is not run immediately. Instead, the + service is Suspended until you call Update Deployment Status and + set the status to Running, at which time the service will be + started. A deployed service still incurs charges, even if it is + suspended. + treat_warnings_as_error: + Indicates whether to treat package validation warnings as errors. + If set to true, the Created Deployment operation fails if there + are validation warnings on the service package. + extended_properties: + Dictionary containing name/value pairs of storage account + properties. You can have a maximum of 50 extended property + name/value pairs. The maximum length of the Name element is 64 + characters, only alphanumeric characters and underscores are valid + in the Name, and the name must start with a letter. The value has + a maximum length of 255 characters. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_slot', deployment_slot) + _validate_not_none('name', name) + _validate_not_none('package_url', package_url) + _validate_not_none('label', label) + _validate_not_none('configuration', configuration) + return self._perform_post( + self._get_deployment_path_using_slot( + service_name, deployment_slot), + _XmlSerializer.create_deployment_to_xml( + name, + package_url, + label, + configuration, + start_deployment, + treat_warnings_as_error, + extended_properties), + async=True) + + def delete_deployment(self, service_name, deployment_name): + ''' + Deletes the specified deployment. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + return self._perform_delete( + self._get_deployment_path_using_name( + service_name, deployment_name), + async=True) + + def swap_deployment(self, service_name, production, source_deployment): + ''' + Initiates a virtual IP swap between the staging and production + deployment environments for a service. If the service is currently + running in the staging environment, it will be swapped to the + production environment. If it is running in the production + environment, it will be swapped to staging. + + service_name: Name of the hosted service. + production: The name of the production deployment. + source_deployment: The name of the source deployment. 
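The staging-then-swap flow these two operations enable, sketched with placeholder names; per the docstring above, the configuration argument is the base-64 encoded contents of the .cscfg file:

```python
import base64

with open('ServiceConfiguration.cscfg', 'rb') as f:
    cscfg = base64.b64encode(f.read()).decode('utf-8')

sms.create_deployment(
    'myservice', 'staging', 'v2',
    'http://mystorageacct.blob.core.windows.net/packages/myservice.cspkg',
    'v2 label', cscfg, start_deployment=True)

# Once v2 looks healthy on staging, swap the VIPs so production serves it.
sms.swap_deployment('myservice', 'v1', 'v2')
```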
+ ''' + _validate_not_none('service_name', service_name) + _validate_not_none('production', production) + _validate_not_none('source_deployment', source_deployment) + return self._perform_post(self._get_hosted_service_path(service_name), + _XmlSerializer.swap_deployment_to_xml( + production, source_deployment), + async=True) + + def change_deployment_configuration(self, service_name, deployment_name, + configuration, + treat_warnings_as_error=False, + mode='Auto', extended_properties=None): + ''' + Initiates a change to the deployment configuration. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + configuration: + The base-64 encoded service configuration file for the deployment. + treat_warnings_as_error: + Indicates whether to treat package validation warnings as errors. + If set to true, the Created Deployment operation fails if there + are validation warnings on the service package. + mode: + If set to Manual, WalkUpgradeDomain must be called to apply the + update. If set to Auto, the Windows Azure platform will + automatically apply the update to each upgrade domain for the + service. Possible values are: Auto, Manual + extended_properties: + Dictionary containing name/value pairs of storage account + properties. You can have a maximum of 50 extended property + name/value pairs. The maximum length of the Name element is 64 + characters, only alphanumeric characters and underscores are valid + in the Name, and the name must start with a letter. The value has + a maximum length of 255 characters. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('configuration', configuration) + return self._perform_post( + self._get_deployment_path_using_name( + service_name, deployment_name) + '/?comp=config', + _XmlSerializer.change_deployment_to_xml( + configuration, + treat_warnings_as_error, + mode, + extended_properties), + async=True) + + def update_deployment_status(self, service_name, deployment_name, status): + ''' + Initiates a change in deployment status. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + status: + The change to initiate to the deployment status. Possible values + include: Running, Suspended + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('status', status) + return self._perform_post( + self._get_deployment_path_using_name( + service_name, deployment_name) + '/?comp=status', + _XmlSerializer.update_deployment_status_to_xml( + status), + async=True) + + def upgrade_deployment(self, service_name, deployment_name, mode, + package_url, configuration, label, force, + role_to_upgrade=None, extended_properties=None): + ''' + Initiates an upgrade. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + mode: + If set to Manual, WalkUpgradeDomain must be called to apply the + update. If set to Auto, the Windows Azure platform will + automatically apply the update to each upgrade domain for the + service. Possible values are: Auto, Manual + package_url: + A URL that refers to the location of the service package in the + Blob service. The service package can be located either in a + storage account beneath the same subscription or a Shared Access + Signature (SAS) URI from any storage account. + configuration: + The base-64 encoded service configuration file for the deployment.
+ label: + A name for the hosted service. The name can be up to 100 characters + in length. It is recommended that the label be unique within the + subscription. The name can be used to identify the hosted service + for your tracking purposes. + force: + Specifies whether the rollback should proceed even when it will + cause local data to be lost from some role instances. True if the + rollback should proceed; otherwise false if the rollback should + fail. + role_to_upgrade: The name of the specific role to upgrade. + extended_properties: + Dictionary containing name/value pairs of storage account + properties. You can have a maximum of 50 extended property + name/value pairs. The maximum length of the Name element is 64 + characters, only alphanumeric characters and underscores are valid + in the Name, and the name must start with a letter. The value has + a maximum length of 255 characters. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('mode', mode) + _validate_not_none('package_url', package_url) + _validate_not_none('configuration', configuration) + _validate_not_none('label', label) + _validate_not_none('force', force) + return self._perform_post( + self._get_deployment_path_using_name( + service_name, deployment_name) + '/?comp=upgrade', + _XmlSerializer.upgrade_deployment_to_xml( + mode, + package_url, + configuration, + label, + role_to_upgrade, + force, + extended_properties), + async=True) + + def walk_upgrade_domain(self, service_name, deployment_name, + upgrade_domain): + ''' + Specifies the next upgrade domain to be walked during manual in-place + upgrade or configuration change. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + upgrade_domain: + An integer value that identifies the upgrade domain to walk. + Upgrade domains are identified with a zero-based index: the first + upgrade domain has an ID of 0, the second has an ID of 1, and so on. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('upgrade_domain', upgrade_domain) + return self._perform_post( + self._get_deployment_path_using_name( + service_name, deployment_name) + '/?comp=walkupgradedomain', + _XmlSerializer.walk_upgrade_domain_to_xml( + upgrade_domain), + async=True) + + def rollback_update_or_upgrade(self, service_name, deployment_name, mode, + force): + ''' + Cancels an in-progress configuration change (update) or upgrade and + returns the deployment to its state before the upgrade or + configuration change was started. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + mode: + Specifies whether the rollback should proceed automatically. + auto - The rollback proceeds without further user input. + manual - You must call the Walk Upgrade Domain operation to + apply the rollback to each upgrade domain. + force: + Specifies whether the rollback should proceed even when it will + cause local data to be lost from some role instances. True if the + rollback should proceed; otherwise false if the rollback should + fail.
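Every operation issued with async=True returns an object carrying the request id of the pending management operation; pairing that with get_operation_status (defined further down in this file) gives a simple completion poll. A sketch, assuming the async result exposes request_id the way AsynchronousOperationResult does elsewhere in this SDK:

```python
import time

result = sms.rollback_update_or_upgrade('myservice', 'v1', 'auto',
                                        force=False)

# Poll until the management service reports a terminal state.
operation = sms.get_operation_status(result.request_id)
while operation.status == 'InProgress':
    time.sleep(5)
    operation = sms.get_operation_status(result.request_id)
print(operation.status)  # 'Succeeded' or 'Failed'
```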
+ ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('mode', mode) + _validate_not_none('force', force) + return self._perform_post( + self._get_deployment_path_using_name( + service_name, deployment_name) + '/?comp=rollback', + _XmlSerializer.rollback_upgrade_to_xml( + mode, force), + async=True) + + def reboot_role_instance(self, service_name, deployment_name, + role_instance_name): + ''' + Requests a reboot of a role instance that is running in a deployment. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + role_instance_name: The name of the role instance. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_instance_name', role_instance_name) + return self._perform_post( + self._get_deployment_path_using_name( + service_name, deployment_name) + \ + '/roleinstances/' + _str(role_instance_name) + \ + '?comp=reboot', + '', + async=True) + + def reimage_role_instance(self, service_name, deployment_name, + role_instance_name): + ''' + Requests a reimage of a role instance that is running in a deployment. + + service_name: Name of the hosted service. + deployment_name: The name of the deployment. + role_instance_name: The name of the role instance. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_instance_name', role_instance_name) + return self._perform_post( + self._get_deployment_path_using_name( + service_name, deployment_name) + \ + '/roleinstances/' + _str(role_instance_name) + \ + '?comp=reimage', + '', + async=True) + + def check_hosted_service_name_availability(self, service_name): + ''' + Checks to see if the specified hosted service name is available, or if + it has already been taken. + + service_name: Name of the hosted service. + ''' + _validate_not_none('service_name', service_name) + return self._perform_get( + '/' + self.subscription_id + + '/services/hostedservices/operations/isavailable/' + + _str(service_name) + '', + AvailabilityResponse) + + #--Operations for service certificates ------------------------------- + def list_service_certificates(self, service_name): + ''' + Lists all of the service certificates associated with the specified + hosted service. + + service_name: Name of the hosted service. + ''' + _validate_not_none('service_name', service_name) + return self._perform_get( + '/' + self.subscription_id + '/services/hostedservices/' + + _str(service_name) + '/certificates', + Certificates) + + def get_service_certificate(self, service_name, thumbalgorithm, thumbprint): + ''' + Returns the public data for the specified X.509 certificate associated + with a hosted service. + + service_name: Name of the hosted service. + thumbalgorithm: The algorithm for the certificate's thumbprint. + thumbprint: The hexadecimal representation of the thumbprint. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('thumbalgorithm', thumbalgorithm) + _validate_not_none('thumbprint', thumbprint) + return self._perform_get( + '/' + self.subscription_id + '/services/hostedservices/' + + _str(service_name) + '/certificates/' + + _str(thumbalgorithm) + '-' + _str(thumbprint) + '', + Certificate) + + def add_service_certificate(self, service_name, data, certificate_format, + password): + ''' + Adds a certificate to a hosted service. 
+ + service_name: Name of the hosted service. + data: The base-64 encoded form of the pfx file. + certificate_format: + The service certificate format. The only supported value is pfx. + password: The certificate password. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('data', data) + _validate_not_none('certificate_format', certificate_format) + _validate_not_none('password', password) + return self._perform_post( + '/' + self.subscription_id + '/services/hostedservices/' + + _str(service_name) + '/certificates', + _XmlSerializer.certificate_file_to_xml( + data, certificate_format, password), + async=True) + + def delete_service_certificate(self, service_name, thumbalgorithm, + thumbprint): + ''' + Deletes a service certificate from the certificate store of a hosted + service. + + service_name: Name of the hosted service. + thumbalgorithm: The algorithm for the certificate's thumbprint. + thumbprint: The hexadecimal representation of the thumbprint. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('thumbalgorithm', thumbalgorithm) + _validate_not_none('thumbprint', thumbprint) + return self._perform_delete( + '/' + self.subscription_id + '/services/hostedservices/' + + _str(service_name) + '/certificates/' + + _str(thumbalgorithm) + '-' + _str(thumbprint), + async=True) + + #--Operations for management certificates ---------------------------- + def list_management_certificates(self): + ''' + The List Management Certificates operation lists and returns basic + information about all of the management certificates associated with + the specified subscription. Management certificates, which are also + known as subscription certificates, authenticate clients attempting to + connect to resources associated with your Windows Azure subscription. + ''' + return self._perform_get('/' + self.subscription_id + '/certificates', + SubscriptionCertificates) + + def get_management_certificate(self, thumbprint): + ''' + The Get Management Certificate operation retrieves information about + the management certificate with the specified thumbprint. Management + certificates, which are also known as subscription certificates, + authenticate clients attempting to connect to resources associated + with your Windows Azure subscription. + + thumbprint: The thumbprint value of the certificate. + ''' + _validate_not_none('thumbprint', thumbprint) + return self._perform_get( + '/' + self.subscription_id + '/certificates/' + _str(thumbprint), + SubscriptionCertificate) + + def add_management_certificate(self, public_key, thumbprint, data): + ''' + The Add Management Certificate operation adds a certificate to the + list of management certificates. Management certificates, which are + also known as subscription certificates, authenticate clients + attempting to connect to resources associated with your Windows Azure + subscription. + + public_key: + A base64 representation of the management certificate public key. + thumbprint: + The thumb print that uniquely identifies the management + certificate. + data: The certificate's raw data in base-64 encoded .cer format. 
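A sketch of deriving the three arguments from a local DER-encoded .cer file with only the standard library. The SHA-1-over-DER thumbprint matches how X.509 thumbprints are conventionally computed, and passing the same base-64 blob as both public_key and data is an illustrative assumption, not something this module prescribes:

```python
import base64
import hashlib

with open('mycert.cer', 'rb') as f:
    der = f.read()  # raw DER bytes of the certificate

cert_b64 = base64.b64encode(der).decode('utf-8')
thumbprint = hashlib.sha1(der).hexdigest().upper()

sms.add_management_certificate(cert_b64, thumbprint, cert_b64)
```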
+ ''' + _validate_not_none('public_key', public_key) + _validate_not_none('thumbprint', thumbprint) + _validate_not_none('data', data) + return self._perform_post( + '/' + self.subscription_id + '/certificates', + _XmlSerializer.subscription_certificate_to_xml( + public_key, thumbprint, data)) + + def delete_management_certificate(self, thumbprint): + ''' + The Delete Management Certificate operation deletes a certificate from + the list of management certificates. Management certificates, which + are also known as subscription certificates, authenticate clients + attempting to connect to resources associated with your Windows Azure + subscription. + + thumbprint: + The thumb print that uniquely identifies the management + certificate. + ''' + _validate_not_none('thumbprint', thumbprint) + return self._perform_delete( + '/' + self.subscription_id + '/certificates/' + _str(thumbprint)) + + #--Operations for affinity groups ------------------------------------ + def list_affinity_groups(self): + ''' + Lists the affinity groups associated with the specified subscription. + ''' + return self._perform_get( + '/' + self.subscription_id + '/affinitygroups', + AffinityGroups) + + def get_affinity_group_properties(self, affinity_group_name): + ''' + Returns the system properties associated with the specified affinity + group. + + affinity_group_name: The name of the affinity group. + ''' + _validate_not_none('affinity_group_name', affinity_group_name) + return self._perform_get( + '/' + self.subscription_id + '/affinitygroups/' + + _str(affinity_group_name) + '', + AffinityGroup) + + def create_affinity_group(self, name, label, location, description=None): + ''' + Creates a new affinity group for the specified subscription. + + name: A name for the affinity group that is unique to the subscription. + label: + A name for the affinity group. The name can be up to 100 characters + in length. + location: + The data center location where the affinity group will be created. + To list available locations, use the list_location function. + description: + A description for the affinity group. The description can be up to + 1024 characters in length. + ''' + _validate_not_none('name', name) + _validate_not_none('label', label) + _validate_not_none('location', location) + return self._perform_post( + '/' + self.subscription_id + '/affinitygroups', + _XmlSerializer.create_affinity_group_to_xml(name, + label, + description, + location)) + + def update_affinity_group(self, affinity_group_name, label, + description=None): + ''' + Updates the label and/or the description for an affinity group for the + specified subscription. + + affinity_group_name: The name of the affinity group. + label: + A name for the affinity group. The name can be up to 100 characters + in length. + description: + A description for the affinity group. The description can be up to + 1024 characters in length. + ''' + _validate_not_none('affinity_group_name', affinity_group_name) + _validate_not_none('label', label) + return self._perform_put( + '/' + self.subscription_id + '/affinitygroups/' + + _str(affinity_group_name), + _XmlSerializer.update_affinity_group_to_xml(label, description)) + + def delete_affinity_group(self, affinity_group_name): + ''' + Deletes an affinity group in the specified subscription. + + affinity_group_name: The name of the affinity group. 
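An affinity-group round trip under the same placeholder credentials; 'West US' stands in for any name returned by list_locations (defined just below):

```python
sms.create_affinity_group('my-group', 'my group label', 'West US',
                          description='keeps compute next to its storage')

props = sms.get_affinity_group_properties('my-group')

# Only the label and description can change after creation.
sms.update_affinity_group('my-group', 'renamed label')
sms.delete_affinity_group('my-group')
```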
+ ''' + _validate_not_none('affinity_group_name', affinity_group_name) + return self._perform_delete('/' + self.subscription_id + \ + '/affinitygroups/' + \ + _str(affinity_group_name)) + + #--Operations for locations ------------------------------------------ + def list_locations(self): + ''' + Lists all of the data center locations that are valid for your + subscription. + ''' + return self._perform_get('/' + self.subscription_id + '/locations', + Locations) + + #--Operations for tracking asynchronous requests --------------------- + def get_operation_status(self, request_id): + ''' + Returns the status of the specified operation. After calling an + asynchronous operation, you can call Get Operation Status to determine + whether the operation has succeeded, failed, or is still in progress. + + request_id: The request ID for the request you wish to track. + ''' + _validate_not_none('request_id', request_id) + return self._perform_get( + '/' + self.subscription_id + '/operations/' + _str(request_id), + Operation) + + #--Operations for retrieving operating system information ------------ + def list_operating_systems(self): + ''' + Lists the versions of the guest operating system that are currently + available in Windows Azure. + ''' + return self._perform_get( + '/' + self.subscription_id + '/operatingsystems', + OperatingSystems) + + def list_operating_system_families(self): + ''' + Lists the guest operating system families available in Windows Azure, + and also lists the operating system versions available for each family. + ''' + return self._perform_get( + '/' + self.subscription_id + '/operatingsystemfamilies', + OperatingSystemFamilies) + + #--Operations for retrieving subscription history -------------------- + def get_subscription(self): + ''' + Returns account and resource allocation information on the specified + subscription. + ''' + return self._perform_get('/' + self.subscription_id + '', + Subscription) + + #--Operations for virtual machines ----------------------------------- + def get_role(self, service_name, deployment_name, role_name): + ''' + Retrieves the specified virtual machine. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + return self._perform_get( + self._get_role_path(service_name, deployment_name, role_name), + PersistentVMRole) + + def create_virtual_machine_deployment(self, service_name, deployment_name, + deployment_slot, label, role_name, + system_config, os_virtual_hard_disk, + network_config=None, + availability_set_name=None, + data_virtual_hard_disks=None, + role_size=None, + role_type='PersistentVMRole', + virtual_network_name=None): + ''' + Provisions a virtual machine based on the supplied configuration. + + service_name: Name of the hosted service. + deployment_name: + The name for the deployment. The deployment name must be unique + among other deployments for the hosted service. + deployment_slot: + The environment to which the hosted service is deployed. Valid + values are: staging, production + label: + Specifies an identifier for the deployment. The label can be up to + 100 characters long. The label can be used for tracking purposes. + role_name: The name of the role. + system_config: + Contains the metadata required to provision a virtual machine from + a Windows or Linux OS image. 
Use an instance of + WindowsConfigurationSet or LinuxConfigurationSet. + os_virtual_hard_disk: + Contains the parameters Windows Azure uses to create the operating + system disk for the virtual machine. + network_config: + Encapsulates the metadata required to create the virtual network + configuration for a virtual machine. If you do not include a + network configuration set you will not be able to access the VM + through VIPs over the internet. If your virtual machine belongs to + a virtual network you can not specify which subnet address space + it resides under. + availability_set_name: + Specifies the name of an availability set to which to add the + virtual machine. This value controls the virtual machine + allocation in the Windows Azure environment. Virtual machines + specified in the same availability set are allocated to different + nodes to maximize availability. + data_virtual_hard_disks: + Contains the parameters Windows Azure uses to create a data disk + for a virtual machine. + role_size: + The size of the virtual machine to allocate. The default value is + Small. Possible values are: ExtraSmall, Small, Medium, Large, + ExtraLarge. The specified value must be compatible with the disk + selected in the OSVirtualHardDisk values. + role_type: + The type of the role for the virtual machine. The only supported + value is PersistentVMRole. + virtual_network_name: + Specifies the name of an existing virtual network to which the + deployment will belong. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('deployment_slot', deployment_slot) + _validate_not_none('label', label) + _validate_not_none('role_name', role_name) + _validate_not_none('system_config', system_config) + _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk) + return self._perform_post( + self._get_deployment_path_using_name(service_name), + _XmlSerializer.virtual_machine_deployment_to_xml( + deployment_name, + deployment_slot, + label, + role_name, + system_config, + os_virtual_hard_disk, + role_type, + network_config, + availability_set_name, + data_virtual_hard_disks, + role_size, + virtual_network_name), + async=True) + + def add_role(self, service_name, deployment_name, role_name, system_config, + os_virtual_hard_disk, network_config=None, + availability_set_name=None, data_virtual_hard_disks=None, + role_size=None, role_type='PersistentVMRole'): + ''' + Adds a virtual machine to an existing deployment. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. + system_config: + Contains the metadata required to provision a virtual machine from + a Windows or Linux OS image. Use an instance of + WindowsConfigurationSet or LinuxConfigurationSet. + os_virtual_hard_disk: + Contains the parameters Windows Azure uses to create the operating + system disk for the virtual machine. + network_config: + Encapsulates the metadata required to create the virtual network + configuration for a virtual machine. If you do not include a + network configuration set you will not be able to access the VM + through VIPs over the internet. If your virtual machine belongs to + a virtual network you can not specify which subnet address space + it resides under. + availability_set_name: + Specifies the name of an availability set to which to add the + virtual machine. This value controls the virtual machine allocation + in the Windows Azure environment. 
Virtual machines specified in the + same availability set are allocated to different nodes to maximize + availability. + data_virtual_hard_disks: + Contains the parameters Windows Azure uses to create a data disk + for a virtual machine. + role_size: + The size of the virtual machine to allocate. The default value is + Small. Possible values are: ExtraSmall, Small, Medium, Large, + ExtraLarge. The specified value must be compatible with the disk + selected in the OSVirtualHardDisk values. + role_type: + The type of the role for the virtual machine. The only supported + value is PersistentVMRole. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + _validate_not_none('system_config', system_config) + _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk) + return self._perform_post( + self._get_role_path(service_name, deployment_name), + _XmlSerializer.add_role_to_xml( + role_name, + system_config, + os_virtual_hard_disk, + role_type, + network_config, + availability_set_name, + data_virtual_hard_disks, + role_size), + async=True) + + def update_role(self, service_name, deployment_name, role_name, + os_virtual_hard_disk=None, network_config=None, + availability_set_name=None, data_virtual_hard_disks=None, + role_size=None, role_type='PersistentVMRole'): + ''' + Updates the specified virtual machine. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. + os_virtual_hard_disk: + Contains the parameters Windows Azure uses to create the operating + system disk for the virtual machine. + network_config: + Encapsulates the metadata required to create the virtual network + configuration for a virtual machine. If you do not include a + network configuration set you will not be able to access the VM + through VIPs over the internet. If your virtual machine belongs to + a virtual network you can not specify which subnet address space + it resides under. + availability_set_name: + Specifies the name of an availability set to which to add the + virtual machine. This value controls the virtual machine allocation + in the Windows Azure environment. Virtual machines specified in the + same availability set are allocated to different nodes to maximize + availability. + data_virtual_hard_disks: + Contains the parameters Windows Azure uses to create a data disk + for a virtual machine. + role_size: + The size of the virtual machine to allocate. The default value is + Small. Possible values are: ExtraSmall, Small, Medium, Large, + ExtraLarge. The specified value must be compatible with the disk + selected in the OSVirtualHardDisk values. + role_type: + The type of the role for the virtual machine. The only supported + value is PersistentVMRole. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + return self._perform_put( + self._get_role_path(service_name, deployment_name, role_name), + _XmlSerializer.update_role_to_xml( + role_name, + os_virtual_hard_disk, + role_type, + network_config, + availability_set_name, + data_virtual_hard_disks, + role_size), + async=True) + + def delete_role(self, service_name, deployment_name, role_name): + ''' + Deletes the specified virtual machine. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. 
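Provisioning a Linux VM with create_virtual_machine_deployment (documented above), sketched. LinuxConfigurationSet and OSVirtualHardDisk live in azure.servicemanagement's model definitions, which are not shown in this hunk, and every name, credential, image, and URL below is a placeholder:

```python
from azure.servicemanagement import LinuxConfigurationSet, OSVirtualHardDisk

# Hostname, admin user/password, and whether to disable SSH passwords.
linux_config = LinuxConfigurationSet('myvm', 'azureuser',
                                     'Placeh0lder!pass', True)

# Source OS image name plus the blob URL where the new OS disk will live.
os_hd = OSVirtualHardDisk(
    'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04-LTS',
    'http://mystorageacct.blob.core.windows.net/vhds/myvm.vhd')

sms.create_virtual_machine_deployment(
    'myservice', 'mydeployment', 'production', 'myvm', 'myvm',
    linux_config, os_hd, role_size='Small')
```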
+ ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + return self._perform_delete( + self._get_role_path(service_name, deployment_name, role_name), + async=True) + + def capture_role(self, service_name, deployment_name, role_name, + post_capture_action, target_image_name, + target_image_label, provisioning_configuration=None): + ''' + The Capture Role operation captures a virtual machine image to your + image gallery. From the captured image, you can create additional + customized virtual machines. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. + post_capture_action: + Specifies the action after capture operation completes. Possible + values are: Delete, Reprovision. + target_image_name: + Specifies the image name of the captured virtual machine. + target_image_label: + Specifies the friendly name of the captured virtual machine. + provisioning_configuration: + Use an instance of WindowsConfigurationSet or LinuxConfigurationSet. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + _validate_not_none('post_capture_action', post_capture_action) + _validate_not_none('target_image_name', target_image_name) + _validate_not_none('target_image_label', target_image_label) + return self._perform_post( + self._get_role_instance_operations_path( + service_name, deployment_name, role_name), + _XmlSerializer.capture_role_to_xml( + post_capture_action, + target_image_name, + target_image_label, + provisioning_configuration), + async=True) + + def start_role(self, service_name, deployment_name, role_name): + ''' + Starts the specified virtual machine. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + return self._perform_post( + self._get_role_instance_operations_path( + service_name, deployment_name, role_name), + _XmlSerializer.start_role_operation_to_xml(), + async=True) + + def start_roles(self, service_name, deployment_name, role_names): + ''' + Starts the specified virtual machines. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_names: The names of the roles, as an enumerable of strings. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_names', role_names) + return self._perform_post( + self._get_roles_operations_path(service_name, deployment_name), + _XmlSerializer.start_roles_operation_to_xml(role_names), + async=True) + + def restart_role(self, service_name, deployment_name, role_name): + ''' + Restarts the specified virtual machine. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. 
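start_roles batches a whole tier into one management operation instead of one call per instance; a sketch, with shutdown_roles (defined next) as the symmetric bulk stop. StoppedDeallocated is the shutdown action that releases compute and stops billing:

```python
role_names = ['web-1', 'web-2']

# Stop billing for the whole tier by releasing its compute resources.
sms.shutdown_roles('myservice', 'mydeployment', role_names,
                   post_shutdown_action='StoppedDeallocated')

# Later, bring the tier back with a single operation.
sms.start_roles('myservice', 'mydeployment', role_names)
```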
+ ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + return self._perform_post( + self._get_role_instance_operations_path( + service_name, deployment_name, role_name), + _XmlSerializer.restart_role_operation_to_xml( + ), + async=True) + + def shutdown_role(self, service_name, deployment_name, role_name, + post_shutdown_action='Stopped'): + ''' + Shuts down the specified virtual machine. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_name: The name of the role. + post_shutdown_action: + Specifies how the Virtual Machine should be shut down. Values are: + Stopped + Shuts down the Virtual Machine but retains the compute + resources. You will continue to be billed for the resources + that the stopped machine uses. + StoppedDeallocated + Shuts down the Virtual Machine and releases the compute + resources. You are not billed for the compute resources that + this Virtual Machine uses. If a static Virtual Network IP + address is assigned to the Virtual Machine, it is reserved. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_name', role_name) + _validate_not_none('post_shutdown_action', post_shutdown_action) + return self._perform_post( + self._get_role_instance_operations_path( + service_name, deployment_name, role_name), + _XmlSerializer.shutdown_role_operation_to_xml(post_shutdown_action), + async=True) + + def shutdown_roles(self, service_name, deployment_name, role_names, + post_shutdown_action='Stopped'): + ''' + Shuts down the specified virtual machines. + + service_name: The name of the service. + deployment_name: The name of the deployment. + role_names: The names of the roles, as an enumerable of strings. + post_shutdown_action: + Specifies how the Virtual Machine should be shut down. Values are: + Stopped + Shuts down the Virtual Machine but retains the compute + resources. You will continue to be billed for the resources + that the stopped machine uses. + StoppedDeallocated + Shuts down the Virtual Machine and releases the compute + resources. You are not billed for the compute resources that + this Virtual Machine uses. If a static Virtual Network IP + address is assigned to the Virtual Machine, it is reserved. + ''' + _validate_not_none('service_name', service_name) + _validate_not_none('deployment_name', deployment_name) + _validate_not_none('role_names', role_names) + _validate_not_none('post_shutdown_action', post_shutdown_action) + return self._perform_post( + self._get_roles_operations_path(service_name, deployment_name), + _XmlSerializer.shutdown_roles_operation_to_xml( + role_names, post_shutdown_action), + async=True) + + #--Operations for virtual machine images ----------------------------- + def list_os_images(self): + ''' + Retrieves a list of the OS images from the image repository. + ''' + return self._perform_get(self._get_image_path(), + Images) + + def get_os_image(self, image_name): + ''' + Retrieves an OS image from the image repository. + ''' + return self._perform_get(self._get_image_path(image_name), + OSImage) + + def add_os_image(self, label, media_link, name, os): + ''' + Adds an OS image that is currently stored in a storage account in your + subscription to the image repository. + + label: Specifies the friendly name of the image. 
+        media_link:
+            Specifies the location of the blob in Windows Azure blob store
+            where the media for the image is located. The blob location must
+            belong to a storage account in the subscription specified by the
+            value in the operation call. Example:
+            http://example.blob.core.windows.net/disks/mydisk.vhd
+        name:
+            Specifies a name for the OS image that Windows Azure uses to
+            identify the image when creating one or more virtual machines.
+        os:
+            The operating system type of the OS image. Possible values are:
+            Linux, Windows
+        '''
+        _validate_not_none('label', label)
+        _validate_not_none('media_link', media_link)
+        _validate_not_none('name', name)
+        _validate_not_none('os', os)
+        return self._perform_post(self._get_image_path(),
+                                  _XmlSerializer.os_image_to_xml(
+                                      label, media_link, name, os),
+                                  async=True)
+
+    def update_os_image(self, image_name, label, media_link, name, os):
+        '''
+        Updates an OS image in your image repository.
+
+        image_name: The name of the image to update.
+        label:
+            Specifies the friendly name of the image to be updated. You
+            cannot use this operation to update images provided by the
+            Windows Azure platform.
+        media_link:
+            Specifies the location of the blob in Windows Azure blob store
+            where the media for the image is located. The blob location must
+            belong to a storage account in the subscription specified by the
+            value in the operation call. Example:
+            http://example.blob.core.windows.net/disks/mydisk.vhd
+        name:
+            Specifies a name for the OS image that Windows Azure uses to
+            identify the image when creating one or more VM Roles.
+        os:
+            The operating system type of the OS image. Possible values are:
+            Linux, Windows
+        '''
+        _validate_not_none('image_name', image_name)
+        _validate_not_none('label', label)
+        _validate_not_none('media_link', media_link)
+        _validate_not_none('name', name)
+        _validate_not_none('os', os)
+        return self._perform_put(self._get_image_path(image_name),
+                                 _XmlSerializer.os_image_to_xml(
+                                     label, media_link, name, os),
+                                 async=True)
+
+    def delete_os_image(self, image_name, delete_vhd=False):
+        '''
+        Deletes the specified OS image from your image repository.
+
+        image_name: The name of the image.
+        delete_vhd: Deletes the underlying vhd blob in Azure storage.
+        '''
+        _validate_not_none('image_name', image_name)
+        path = self._get_image_path(image_name)
+        if delete_vhd:
+            path += '?comp=media'
+        return self._perform_delete(path, async=True)
+
+    #--Operations for virtual machine disks ------------------------------
+    def get_data_disk(self, service_name, deployment_name, role_name, lun):
+        '''
+        Retrieves the specified data disk from a virtual machine.
+
+        service_name: The name of the service.
+        deployment_name: The name of the deployment.
+        role_name: The name of the role.
+        lun: The Logical Unit Number (LUN) for the disk.
+        '''
+        _validate_not_none('service_name', service_name)
+        _validate_not_none('deployment_name', deployment_name)
+        _validate_not_none('role_name', role_name)
+        _validate_not_none('lun', lun)
+        return self._perform_get(
+            self._get_data_disk_path(
+                service_name, deployment_name, role_name, lun),
+            DataVirtualHardDisk)
+
+    def add_data_disk(self, service_name, deployment_name, role_name, lun,
+                      host_caching=None, media_link=None, disk_label=None,
+                      disk_name=None, logical_disk_size_in_gb=None,
+                      source_media_link=None):
+        '''
+        Adds a data disk to a virtual machine.
+
+        service_name: The name of the service.
+        deployment_name: The name of the deployment.
+        role_name: The name of the role.
+        lun:
+            Specifies the Logical Unit Number (LUN) for the disk. The LUN
+            specifies the slot in which the data drive appears when mounted
+            for usage by the virtual machine. Valid LUN values are 0 through
+            15.
+        host_caching:
+            Specifies the platform caching behavior of data disk blob for
+            read/write efficiency. The default value is ReadOnly. Possible
+            values are: None, ReadOnly, ReadWrite
+        media_link:
+            Specifies the location of the blob in Windows Azure blob store
+            where the media for the disk is located. The blob location must
+            belong to the storage account in the subscription specified by
+            the value in the operation call. Example:
+            http://example.blob.core.windows.net/disks/mydisk.vhd
+        disk_label:
+            Specifies the description of the data disk. When you attach a
+            disk, either by directly referencing a media using the MediaLink
+            element or specifying the target disk size, you can use the
+            DiskLabel element to customize the name property of the target
+            data disk.
+        disk_name:
+            Specifies the name of the disk. Windows Azure uses the specified
+            disk to create the data disk for the machine and populates this
+            field with the disk name.
+        logical_disk_size_in_gb:
+            Specifies the size, in GB, of an empty disk to be attached to the
+            role. The disk can be created as part of disk attach or create VM
+            role call by specifying the value for this property. Windows
+            Azure creates the empty disk based on size preference and
+            attaches the newly created disk to the Role.
+        source_media_link:
+            Specifies the location of a blob in account storage which is
+            mounted as a data disk when the virtual machine is created.
+        '''
+        _validate_not_none('service_name', service_name)
+        _validate_not_none('deployment_name', deployment_name)
+        _validate_not_none('role_name', role_name)
+        _validate_not_none('lun', lun)
+        return self._perform_post(
+            self._get_data_disk_path(service_name, deployment_name, role_name),
+            _XmlSerializer.data_virtual_hard_disk_to_xml(
+                host_caching,
+                disk_label,
+                disk_name,
+                lun,
+                logical_disk_size_in_gb,
+                media_link,
+                source_media_link),
+            async=True)
+
+    def update_data_disk(self, service_name, deployment_name, role_name, lun,
+                         host_caching=None, media_link=None, updated_lun=None,
+                         disk_label=None, disk_name=None,
+                         logical_disk_size_in_gb=None):
+        '''
+        Updates the specified data disk attached to the specified virtual
+        machine.
+
+        service_name: The name of the service.
+        deployment_name: The name of the deployment.
+        role_name: The name of the role.
+        lun:
+            Specifies the Logical Unit Number (LUN) for the disk. The LUN
+            specifies the slot in which the data drive appears when mounted
+            for usage by the virtual machine. Valid LUN values are 0 through
+            15.
+        host_caching:
+            Specifies the platform caching behavior of data disk blob for
+            read/write efficiency. The default value is ReadOnly. Possible
+            values are: None, ReadOnly, ReadWrite
+        media_link:
+            Specifies the location of the blob in Windows Azure blob store
+            where the media for the disk is located. The blob location must
+            belong to the storage account in the subscription specified by
+            the value in the operation call. Example:
+            http://example.blob.core.windows.net/disks/mydisk.vhd
+        updated_lun:
+            Specifies the Logical Unit Number (LUN) for the disk. The LUN
+            specifies the slot in which the data drive appears when mounted
+            for usage by the virtual machine. Valid LUN values are 0 through
+            15.
+        disk_label:
+            Specifies the description of the data disk. When you attach a
+            disk, either by directly referencing a media using the MediaLink
+            element or specifying the target disk size, you can use the
+            DiskLabel element to customize the name property of the target
+            data disk.
+        disk_name:
+            Specifies the name of the disk. Windows Azure uses the specified
+            disk to create the data disk for the machine and populates this
+            field with the disk name.
+        logical_disk_size_in_gb:
+            Specifies the size, in GB, of an empty disk to be attached to the
+            role. The disk can be created as part of disk attach or create VM
+            role call by specifying the value for this property. Windows
+            Azure creates the empty disk based on size preference and
+            attaches the newly created disk to the Role.
+        '''
+        _validate_not_none('service_name', service_name)
+        _validate_not_none('deployment_name', deployment_name)
+        _validate_not_none('role_name', role_name)
+        _validate_not_none('lun', lun)
+        return self._perform_put(
+            self._get_data_disk_path(
+                service_name, deployment_name, role_name, lun),
+            _XmlSerializer.data_virtual_hard_disk_to_xml(
+                host_caching,
+                disk_label,
+                disk_name,
+                updated_lun,
+                logical_disk_size_in_gb,
+                media_link,
+                None),
+            async=True)
+
+    def delete_data_disk(self, service_name, deployment_name, role_name, lun,
+                         delete_vhd=False):
+        '''
+        Removes the specified data disk from a virtual machine.
+
+        service_name: The name of the service.
+        deployment_name: The name of the deployment.
+        role_name: The name of the role.
+        lun: The Logical Unit Number (LUN) for the disk.
+        delete_vhd: Deletes the underlying vhd blob in Azure storage.
+        '''
+        _validate_not_none('service_name', service_name)
+        _validate_not_none('deployment_name', deployment_name)
+        _validate_not_none('role_name', role_name)
+        _validate_not_none('lun', lun)
+        path = self._get_data_disk_path(
+            service_name, deployment_name, role_name, lun)
+        if delete_vhd:
+            path += '?comp=media'
+        return self._perform_delete(path, async=True)
+
+    #--Operations for virtual machine disks ------------------------------
+    def list_disks(self):
+        '''
+        Retrieves a list of the disks in your image repository.
+        '''
+        return self._perform_get(self._get_disk_path(),
+                                 Disks)
+
+    def get_disk(self, disk_name):
+        '''
+        Retrieves a disk from your image repository.
+        '''
+        return self._perform_get(self._get_disk_path(disk_name),
+                                 Disk)
+
+    def add_disk(self, has_operating_system, label, media_link, name, os):
+        '''
+        Adds a disk to the user image repository. The disk can be an OS disk
+        or a data disk.
+
+        has_operating_system:
+            Specifies whether the disk contains an operating system. Only a
+            disk with an operating system installed can be mounted as OS
+            Drive.
+        label: Specifies the description of the disk.
+        media_link:
+            Specifies the location of the blob in Windows Azure blob store
+            where the media for the disk is located. The blob location must
+            belong to the storage account in the current subscription
+            specified by the value in the operation call. Example:
+            http://example.blob.core.windows.net/disks/mydisk.vhd
+        name:
+            Specifies a name for the disk. Windows Azure uses the name to
+            identify the disk when creating virtual machines from the disk.
+        os: The OS type of the disk. Possible values are: Linux, Windows
+        '''
+        _validate_not_none('has_operating_system', has_operating_system)
+        _validate_not_none('label', label)
+        _validate_not_none('media_link', media_link)
+        _validate_not_none('name', name)
+        _validate_not_none('os', os)
+        return self._perform_post(self._get_disk_path(),
+                                  _XmlSerializer.disk_to_xml(
+                                      has_operating_system,
+                                      label,
+                                      media_link,
+                                      name,
+                                      os))
+
+    def update_disk(self, disk_name, has_operating_system, label, media_link,
+                    name, os):
+        '''
+        Updates an existing disk in your image repository.
+
+        disk_name: The name of the disk to update.
+        has_operating_system:
+            Specifies whether the disk contains an operating system. Only a
+            disk with an operating system installed can be mounted as OS
+            Drive.
+        label: Specifies the description of the disk.
+        media_link:
+            Specifies the location of the blob in Windows Azure blob store
+            where the media for the disk is located. The blob location must
+            belong to the storage account in the current subscription
+            specified by the value in the operation call. Example:
+            http://example.blob.core.windows.net/disks/mydisk.vhd
+        name:
+            Specifies a name for the disk. Windows Azure uses the name to
+            identify the disk when creating virtual machines from the disk.
+        os: The OS type of the disk. Possible values are: Linux, Windows
+        '''
+        _validate_not_none('disk_name', disk_name)
+        _validate_not_none('has_operating_system', has_operating_system)
+        _validate_not_none('label', label)
+        _validate_not_none('media_link', media_link)
+        _validate_not_none('name', name)
+        _validate_not_none('os', os)
+        return self._perform_put(self._get_disk_path(disk_name),
+                                 _XmlSerializer.disk_to_xml(
+                                     has_operating_system,
+                                     label,
+                                     media_link,
+                                     name,
+                                     os))
+
+    def delete_disk(self, disk_name, delete_vhd=False):
+        '''
+        Deletes the specified data or operating system disk from your image
+        repository.
+
+        disk_name: The name of the disk to delete.
+        delete_vhd: Deletes the underlying vhd blob in Azure storage.
+        '''
+        _validate_not_none('disk_name', disk_name)
+        path = self._get_disk_path(disk_name)
+        if delete_vhd:
+            path += '?comp=media'
+        return self._perform_delete(path)
+
+    #--Operations for virtual networks ------------------------------
+    def list_virtual_network_sites(self):
+        '''
+        Retrieves a list of the virtual networks.
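+
+        Example (illustrative only; the attribute names on the returned
+        VirtualNetworkSites entries are assumptions, not taken from this
+        module):
+
+            sites = sms.list_virtual_network_sites()
+            for site in sites:
+                print(site.name)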
+ ''' + return self._perform_get(self._get_virtual_network_site_path(), VirtualNetworkSites) + + #--Helper functions -------------------------------------------------- + def _get_virtual_network_site_path(self): + return self._get_path('services/networking/virtualnetwork', None) + + def _get_storage_service_path(self, service_name=None): + return self._get_path('services/storageservices', service_name) + + def _get_hosted_service_path(self, service_name=None): + return self._get_path('services/hostedservices', service_name) + + def _get_deployment_path_using_slot(self, service_name, slot=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deploymentslots', slot) + + def _get_deployment_path_using_name(self, service_name, + deployment_name=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments', deployment_name) + + def _get_role_path(self, service_name, deployment_name, role_name=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments/' + deployment_name + + '/roles', role_name) + + def _get_role_instance_operations_path(self, service_name, deployment_name, + role_name=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments/' + deployment_name + + '/roleinstances', role_name) + '/Operations' + + def _get_roles_operations_path(self, service_name, deployment_name): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments/' + deployment_name + + '/roles/Operations', None) + + def _get_data_disk_path(self, service_name, deployment_name, role_name, + lun=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments/' + _str(deployment_name) + + '/roles/' + _str(role_name) + '/DataDisks', lun) + + def _get_disk_path(self, disk_name=None): + return self._get_path('services/disks', disk_name) + + def _get_image_path(self, image_name=None): + return self._get_path('services/images', image_name) diff --git a/awx/lib/site-packages/azure/storage/__init__.py b/awx/lib/site-packages/azure/storage/__init__.py new file mode 100644 index 0000000000..5a28afd0bb --- /dev/null +++ b/awx/lib/site-packages/azure/storage/__init__.py @@ -0,0 +1,913 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#--------------------------------------------------------------------------
+import hashlib
+import hmac
+import sys
+import types
+
+from datetime import datetime
+from xml.dom import minidom
+from azure import (WindowsAzureData,
+                   WindowsAzureError,
+                   METADATA_NS,
+                   xml_escape,
+                   _create_entry,
+                   _decode_base64_to_text,
+                   _decode_base64_to_bytes,
+                   _encode_base64,
+                   _fill_data_minidom,
+                   _fill_instance_element,
+                   _get_child_nodes,
+                   _get_child_nodesNS,
+                   _get_children_from_path,
+                   _get_entry_properties,
+                   _general_error_handler,
+                   _list_of,
+                   _parse_response_for_dict,
+                   _unicode_type,
+                   _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
+                   )
+
+# x-ms-version for storage service.
+X_MS_VERSION = '2012-02-12'
+
+
+class EnumResultsBase(object):
+
+    ''' base class for EnumResults. '''
+
+    def __init__(self):
+        self.prefix = u''
+        self.marker = u''
+        self.max_results = 0
+        self.next_marker = u''
+
+
+class ContainerEnumResults(EnumResultsBase):
+
+    ''' Blob Container list. '''
+
+    def __init__(self):
+        EnumResultsBase.__init__(self)
+        self.containers = _list_of(Container)
+
+    def __iter__(self):
+        return iter(self.containers)
+
+    def __len__(self):
+        return len(self.containers)
+
+    def __getitem__(self, index):
+        return self.containers[index]
+
+
+class Container(WindowsAzureData):
+
+    ''' Blob container class. '''
+
+    def __init__(self):
+        self.name = u''
+        self.url = u''
+        self.properties = Properties()
+        self.metadata = {}
+
+
+class Properties(WindowsAzureData):
+
+    ''' Blob container's properties class. '''
+
+    def __init__(self):
+        self.last_modified = u''
+        self.etag = u''
+
+
+class RetentionPolicy(WindowsAzureData):
+
+    ''' RetentionPolicy in service properties. '''
+
+    def __init__(self):
+        self.enabled = False
+        self.__dict__['days'] = None
+
+    def get_days(self):
+        # convert days to int value
+        return int(self.__dict__['days'])
+
+    def set_days(self, value):
+        ''' set default days if days is set to empty. '''
+        self.__dict__['days'] = value
+
+    days = property(fget=get_days, fset=set_days)
+
+
+class Logging(WindowsAzureData):
+
+    ''' Logging class in service properties. '''
+
+    def __init__(self):
+        self.version = u'1.0'
+        self.delete = False
+        self.read = False
+        self.write = False
+        self.retention_policy = RetentionPolicy()
+
+
+class Metrics(WindowsAzureData):
+
+    ''' Metrics class in service properties. '''
+
+    def __init__(self):
+        self.version = u'1.0'
+        self.enabled = False
+        self.include_apis = None
+        self.retention_policy = RetentionPolicy()
+
+
+class StorageServiceProperties(WindowsAzureData):
+
+    ''' Storage Service Properties class. '''
+
+    def __init__(self):
+        self.logging = Logging()
+        self.metrics = Metrics()
+
+
+class AccessPolicy(WindowsAzureData):
+
+    ''' Access Policy class in service properties. '''
+
+    def __init__(self, start=u'', expiry=u'', permission=u''):
+        self.start = start
+        self.expiry = expiry
+        self.permission = permission
+
+
+class SignedIdentifier(WindowsAzureData):
+
+    ''' Signed Identifier class for service properties. '''
+
+    def __init__(self):
+        self.id = u''
+        self.access_policy = AccessPolicy()
+
+
+class SignedIdentifiers(WindowsAzureData):
+
+    ''' SignedIdentifier list.
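+
+    A minimal construction sketch (illustrative; the policy id, dates and
+    permission string are placeholders):
+
+        si = SignedIdentifier()
+        si.id = 'mypolicyid'
+        si.access_policy = AccessPolicy(start=u'2014-01-01T00:00:00Z',
+                                        expiry=u'2014-01-02T00:00:00Z',
+                                        permission=u'r')
+        identifiers = SignedIdentifiers()
+        identifiers.signed_identifiers.append(si)
+        # pass ``identifiers`` to BlobService.set_container_acl()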
''' + + def __init__(self): + self.signed_identifiers = _list_of(SignedIdentifier) + + def __iter__(self): + return iter(self.signed_identifiers) + + def __len__(self): + return len(self.signed_identifiers) + + def __getitem__(self, index): + return self.signed_identifiers[index] + + +class BlobEnumResults(EnumResultsBase): + + ''' Blob list.''' + + def __init__(self): + EnumResultsBase.__init__(self) + self.blobs = _list_of(Blob) + self.prefixes = _list_of(BlobPrefix) + self.delimiter = '' + + def __iter__(self): + return iter(self.blobs) + + def __len__(self): + return len(self.blobs) + + def __getitem__(self, index): + return self.blobs[index] + + +class BlobResult(bytes): + + def __new__(cls, blob, properties): + return bytes.__new__(cls, blob if blob else b'') + + def __init__(self, blob, properties): + self.properties = properties + + +class Blob(WindowsAzureData): + + ''' Blob class. ''' + + def __init__(self): + self.name = u'' + self.snapshot = u'' + self.url = u'' + self.properties = BlobProperties() + self.metadata = {} + + +class BlobProperties(WindowsAzureData): + + ''' Blob Properties ''' + + def __init__(self): + self.last_modified = u'' + self.etag = u'' + self.content_length = 0 + self.content_type = u'' + self.content_encoding = u'' + self.content_language = u'' + self.content_md5 = u'' + self.xms_blob_sequence_number = 0 + self.blob_type = u'' + self.lease_status = u'' + self.lease_state = u'' + self.lease_duration = u'' + self.copy_id = u'' + self.copy_source = u'' + self.copy_status = u'' + self.copy_progress = u'' + self.copy_completion_time = u'' + self.copy_status_description = u'' + + +class BlobPrefix(WindowsAzureData): + + ''' BlobPrefix in Blob. ''' + + def __init__(self): + self.name = '' + + +class BlobBlock(WindowsAzureData): + + ''' BlobBlock class ''' + + def __init__(self, id=None, size=None): + self.id = id + self.size = size + + +class BlobBlockList(WindowsAzureData): + + ''' BlobBlockList class ''' + + def __init__(self): + self.committed_blocks = [] + self.uncommitted_blocks = [] + + +class PageRange(WindowsAzureData): + + ''' Page Range for page blob. ''' + + def __init__(self): + self.start = 0 + self.end = 0 + + +class PageList(object): + + ''' Page list for page blob. ''' + + def __init__(self): + self.page_ranges = _list_of(PageRange) + + def __iter__(self): + return iter(self.page_ranges) + + def __len__(self): + return len(self.page_ranges) + + def __getitem__(self, index): + return self.page_ranges[index] + + +class QueueEnumResults(EnumResultsBase): + + ''' Queue list''' + + def __init__(self): + EnumResultsBase.__init__(self) + self.queues = _list_of(Queue) + + def __iter__(self): + return iter(self.queues) + + def __len__(self): + return len(self.queues) + + def __getitem__(self, index): + return self.queues[index] + + +class Queue(WindowsAzureData): + + ''' Queue class ''' + + def __init__(self): + self.name = u'' + self.url = u'' + self.metadata = {} + + +class QueueMessagesList(WindowsAzureData): + + ''' Queue message list. ''' + + def __init__(self): + self.queue_messages = _list_of(QueueMessage) + + def __iter__(self): + return iter(self.queue_messages) + + def __len__(self): + return len(self.queue_messages) + + def __getitem__(self, index): + return self.queue_messages[index] + + +class QueueMessage(WindowsAzureData): + + ''' Queue message class. 
''' + + def __init__(self): + self.message_id = u'' + self.insertion_time = u'' + self.expiration_time = u'' + self.pop_receipt = u'' + self.time_next_visible = u'' + self.dequeue_count = u'' + self.message_text = u'' + + +class Entity(WindowsAzureData): + + ''' Entity class. The attributes of entity will be created dynamically. ''' + pass + + +class EntityProperty(WindowsAzureData): + + ''' Entity property. contains type and value. ''' + + def __init__(self, type=None, value=None): + self.type = type + self.value = value + + +class Table(WindowsAzureData): + + ''' Only for intellicens and telling user the return type. ''' + pass + + +def _parse_blob_enum_results_list(response): + respbody = response.body + return_obj = BlobEnumResults() + doc = minidom.parseString(respbody) + + for enum_results in _get_child_nodes(doc, 'EnumerationResults'): + for child in _get_children_from_path(enum_results, 'Blobs', 'Blob'): + return_obj.blobs.append(_fill_instance_element(child, Blob)) + + for child in _get_children_from_path(enum_results, + 'Blobs', + 'BlobPrefix'): + return_obj.prefixes.append( + _fill_instance_element(child, BlobPrefix)) + + for name, value in vars(return_obj).items(): + if name == 'blobs' or name == 'prefixes': + continue + value = _fill_data_minidom(enum_results, name, value) + if value is not None: + setattr(return_obj, name, value) + + return return_obj + + +def _update_storage_header(request): + ''' add additional headers for storage request. ''' + if request.body: + assert isinstance(request.body, bytes) + + # if it is PUT, POST, MERGE, DELETE, need to add content-lengt to header. + if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: + request.headers.append(('Content-Length', str(len(request.body)))) + + # append addtional headers base on the service + request.headers.append(('x-ms-version', X_MS_VERSION)) + + # append x-ms-meta name, values to header + for name, value in request.headers: + if 'x-ms-meta-name-values' in name and value: + for meta_name, meta_value in value.items(): + request.headers.append(('x-ms-meta-' + meta_name, meta_value)) + request.headers.remove((name, value)) + break + return request + + +def _update_storage_blob_header(request, account_name, account_key): + ''' add additional headers for storage blob request. ''' + + request = _update_storage_header(request) + current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + request.headers.append(('x-ms-date', current_time)) + request.headers.append( + ('Content-Type', 'application/octet-stream Charset=UTF-8')) + request.headers.append(('Authorization', + _sign_storage_blob_request(request, + account_name, + account_key))) + + return request.headers + + +def _update_storage_queue_header(request, account_name, account_key): + ''' add additional headers for storage queue request. ''' + return _update_storage_blob_header(request, account_name, account_key) + + +def _update_storage_table_header(request): + ''' add additional headers for storage table request. 
''' + + request = _update_storage_header(request) + for name, _ in request.headers: + if name.lower() == 'content-type': + break + else: + request.headers.append(('Content-Type', 'application/atom+xml')) + request.headers.append(('DataServiceVersion', '2.0;NetFx')) + request.headers.append(('MaxDataServiceVersion', '2.0;NetFx')) + current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + request.headers.append(('x-ms-date', current_time)) + request.headers.append(('Date', current_time)) + return request.headers + + +def _sign_storage_blob_request(request, account_name, account_key): + ''' + Returns the signed string for blob request which is used to set + Authorization header. This is also used to sign queue request. + ''' + + uri_path = request.path.split('?')[0] + + # method to sign + string_to_sign = request.method + '\n' + + # get headers to sign + headers_to_sign = [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'range'] + + request_header_dict = dict((name.lower(), value) + for name, value in request.headers if value) + string_to_sign += '\n'.join(request_header_dict.get(x, '') + for x in headers_to_sign) + '\n' + + # get x-ms header to sign + x_ms_headers = [] + for name, value in request.headers: + if 'x-ms' in name: + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value: + string_to_sign += ''.join([name, ':', value, '\n']) + + # get account_name and uri path to sign + string_to_sign += '/' + account_name + uri_path + + # get query string to sign if it is not table service + query_to_sign = request.query + query_to_sign.sort() + + current_name = '' + for name, value in query_to_sign: + if value: + if current_name != name: + string_to_sign += '\n' + name + ':' + value + else: + string_to_sign += '\n' + ',' + value + + # sign the request + auth_string = 'SharedKey ' + account_name + ':' + \ + _sign_string(account_key, string_to_sign) + return auth_string + + +def _sign_storage_table_request(request, account_name, account_key): + uri_path = request.path.split('?')[0] + + string_to_sign = request.method + '\n' + headers_to_sign = ['content-md5', 'content-type', 'date'] + request_header_dict = dict((name.lower(), value) + for name, value in request.headers if value) + string_to_sign += '\n'.join(request_header_dict.get(x, '') + for x in headers_to_sign) + '\n' + + # get account_name and uri path to sign + string_to_sign += ''.join(['/', account_name, uri_path]) + + for name, value in request.query: + if name == 'comp' and uri_path == '/': + string_to_sign += '?comp=' + value + break + + # sign the request + auth_string = 'SharedKey ' + account_name + ':' + \ + _sign_string(account_key, string_to_sign) + return auth_string + + +def _sign_string(account_key, string_to_sign): + decoded_account_key = _decode_base64_to_bytes(account_key) + if isinstance(string_to_sign, _unicode_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC( + decoded_account_key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = _encode_base64(digest) + return encoded_digest + + +def _to_python_bool(value): + if value.lower() == 'true': + return True + return False + + +def _to_entity_int(data): + int_max = (2 << 30) - 1 + if data > (int_max) or data < (int_max + 1) * (-1): + return 'Edm.Int64', str(data) + else: + return 'Edm.Int32', 
str(data)
+
+
+def _to_entity_bool(value):
+    if value:
+        return 'Edm.Boolean', 'true'
+    return 'Edm.Boolean', 'false'
+
+
+def _to_entity_datetime(value):
+    return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%S')
+
+
+def _to_entity_float(value):
+    return 'Edm.Double', str(value)
+
+
+def _to_entity_property(value):
+    if value.type == 'Edm.Binary':
+        return value.type, _encode_base64(value.value)
+
+    return value.type, str(value.value)
+
+
+def _to_entity_none(value):
+    return None, None
+
+
+def _to_entity_str(value):
+    return 'Edm.String', value
+
+
+# Tables of conversions to and from entity types. We support specific
+# datatypes, and beyond that the user can use an EntityProperty to get
+# custom data type support.
+
+def _from_entity_binary(value):
+    return EntityProperty('Edm.Binary', _decode_base64_to_bytes(value))
+
+
+def _from_entity_int(value):
+    return int(value)
+
+
+def _from_entity_datetime(value):
+    format = '%Y-%m-%dT%H:%M:%S'
+    if '.' in value:
+        format = format + '.%f'
+    if value.endswith('Z'):
+        format = format + 'Z'
+    return datetime.strptime(value, format)
+
+_ENTITY_TO_PYTHON_CONVERSIONS = {
+    'Edm.Binary': _from_entity_binary,
+    'Edm.Int32': _from_entity_int,
+    'Edm.Int64': _from_entity_int,
+    'Edm.Double': float,
+    'Edm.Boolean': _to_python_bool,
+    'Edm.DateTime': _from_entity_datetime,
+}
+
+# Conversion from Python type to a function which returns a tuple of the
+# type string and content string.
+_PYTHON_TO_ENTITY_CONVERSIONS = {
+    int: _to_entity_int,
+    bool: _to_entity_bool,
+    datetime: _to_entity_datetime,
+    float: _to_entity_float,
+    EntityProperty: _to_entity_property,
+    str: _to_entity_str,
+}
+
+if sys.version_info < (3,):
+    _PYTHON_TO_ENTITY_CONVERSIONS.update({
+        long: _to_entity_int,
+        types.NoneType: _to_entity_none,
+        unicode: _to_entity_str,
+    })
+
+
+def _convert_entity_to_xml(source):
+    ''' Converts an entity object to xml to send.
+
+    The entity format is:
+    <entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
+      <title />
+      <updated>2008-09-18T23:46:19.3857256Z</updated>
+      <author>
+        <name />
+      </author>
+      <id />
+      <content type="application/xml">
+        <m:properties>
+          <d:Address>Mountain View</d:Address>
+          <d:Age m:type="Edm.Int32">23</d:Age>
+          <d:AmountDue m:type="Edm.Double">200.23</d:AmountDue>
+          <d:BinaryData m:type="Edm.Binary" m:null="true" />
+          <d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>
+          <d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince>
+          <d:IsActive m:type="Edm.Boolean">true</d:IsActive>
+          <d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders>
+          <d:PartitionKey>mypartitionkey</d:PartitionKey>
+          <d:RowKey>myrowkey1</d:RowKey>
+          <d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp>
+        </m:properties>
+      </content>
+    </entry>
+    '''
+
+    # construct the entity body included in <m:properties> and </m:properties>
+    entity_body = '<m:properties xml:space="preserve">{properties}</m:properties>'
+
+    if isinstance(source, WindowsAzureData):
+        source = vars(source)
+
+    properties_str = ''
+
+    # set properties type for types we know if value has no type info.
+ # if value has type info, then set the type to value.type + for name, value in source.items(): + mtype = '' + conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value)) + if conv is None and sys.version_info >= (3,) and value is None: + conv = _to_entity_none + if conv is None: + raise WindowsAzureError( + _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format( + type(value).__name__)) + + mtype, value = conv(value) + + # form the property node + properties_str += ''.join(['<d:', name]) + if value is None: + properties_str += ' m:null="true" />' + else: + if mtype: + properties_str += ''.join([' m:type="', mtype, '"']) + properties_str += ''.join(['>', + xml_escape(value), '</d:', name, '>']) + + if sys.version_info < (3,): + if isinstance(properties_str, unicode): + properties_str = properties_str.encode(encoding='utf-8') + + # generate the entity_body + entity_body = entity_body.format(properties=properties_str) + xmlstr = _create_entry(entity_body) + return xmlstr + + +def _convert_table_to_xml(table_name): + ''' + Create xml to send for a given table name. Since xml format for table is + the same as entity and the only difference is that table has only one + property 'TableName', so we just call _convert_entity_to_xml. + + table_name: the name of the table + ''' + return _convert_entity_to_xml({'TableName': table_name}) + + +def _convert_block_list_to_xml(block_id_list): + ''' + Convert a block list to xml to send. + + block_id_list: + a str list containing the block ids that are used in put_block_list. + Only get block from latest blocks. + ''' + if block_id_list is None: + return '' + xml = '<?xml version="1.0" encoding="utf-8"?><BlockList>' + for value in block_id_list: + xml += '<Latest>{0}</Latest>'.format(_encode_base64(value)) + + return xml + '</BlockList>' + + +def _create_blob_result(response): + blob_properties = _parse_response_for_dict(response) + return BlobResult(response.body, blob_properties) + + +def _convert_response_to_block_list(response): + ''' + Converts xml response to block list class. + ''' + blob_block_list = BlobBlockList() + + xmldoc = minidom.parseString(response.body) + for xml_block in _get_children_from_path(xmldoc, + 'BlockList', + 'CommittedBlocks', + 'Block'): + xml_block_id = _decode_base64_to_text( + _get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue) + xml_block_size = int( + _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue) + blob_block_list.committed_blocks.append( + BlobBlock(xml_block_id, xml_block_size)) + + for xml_block in _get_children_from_path(xmldoc, + 'BlockList', + 'UncommittedBlocks', + 'Block'): + xml_block_id = _decode_base64_to_text( + _get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue) + xml_block_size = int( + _get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue) + blob_block_list.uncommitted_blocks.append( + BlobBlock(xml_block_id, xml_block_size)) + + return blob_block_list + + +def _remove_prefix(name): + colon = name.find(':') + if colon != -1: + return name[colon + 1:] + return name + + +def _convert_response_to_entity(response): + if response is None: + return response + return _convert_xml_to_entity(response.body) + + +def _convert_xml_to_entity(xmlstr): + ''' Convert xml response to entity. 
+ + The format of entity: + <entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom"> + <title /> + <updated>2008-09-18T23:46:19.3857256Z</updated> + <author> + <name /> + </author> + <id /> + <content type="application/xml"> + <m:properties> + <d:Address>Mountain View</d:Address> + <d:Age m:type="Edm.Int32">23</d:Age> + <d:AmountDue m:type="Edm.Double">200.23</d:AmountDue> + <d:BinaryData m:type="Edm.Binary" m:null="true" /> + <d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode> + <d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince> + <d:IsActive m:type="Edm.Boolean">true</d:IsActive> + <d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders> + <d:PartitionKey>mypartitionkey</d:PartitionKey> + <d:RowKey>myrowkey1</d:RowKey> + <d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp> + </m:properties> + </content> + </entry> + ''' + xmldoc = minidom.parseString(xmlstr) + + xml_properties = None + for entry in _get_child_nodes(xmldoc, 'entry'): + for content in _get_child_nodes(entry, 'content'): + # TODO: Namespace + xml_properties = _get_child_nodesNS( + content, METADATA_NS, 'properties') + + if not xml_properties: + return None + + entity = Entity() + # extract each property node and get the type from attribute and node value + for xml_property in xml_properties[0].childNodes: + name = _remove_prefix(xml_property.nodeName) + # exclude the Timestamp since it is auto added by azure when + # inserting entity. We don't want this to mix with real properties + if name in ['Timestamp']: + continue + + if xml_property.firstChild: + value = xml_property.firstChild.nodeValue + else: + value = '' + + isnull = xml_property.getAttributeNS(METADATA_NS, 'null') + mtype = xml_property.getAttributeNS(METADATA_NS, 'type') + + # if not isnull and no type info, then it is a string and we just + # need the str type to hold the property. + if not isnull and not mtype: + _set_entity_attr(entity, name, value) + elif isnull == 'true': + if mtype: + property = EntityProperty(mtype, None) + else: + property = EntityProperty('Edm.String', None) + else: # need an object to hold the property + conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype) + if conv is not None: + property = conv(value) + else: + property = EntityProperty(mtype, value) + _set_entity_attr(entity, name, property) + + # extract id, updated and name value from feed entry and set them of + # rule. + for name, value in _get_entry_properties(xmlstr, True).items(): + if name in ['etag']: + _set_entity_attr(entity, name, value) + + return entity + + +def _set_entity_attr(entity, name, value): + try: + setattr(entity, name, value) + except UnicodeEncodeError: + # Python 2 doesn't support unicode attribute names, so we'll + # add them and access them directly through the dictionary + entity.__dict__[name] = value + + +def _convert_xml_to_table(xmlstr): + ''' Converts the xml response to table class. + Simply call convert_xml_to_entity and extract the table name, and add + updated and author info + ''' + table = Table() + entity = _convert_xml_to_entity(xmlstr) + setattr(table, 'name', entity.TableName) + for name, value in _get_entry_properties(xmlstr, False).items(): + setattr(table, name, value) + return table + + +def _storage_error_handler(http_error): + ''' Simple error handler for storage service. 
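+
+    Callers typically guard service calls with the exception types defined
+    in the azure package, e.g. (illustrative):
+
+        try:
+            blob_service.create_container('mycontainer', fail_on_exist=True)
+        except WindowsAzureConflictError:
+            pass  # the container already exists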
''' + return _general_error_handler(http_error) + +# make these available just from storage. +from azure.storage.blobservice import BlobService +from azure.storage.queueservice import QueueService +from azure.storage.tableservice import TableService +from azure.storage.cloudstorageaccount import CloudStorageAccount +from azure.storage.sharedaccesssignature import ( + SharedAccessSignature, + SharedAccessPolicy, + Permission, + WebResource, + ) diff --git a/awx/lib/site-packages/azure/storage/blobservice.py b/awx/lib/site-packages/azure/storage/blobservice.py new file mode 100644 index 0000000000..aca56b4584 --- /dev/null +++ b/awx/lib/site-packages/azure/storage/blobservice.py @@ -0,0 +1,2225 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +from azure import ( + WindowsAzureError, + BLOB_SERVICE_HOST_BASE, + DEV_BLOB_HOST, + _ERROR_VALUE_NEGATIVE, + _ERROR_PAGE_BLOB_SIZE_ALIGNMENT, + _convert_class_to_xml, + _dont_fail_not_exist, + _dont_fail_on_exist, + _encode_base64, + _get_request_body, + _get_request_body_bytes_only, + _int_or_none, + _parse_enum_results_list, + _parse_response, + _parse_response_for_dict, + _parse_response_for_dict_filter, + _parse_response_for_dict_prefix, + _parse_simple_list, + _str, + _str_or_none, + _update_request_uri_query_local_storage, + _validate_type_bytes, + _validate_not_none, + ) +from azure.http import HTTPRequest +from azure.storage import ( + Container, + ContainerEnumResults, + PageList, + PageRange, + SignedIdentifiers, + StorageServiceProperties, + _convert_block_list_to_xml, + _convert_response_to_block_list, + _create_blob_result, + _parse_blob_enum_results_list, + _update_storage_blob_header, + ) +from azure.storage.storageclient import _StorageClient +from os import path +import sys +if sys.version_info >= (3,): + from io import BytesIO +else: + from cStringIO import StringIO as BytesIO + +# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT +_PAGE_SIZE = 512 + +class BlobService(_StorageClient): + + ''' + This is the main class managing Blob resources. + ''' + + def __init__(self, account_name=None, account_key=None, protocol='https', + host_base=BLOB_SERVICE_HOST_BASE, dev_host=DEV_BLOB_HOST): + ''' + account_name: your storage account name, required for all operations. + account_key: your storage account key, required for all operations. + protocol: Optional. Protocol. Defaults to https. + host_base: + Optional. Live host base url. Defaults to Azure url. Override this + for on-premise. + dev_host: Optional. Dev host url. Defaults to localhost. 
+ ''' + self._BLOB_MAX_DATA_SIZE = 64 * 1024 * 1024 + self._BLOB_MAX_CHUNK_DATA_SIZE = 4 * 1024 * 1024 + super(BlobService, self).__init__( + account_name, account_key, protocol, host_base, dev_host) + + def make_blob_url(self, container_name, blob_name, account_name=None, + protocol=None, host_base=None): + ''' + Creates the url to access a blob. + + container_name: Name of container. + blob_name: Name of blob. + account_name: + Name of the storage account. If not specified, uses the account + specified when BlobService was initialized. + protocol: + Protocol to use: 'http' or 'https'. If not specified, uses the + protocol specified when BlobService was initialized. + host_base: + Live host base url. If not specified, uses the host base specified + when BlobService was initialized. + ''' + if not account_name: + account_name = self.account_name + if not protocol: + protocol = self.protocol + if not host_base: + host_base = self.host_base + + return '{0}://{1}{2}/{3}/{4}'.format(protocol, + account_name, + host_base, + container_name, + blob_name) + + def list_containers(self, prefix=None, marker=None, maxresults=None, + include=None): + ''' + The List Containers operation returns a list of the containers under + the specified account. + + prefix: + Optional. Filters the results to return only containers whose names + begin with the specified prefix. + marker: + Optional. A string value that identifies the portion of the list to + be returned with the next list operation. + maxresults: + Optional. Specifies the maximum number of containers to return. + include: + Optional. Include this parameter to specify that the container's + metadata be returned as part of the response body. set this + parameter to string 'metadata' to get container's metadata. + ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/?comp=list' + request.query = [ + ('prefix', _str_or_none(prefix)), + ('marker', _str_or_none(marker)), + ('maxresults', _int_or_none(maxresults)), + ('include', _str_or_none(include)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_enum_results_list(response, + ContainerEnumResults, + "Containers", + Container) + + def create_container(self, container_name, x_ms_meta_name_values=None, + x_ms_blob_public_access=None, fail_on_exist=False): + ''' + Creates a new container under the specified account. If the container + with the same name already exists, the operation fails. + + container_name: Name of container to create. + x_ms_meta_name_values: + Optional. A dict with name_value pairs to associate with the + container as metadata. Example:{'Category':'test'} + x_ms_blob_public_access: + Optional. Possible values include: container, blob + fail_on_exist: + specify whether to throw an exception when the container exists. 
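+
+        Example (illustrative; the names and metadata are placeholders):
+
+            created = blob_service.create_container(
+                'mycontainer',
+                x_ms_meta_name_values={'category': 'test'},
+                x_ms_blob_public_access='container')
+            # created is False when the container already existed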
+ ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '?restype=container' + request.headers = [ + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + if not fail_on_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_on_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_container_properties(self, container_name, x_ms_lease_id=None): + ''' + Returns all user-defined metadata and system properties for the + specified container. + + container_name: Name of existing container. + x_ms_lease_id: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '?restype=container' + request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict(response) + + def get_container_metadata(self, container_name, x_ms_lease_id=None): + ''' + Returns all user-defined metadata for the specified container. The + metadata will be in returned dictionary['x-ms-meta-(name)']. + + container_name: Name of existing container. + x_ms_lease_id: + If specified, get_container_metadata only succeeds if the + container's lease is active and matches this ID. + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '?restype=container&comp=metadata' + request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta']) + + def set_container_metadata(self, container_name, + x_ms_meta_name_values=None, x_ms_lease_id=None): + ''' + Sets one or more user-defined name-value pairs for the specified + container. + + container_name: Name of existing container. + x_ms_meta_name_values: + A dict containing name, value for metadata. + Example: {'category':'test'} + x_ms_lease_id: + If specified, set_container_metadata only succeeds if the + container's lease is active and matches this ID. 
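+
+        Example (illustrative):
+
+            blob_service.set_container_metadata(
+                'mycontainer', x_ms_meta_name_values={'category': 'test'})
+            metadata = blob_service.get_container_metadata('mycontainer')
+            # metadata['x-ms-meta-category'] == 'test'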
+ ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '?restype=container&comp=metadata' + request.headers = [ + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def get_container_acl(self, container_name, x_ms_lease_id=None): + ''' + Gets the permissions for the specified container. + + container_name: Name of existing container. + x_ms_lease_id: + If specified, get_container_acl only succeeds if the + container's lease is active and matches this ID. + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '?restype=container&comp=acl' + request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response(response, SignedIdentifiers) + + def set_container_acl(self, container_name, signed_identifiers=None, + x_ms_blob_public_access=None, x_ms_lease_id=None): + ''' + Sets the permissions for the specified container. + + container_name: Name of existing container. + signed_identifiers: SignedIdentifers instance + x_ms_blob_public_access: + Optional. Possible values include: container, blob + x_ms_lease_id: + If specified, set_container_acl only succeeds if the + container's lease is active and matches this ID. + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '?restype=container&comp=acl' + request.headers = [ + ('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access)), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ] + request.body = _get_request_body( + _convert_class_to_xml(signed_identifiers)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def delete_container(self, container_name, fail_not_exist=False, + x_ms_lease_id=None): + ''' + Marks the specified container for deletion. + + container_name: Name of container to delete. + fail_not_exist: + Specify whether to throw an exception when the container doesn't + exist. + x_ms_lease_id: Required if the container has an active lease. 
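+
+        Example (illustrative):
+
+            deleted = blob_service.delete_container('mycontainer')
+            # deleted is False when the container did not exist; pass
+            # fail_not_exist=True to propagate the error instead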
+ ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '?restype=container' + request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + if not fail_not_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def lease_container(self, container_name, x_ms_lease_action, + x_ms_lease_id=None, x_ms_lease_duration=60, + x_ms_lease_break_period=None, + x_ms_proposed_lease_id=None): + ''' + Establishes and manages a lock on a container for delete operations. + The lock duration can be 15 to 60 seconds, or can be infinite. + + container_name: Name of existing container. + x_ms_lease_action: + Required. Possible values: acquire|renew|release|break|change + x_ms_lease_id: Required if the container has an active lease. + x_ms_lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. For backwards compatibility, the default is + 60, and the value is only used on an acquire operation. + x_ms_lease_break_period: + Optional. For a break operation, this is the proposed duration of + seconds that the lease should continue before it is broken, between + 0 and 60 seconds. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining + on the lease is used. A new lease will not be available before the + break period has expired, but the lease may be held for longer than + the break period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + x_ms_proposed_lease_id: + Optional for acquire, required for change. Proposed lease ID, in a + GUID string format. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('x_ms_lease_action', x_ms_lease_action) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '?restype=container&comp=lease' + request.headers = [ + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ('x-ms-lease-action', _str_or_none(x_ms_lease_action)), + ('x-ms-lease-duration', + _str_or_none( + x_ms_lease_duration if x_ms_lease_action == 'acquire'\ + else None)), + ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)), + ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)), + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict_filter( + response, + filter=['x-ms-lease-id', 'x-ms-lease-time']) + + def list_blobs(self, container_name, prefix=None, marker=None, + maxresults=None, include=None, delimiter=None): + ''' + Returns the list of blobs under the specified container. 
+ + container_name: Name of existing container. + prefix: + Optional. Filters the results to return only blobs whose names + begin with the specified prefix. + marker: + Optional. A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns + a marker value within the response body if the list returned was + not complete. The marker value may then be used in a subsequent + call to request the next set of list items. The marker value is + opaque to the client. + maxresults: + Optional. Specifies the maximum number of blobs to return, + including all BlobPrefix elements. If the request does not specify + maxresults or specifies a value greater than 5,000, the server will + return up to 5,000 items. Setting maxresults to a value less than + or equal to zero results in error response code 400 (Bad Request). + include: + Optional. Specifies one or more datasets to include in the + response. To specify more than one of these options on the URI, + you must separate each option with a comma. Valid values are: + snapshots: + Specifies that snapshots should be included in the + enumeration. Snapshots are listed from oldest to newest in + the response. + metadata: + Specifies that blob metadata be returned in the response. + uncommittedblobs: + Specifies that blobs for which blocks have been uploaded, + but which have not been committed using Put Block List + (REST API), be included in the response. + copy: + Version 2012-02-12 and newer. Specifies that metadata + related to any current or previous Copy Blob operation + should be included in the response. + delimiter: + Optional. When the request includes this parameter, the operation + returns a BlobPrefix element in the response body that acts as a + placeholder for all blobs whose names begin with the same + substring up to the appearance of the delimiter character. The + delimiter may be a single character or a string. + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '?restype=container&comp=list' + request.query = [ + ('prefix', _str_or_none(prefix)), + ('delimiter', _str_or_none(delimiter)), + ('marker', _str_or_none(marker)), + ('maxresults', _int_or_none(maxresults)), + ('include', _str_or_none(include)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_blob_enum_results_list(response) + + def set_blob_service_properties(self, storage_service_properties, + timeout=None): + ''' + Sets the properties of a storage account's Blob service, including + Windows Azure Storage Analytics. You can also use this operation to + set the default request version for all incoming requests that do not + have a version specified. + + storage_service_properties: a StorageServiceProperties object. + timeout: Optional. The timeout parameter is expressed in seconds. 
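+
+        Example (an illustrative sketch using the classes defined in
+        azure.storage):
+
+            properties = StorageServiceProperties()
+            properties.logging.read = True
+            properties.logging.write = True
+            properties.logging.retention_policy.enabled = True
+            properties.logging.retention_policy.days = 7
+            blob_service.set_blob_service_properties(properties)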
+ ''' + _validate_not_none('storage_service_properties', + storage_service_properties) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/?restype=service&comp=properties' + request.query = [('timeout', _int_or_none(timeout))] + request.body = _get_request_body( + _convert_class_to_xml(storage_service_properties)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def get_blob_service_properties(self, timeout=None): + ''' + Gets the properties of a storage account's Blob service, including + Windows Azure Storage Analytics. + + timeout: Optional. The timeout parameter is expressed in seconds. + ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/?restype=service&comp=properties' + request.query = [('timeout', _int_or_none(timeout))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response(response, StorageServiceProperties) + + def get_blob_properties(self, container_name, blob_name, + x_ms_lease_id=None): + ''' + Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. + + container_name: Name of existing container. + blob_name: Name of existing blob. + x_ms_lease_id: Required if the blob has an active lease. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'HEAD' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '/' + _str(blob_name) + '' + request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict(response) + + def set_blob_properties(self, container_name, blob_name, + x_ms_blob_cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_md5=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_lease_id=None): + ''' + Sets system properties on the blob. + + container_name: Name of existing container. + blob_name: Name of existing blob. + x_ms_blob_cache_control: + Optional. Modifies the cache control string for the blob. + x_ms_blob_content_type: Optional. Sets the blob's content type. + x_ms_blob_content_md5: Optional. Sets the blob's MD5 hash. + x_ms_blob_content_encoding: Optional. Sets the blob's content encoding. + x_ms_blob_content_language: Optional. Sets the blob's content language. + x_ms_lease_id: Required if the blob has an active lease. 
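+
+        Example (an illustrative sketch; the container and blob names are
+        hypothetical and blob_service is an assumed BlobService instance):
+            blob_service.set_blob_properties(
+                'mycontainer', 'myblob',
+                x_ms_blob_content_type='text/plain; charset=utf-8',
+                x_ms_blob_cache_control='max-age=3600')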
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=properties' + request.headers = [ + ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)), + ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)), + ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)), + ('x-ms-blob-content-encoding', + _str_or_none(x_ms_blob_content_encoding)), + ('x-ms-blob-content-language', + _str_or_none(x_ms_blob_content_language)), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, + content_encoding=None, content_language=None, + content_md5=None, cache_control=None, + x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, x_ms_meta_name_values=None, + x_ms_lease_id=None, x_ms_blob_content_length=None, + x_ms_blob_sequence_number=None): + ''' + Creates a new block blob or page blob, or updates the content of an + existing block blob. + + See put_block_blob_from_* and put_page_blob_from_* for high level + functions that handle the creation and upload of large blobs with + automatic chunking and progress notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + blob: + For BlockBlob: + Content of blob as bytes (size < 64MB). For larger size, you + must call put_block and put_block_list to set content of blob. + For PageBlob: + Use None and call put_page to set content of blob. + x_ms_blob_type: Required. Could be BlockBlob or PageBlob. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. + content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + x_ms_blob_content_length: + Required for page blobs. This header specifies the maximum size + for the page blob, up to 1 TB. The page blob size must be aligned + to a 512-byte boundary. 
+ x_ms_blob_sequence_number: + Optional. Set for page blobs only. The sequence number is a + user-controlled value that you can use to track requests. The + value of the sequence number must be between 0 and 2^63 - 1. The + default value is 0. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('x_ms_blob_type', x_ms_blob_type) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '/' + _str(blob_name) + '' + request.headers = [ + ('x-ms-blob-type', _str_or_none(x_ms_blob_type)), + ('Content-Encoding', _str_or_none(content_encoding)), + ('Content-Language', _str_or_none(content_language)), + ('Content-MD5', _str_or_none(content_md5)), + ('Cache-Control', _str_or_none(cache_control)), + ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)), + ('x-ms-blob-content-encoding', + _str_or_none(x_ms_blob_content_encoding)), + ('x-ms-blob-content-language', + _str_or_none(x_ms_blob_content_language)), + ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)), + ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)), + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ('x-ms-blob-content-length', + _str_or_none(x_ms_blob_content_length)), + ('x-ms-blob-sequence-number', + _str_or_none(x_ms_blob_sequence_number)) + ] + request.body = _get_request_body_bytes_only('blob', blob) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def put_block_blob_from_path(self, container_name, blob_name, file_path, + content_encoding=None, content_language=None, + content_md5=None, cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, + x_ms_meta_name_values=None, + x_ms_lease_id=None, progress_callback=None): + ''' + Creates a new block blob from a file path, or updates the content of an + existing block blob, with automatic chunking and progress notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + file_path: Path of the file to upload as the blob content. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. + content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. 
Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('file_path', file_path) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '/' + _str(blob_name) + '' + request.headers = [ + ('x-ms-blob-type', 'BlockBlob'), + ('Content-Encoding', _str_or_none(content_encoding)), + ('Content-Language', _str_or_none(content_language)), + ('Content-MD5', _str_or_none(content_md5)), + ('Cache-Control', _str_or_none(cache_control)), + ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)), + ('x-ms-blob-content-encoding', + _str_or_none(x_ms_blob_content_encoding)), + ('x-ms-blob-content-language', + _str_or_none(x_ms_blob_content_language)), + ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)), + ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)), + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ] + + count = path.getsize(file_path) + with open(file_path, 'rb') as stream: + self.put_block_blob_from_file(container_name, + blob_name, + stream, + count, + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id, + progress_callback) + + def put_block_blob_from_file(self, container_name, blob_name, stream, + count=None, content_encoding=None, + content_language=None, content_md5=None, + cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, + x_ms_meta_name_values=None, + x_ms_lease_id=None, progress_callback=None): + ''' + Creates a new block blob from a file/stream, or updates the content of + an existing block blob, with automatic chunking and progress + notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + stream: Opened file/stream to upload as the blob content. + count: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. + content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. 
The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('stream', stream) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '/' + _str(blob_name) + '' + request.headers = [ + ('x-ms-blob-type', 'BlockBlob'), + ('Content-Encoding', _str_or_none(content_encoding)), + ('Content-Language', _str_or_none(content_language)), + ('Content-MD5', _str_or_none(content_md5)), + ('Cache-Control', _str_or_none(cache_control)), + ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)), + ('x-ms-blob-content-encoding', + _str_or_none(x_ms_blob_content_encoding)), + ('x-ms-blob-content-language', + _str_or_none(x_ms_blob_content_language)), + ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)), + ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)), + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ] + + if count and count < self._BLOB_MAX_DATA_SIZE: + if progress_callback: + progress_callback(0, count) + + data = stream.read(count) + self.put_blob(container_name, + blob_name, + data, + 'BlockBlob', + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id) + + if progress_callback: + progress_callback(count, count) + else: + if progress_callback: + progress_callback(0, count) + + self.put_blob(container_name, + blob_name, + None, + 'BlockBlob', + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id) + + remain_bytes = count + block_ids = [] + block_index = 0 + index = 0 + while True: + request_count = self._BLOB_MAX_CHUNK_DATA_SIZE\ + if remain_bytes is None else min( + remain_bytes, + self._BLOB_MAX_CHUNK_DATA_SIZE) + data = stream.read(request_count) + if data: + length = len(data) + index += length + remain_bytes = remain_bytes - \ + length if remain_bytes else None + block_id = '{0:08d}'.format(block_index) + self.put_block(container_name, blob_name, + data, block_id, x_ms_lease_id=x_ms_lease_id) + block_ids.append(block_id) + block_index += 1 + if progress_callback: + progress_callback(index, count) + else: + break + + self.put_block_list(container_name, blob_name, block_ids) + + def put_block_blob_from_bytes(self, container_name, blob_name, blob, + index=0, count=None, content_encoding=None, + content_language=None, content_md5=None, + 
cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, + x_ms_meta_name_values=None, + x_ms_lease_id=None, progress_callback=None): + ''' + Creates a new block blob from an array of bytes, or updates the content + of an existing block blob, with automatic chunking and progress + notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + blob: Content of blob as an array of bytes. + index: Start index in the array of bytes. + count: + Number of bytes to upload. Set to None or negative value to upload + all bytes starting from index. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. + content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. 
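+
+        Example (an illustrative sketch; names are hypothetical and
+        blob_service is an assumed BlobService instance):
+            data = b'hello world' * 1024
+            # Upload everything after the first 11 bytes of the buffer.
+            blob_service.put_block_blob_from_bytes(
+                'mycontainer', 'myblob', data, index=11)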
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('blob', blob) + _validate_not_none('index', index) + _validate_type_bytes('blob', blob) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '/' + _str(blob_name) + '' + request.headers = [ + ('x-ms-blob-type', 'BlockBlob'), + ('Content-Encoding', _str_or_none(content_encoding)), + ('Content-Language', _str_or_none(content_language)), + ('Content-MD5', _str_or_none(content_md5)), + ('Cache-Control', _str_or_none(cache_control)), + ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)), + ('x-ms-blob-content-encoding', + _str_or_none(x_ms_blob_content_encoding)), + ('x-ms-blob-content-language', + _str_or_none(x_ms_blob_content_language)), + ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)), + ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)), + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ] + + if index < 0: + raise TypeError(_ERROR_VALUE_NEGATIVE.format('index')) + + if count is None or count < 0: + count = len(blob) - index + + if count < self._BLOB_MAX_DATA_SIZE: + if progress_callback: + progress_callback(0, count) + + data = blob[index: index + count] + self.put_blob(container_name, + blob_name, + data, + 'BlockBlob', + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id) + + if progress_callback: + progress_callback(count, count) + else: + stream = BytesIO(blob) + stream.seek(index) + + self.put_block_blob_from_file(container_name, + blob_name, + stream, + count, + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id, + progress_callback) + + def put_block_blob_from_text(self, container_name, blob_name, text, + text_encoding='utf-8', + content_encoding=None, content_language=None, + content_md5=None, cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, + x_ms_meta_name_values=None, + x_ms_lease_id=None, progress_callback=None): + ''' + Creates a new block blob from str/unicode, or updates the content of an + existing block blob, with automatic chunking and progress notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + text: Text to upload to the blob. + text_encoding: Encoding to use to convert the text to bytes. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. + content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. 
When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('text', text) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '/' + _str(blob_name) + '' + request.headers = [ + ('x-ms-blob-type', 'BlockBlob'), + ('Content-Encoding', _str_or_none(content_encoding)), + ('Content-Language', _str_or_none(content_language)), + ('Content-MD5', _str_or_none(content_md5)), + ('Cache-Control', _str_or_none(cache_control)), + ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)), + ('x-ms-blob-content-encoding', + _str_or_none(x_ms_blob_content_encoding)), + ('x-ms-blob-content-language', + _str_or_none(x_ms_blob_content_language)), + ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)), + ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)), + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ] + + if not isinstance(text, bytes): + _validate_not_none('text_encoding', text_encoding) + text = text.encode(text_encoding) + + self.put_block_blob_from_bytes(container_name, + blob_name, + text, + 0, + len(text), + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id, + progress_callback) + + def put_page_blob_from_path(self, container_name, blob_name, file_path, + content_encoding=None, content_language=None, + content_md5=None, cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, + x_ms_meta_name_values=None, + x_ms_lease_id=None, + x_ms_blob_sequence_number=None, + progress_callback=None): + ''' + Creates a new page blob from a file path, or updates the content of an + existing page blob, with automatic chunking and progress notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + file_path: Path of the file to upload as the blob content. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. 
+ content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + x_ms_blob_sequence_number: + Optional. Set for page blobs only. The sequence number is a + user-controlled value that you can use to track requests. The + value of the sequence number must be between 0 and 2^63 - 1. The + default value is 0. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('file_path', file_path) + + count = path.getsize(file_path) + with open(file_path, 'rb') as stream: + self.put_page_blob_from_file(container_name, + blob_name, + stream, + count, + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id, + x_ms_blob_sequence_number, + progress_callback) + + def put_page_blob_from_file(self, container_name, blob_name, stream, count, + content_encoding=None, content_language=None, + content_md5=None, cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, + x_ms_meta_name_values=None, + x_ms_lease_id=None, + x_ms_blob_sequence_number=None, + progress_callback=None): + ''' + Creates a new page blob from a file/stream, or updates the content of an + existing page blob, with automatic chunking and progress notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + stream: Opened file/stream to upload as the blob content. + count: + Number of bytes to read from the stream. This is required, a page + blob cannot be created if the count is unknown. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. + content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. 
When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + x_ms_blob_sequence_number: + Optional. Set for page blobs only. The sequence number is a + user-controlled value that you can use to track requests. The + value of the sequence number must be between 0 and 2^63 - 1. The + default value is 0. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('stream', stream) + _validate_not_none('count', count) + + if count < 0: + raise TypeError(_ERROR_VALUE_NEGATIVE.format('count')) + + if count % _PAGE_SIZE != 0: + raise TypeError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count)) + + if progress_callback: + progress_callback(0, count) + + self.put_blob(container_name, + blob_name, + b'', + 'PageBlob', + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id, + count, + x_ms_blob_sequence_number) + + remain_bytes = count + page_start = 0 + while True: + request_count = min(remain_bytes, self._BLOB_MAX_CHUNK_DATA_SIZE) + data = stream.read(request_count) + if data: + length = len(data) + remain_bytes = remain_bytes - length + page_end = page_start + length - 1 + self.put_page(container_name, + blob_name, + data, + 'bytes={0}-{1}'.format(page_start, page_end), + 'update', + x_ms_lease_id=x_ms_lease_id) + page_start = page_start + length + + if progress_callback: + progress_callback(page_start, count) + else: + break + + def put_page_blob_from_bytes(self, container_name, blob_name, blob, + index=0, count=None, content_encoding=None, + content_language=None, content_md5=None, + cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, + x_ms_blob_cache_control=None, + x_ms_meta_name_values=None, + x_ms_lease_id=None, + x_ms_blob_sequence_number=None, + progress_callback=None): + ''' + Creates a new page blob from an array of bytes, or updates the content + of an existing page blob, with automatic chunking and progress + notifications. + + container_name: Name of existing container. + blob_name: Name of blob to create or update. + blob: Content of blob as an array of bytes. + index: Start index in the array of bytes. + count: + Number of bytes to upload. Set to None or negative value to upload + all bytes starting from index. + content_encoding: + Optional. Specifies which content encodings have been applied to + the blob. 
This value is returned to the client when the Get Blob + (REST API) operation is performed on the blob resource. The client + can use this value when returned to decode the blob content. + content_language: + Optional. Specifies the natural languages used by this resource. + content_md5: + Optional. An MD5 hash of the blob content. This hash is used to + verify the integrity of the blob during transport. When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. If the two hashes do not match, the + operation will fail with error code 400 (Bad Request). + cache_control: + Optional. The Blob service stores this value but does not use or + modify it. + x_ms_blob_content_type: Optional. Set the blob's content type. + x_ms_blob_content_encoding: Optional. Set the blob's content encoding. + x_ms_blob_content_language: Optional. Set the blob's content language. + x_ms_blob_content_md5: Optional. Set the blob's MD5 hash. + x_ms_blob_cache_control: Optional. Sets the blob's cache control. + x_ms_meta_name_values: A dict containing name, value for metadata. + x_ms_lease_id: Required if the blob has an active lease. + x_ms_blob_sequence_number: + Optional. Set for page blobs only. The sequence number is a + user-controlled value that you can use to track requests. The + value of the sequence number must be between 0 and 2^63 - 1. The + default value is 0. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('blob', blob) + _validate_type_bytes('blob', blob) + + if index < 0: + raise TypeError(_ERROR_VALUE_NEGATIVE.format('index')) + + if count is None or count < 0: + count = len(blob) - index + + stream = BytesIO(blob) + stream.seek(index) + + self.put_page_blob_from_file(container_name, + blob_name, + stream, + count, + content_encoding, + content_language, + content_md5, + cache_control, + x_ms_blob_content_type, + x_ms_blob_content_encoding, + x_ms_blob_content_language, + x_ms_blob_content_md5, + x_ms_blob_cache_control, + x_ms_meta_name_values, + x_ms_lease_id, + x_ms_blob_sequence_number, + progress_callback) + + def get_blob(self, container_name, blob_name, snapshot=None, + x_ms_range=None, x_ms_lease_id=None, + x_ms_range_get_content_md5=None): + ''' + Reads or downloads a blob from the system, including its metadata and + properties. + + See get_blob_to_* for high level functions that handle the download + of large blobs with automatic chunking and progress notifications. + + container_name: Name of existing container. + blob_name: Name of existing blob. + snapshot: + Optional. The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + x_ms_range: + Optional. Return only the bytes of the blob in the specified range. + x_ms_lease_id: Required if the blob has an active lease. + x_ms_range_get_content_md5: + Optional. When this header is set to true and specified together + with the Range header, the service returns the MD5 hash for the + range, as long as the range is less than or equal to 4 MB in size. 
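+
+        Example (an illustrative sketch; names are hypothetical and
+        blob_service is an assumed BlobService instance):
+            # Fetch only the first kilobyte of the blob.
+            first_kb = blob_service.get_blob(
+                'mycontainer', 'myblob', x_ms_range='bytes=0-1023')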
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(container_name) + '/' + _str(blob_name) + '' + request.headers = [ + ('x-ms-range', _str_or_none(x_ms_range)), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ('x-ms-range-get-content-md5', + _str_or_none(x_ms_range_get_content_md5)) + ] + request.query = [('snapshot', _str_or_none(snapshot))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request, None) + + return _create_blob_result(response) + + def get_blob_to_path(self, container_name, blob_name, file_path, + open_mode='wb', snapshot=None, x_ms_lease_id=None, + progress_callback=None): + ''' + Downloads a blob to a file path, with automatic chunking and progress + notifications. + + container_name: Name of existing container. + blob_name: Name of existing blob. + file_path: Path of file to write to. + open_mode: Mode to use when opening the file. + snapshot: + Optional. The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('file_path', file_path) + _validate_not_none('open_mode', open_mode) + + with open(file_path, open_mode) as stream: + self.get_blob_to_file(container_name, + blob_name, + stream, + snapshot, + x_ms_lease_id, + progress_callback) + + def get_blob_to_file(self, container_name, blob_name, stream, + snapshot=None, x_ms_lease_id=None, + progress_callback=None): + ''' + Downloads a blob to a file/stream, with automatic chunking and progress + notifications. + + container_name: Name of existing container. + blob_name: Name of existing blob. + stream: Opened file/stream to write to. + snapshot: + Optional. The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob. 
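+
+        Example (an illustrative sketch; names are hypothetical and
+        blob_service is an assumed BlobService instance):
+            def report(current, total):
+                print('{0} of {1} bytes'.format(current, total))
+
+            with open('local_copy.dat', 'wb') as stream:
+                blob_service.get_blob_to_file(
+                    'mycontainer', 'myblob', stream,
+                    progress_callback=report)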
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('stream', stream) + + props = self.get_blob_properties(container_name, blob_name) + blob_size = int(props['content-length']) + + if blob_size < self._BLOB_MAX_DATA_SIZE: + if progress_callback: + progress_callback(0, blob_size) + + data = self.get_blob(container_name, + blob_name, + snapshot, + x_ms_lease_id=x_ms_lease_id) + + stream.write(data) + + if progress_callback: + progress_callback(blob_size, blob_size) + else: + if progress_callback: + progress_callback(0, blob_size) + + index = 0 + while index < blob_size: + chunk_range = 'bytes={}-{}'.format( + index, + index + self._BLOB_MAX_CHUNK_DATA_SIZE - 1) + data = self.get_blob( + container_name, blob_name, x_ms_range=chunk_range) + length = len(data) + index += length + if length > 0: + stream.write(data) + if progress_callback: + progress_callback(index, blob_size) + if length < self._BLOB_MAX_CHUNK_DATA_SIZE: + break + else: + break + + def get_blob_to_bytes(self, container_name, blob_name, snapshot=None, + x_ms_lease_id=None, progress_callback=None): + ''' + Downloads a blob as an array of bytes, with automatic chunking and + progress notifications. + + container_name: Name of existing container. + blob_name: Name of existing blob. + snapshot: + Optional. The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + + stream = BytesIO() + self.get_blob_to_file(container_name, + blob_name, + stream, + snapshot, + x_ms_lease_id, + progress_callback) + + return stream.getvalue() + + def get_blob_to_text(self, container_name, blob_name, text_encoding='utf-8', + snapshot=None, x_ms_lease_id=None, + progress_callback=None): + ''' + Downloads a blob as unicode text, with automatic chunking and progress + notifications. + + container_name: Name of existing container. + blob_name: Name of existing blob. + text_encoding: Encoding to use when decoding the blob data. + snapshot: + Optional. The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + x_ms_lease_id: Required if the blob has an active lease. + progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('text_encoding', text_encoding) + + result = self.get_blob_to_bytes(container_name, + blob_name, + snapshot, + x_ms_lease_id, + progress_callback) + + return result.decode(text_encoding) + + def get_blob_metadata(self, container_name, blob_name, snapshot=None, + x_ms_lease_id=None): + ''' + Returns all user-defined metadata for the specified blob or snapshot. + + container_name: Name of existing container. + blob_name: Name of existing blob. + snapshot: + Optional. The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + x_ms_lease_id: Required if the blob has an active lease. 
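+
+        Example (an illustrative sketch; names are hypothetical and
+        blob_service is an assumed BlobService instance):
+            meta = blob_service.get_blob_metadata('mycontainer', 'myblob')
+            # meta is a dict built from the blob's x-ms-meta-* headers.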
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=metadata' + request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))] + request.query = [('snapshot', _str_or_none(snapshot))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict_prefix(response, prefixes=['x-ms-meta']) + + def set_blob_metadata(self, container_name, blob_name, + x_ms_meta_name_values=None, x_ms_lease_id=None): + ''' + Sets user-defined metadata for the specified blob as one or more + name-value pairs. + + container_name: Name of existing container. + blob_name: Name of existing blob. + x_ms_meta_name_values: Dict containing name and value pairs. + x_ms_lease_id: Required if the blob has an active lease. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=metadata' + request.headers = [ + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def lease_blob(self, container_name, blob_name, x_ms_lease_action, + x_ms_lease_id=None, x_ms_lease_duration=60, + x_ms_lease_break_period=None, x_ms_proposed_lease_id=None): + ''' + Establishes and manages a one-minute lock on a blob for write + operations. + + container_name: Name of existing container. + blob_name: Name of existing blob. + x_ms_lease_action: + Required. Possible values: acquire|renew|release|break|change + x_ms_lease_id: Required if the blob has an active lease. + x_ms_lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. For backwards compatibility, the default is + 60, and the value is only used on an acquire operation. + x_ms_lease_break_period: + Optional. For a break operation, this is the proposed duration of + seconds that the lease should continue before it is broken, between + 0 and 60 seconds. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining + on the lease is used. A new lease will not be available before the + break period has expired, but the lease may be held for longer than + the break period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + x_ms_proposed_lease_id: + Optional for acquire, required for change. Proposed lease ID, in a + GUID string format. 
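+
+        Example (an illustrative sketch; names are hypothetical and
+        blob_service is an assumed BlobService instance):
+            lease = blob_service.lease_blob(
+                'mycontainer', 'myblob', 'acquire', x_ms_lease_duration=15)
+            lease_id = lease['x-ms-lease-id']
+            # ... perform writes against the blob using lease_id ...
+            blob_service.lease_blob(
+                'mycontainer', 'myblob', 'release', x_ms_lease_id=lease_id)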
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('x_ms_lease_action', x_ms_lease_action) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=lease' + request.headers = [ + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)), + ('x-ms-lease-action', _str_or_none(x_ms_lease_action)), + ('x-ms-lease-duration', _str_or_none(x_ms_lease_duration\ + if x_ms_lease_action == 'acquire' else None)), + ('x-ms-lease-break-period', _str_or_none(x_ms_lease_break_period)), + ('x-ms-proposed-lease-id', _str_or_none(x_ms_proposed_lease_id)), + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict_filter( + response, + filter=['x-ms-lease-id', 'x-ms-lease-time']) + + def snapshot_blob(self, container_name, blob_name, + x_ms_meta_name_values=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, + if_none_match=None, x_ms_lease_id=None): + ''' + Creates a read-only snapshot of a blob. + + container_name: Name of existing container. + blob_name: Name of existing blob. + x_ms_meta_name_values: Optional. Dict containing name and value pairs. + if_modified_since: Optional. Datetime string. + if_unmodified_since: DateTime string. + if_match: + Optional. snapshot the blob only if its ETag value matches the + value specified. + if_none_match: Optional. An ETag value + x_ms_lease_id: Required if the blob has an active lease. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=snapshot' + request.headers = [ + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('If-Modified-Since', _str_or_none(if_modified_since)), + ('If-Unmodified-Since', _str_or_none(if_unmodified_since)), + ('If-Match', _str_or_none(if_match)), + ('If-None-Match', _str_or_none(if_none_match)), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict_filter( + response, + filter=['x-ms-snapshot', 'etag', 'last-modified']) + + def copy_blob(self, container_name, blob_name, x_ms_copy_source, + x_ms_meta_name_values=None, + x_ms_source_if_modified_since=None, + x_ms_source_if_unmodified_since=None, + x_ms_source_if_match=None, x_ms_source_if_none_match=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, x_ms_lease_id=None, + x_ms_source_lease_id=None): + ''' + Copies a blob to a destination within the storage account. + + container_name: Name of existing container. + blob_name: Name of existing blob. + x_ms_copy_source: + URL up to 2 KB in length that specifies a blob. A source blob in + the same account can be private, but a blob in another account + must be public or accept credentials included in this URL, such as + a Shared Access Signature. 
Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+        x_ms_meta_name_values: Optional. Dict containing name and value pairs.
+        x_ms_source_if_modified_since:
+            Optional. A DateTime value. Specify this conditional header to
+            copy the blob only if the source blob has been modified since the
+            specified date/time.
+        x_ms_source_if_unmodified_since:
+            Optional. A DateTime value. Specify this conditional header to
+            copy the blob only if the source blob has not been modified since
+            the specified date/time.
+        x_ms_source_if_match:
+            Optional. An ETag value. Specify this conditional header to copy
+            the source blob only if its ETag matches the value specified.
+        x_ms_source_if_none_match:
+            Optional. An ETag value. Specify this conditional header to copy
+            the source blob only if its ETag does not match the value
+            specified.
+        if_modified_since: Optional. DateTime string.
+        if_unmodified_since: Optional. DateTime string.
+        if_match:
+            Optional. Copy the blob only if its ETag value matches the
+            value specified.
+        if_none_match: Optional. An ETag value.
+        x_ms_lease_id: Required if the blob has an active lease.
+        x_ms_source_lease_id:
+            Optional. Specify this to perform the Copy Blob operation only if
+            the lease ID given matches the active lease ID of the source blob.
+        '''
+        _validate_not_none('container_name', container_name)
+        _validate_not_none('blob_name', blob_name)
+        _validate_not_none('x_ms_copy_source', x_ms_copy_source)
+
+        if x_ms_copy_source.startswith('/'):
+            # Backwards compatibility for earlier versions of the SDK where
+            # the copy source can be in the following formats:
+            #   - Blob in named container:
+            #     /accountName/containerName/blobName
+            #   - Snapshot in named container:
+            #     /accountName/containerName/blobName?snapshot=<DateTime>
+            #   - Blob in root container:
+            #     /accountName/blobName
+            #   - Snapshot in root container:
+            #     /accountName/blobName?snapshot=<DateTime>
+            account, _, source =\
+                x_ms_copy_source.partition('/')[2].partition('/')
+            x_ms_copy_source = self.protocol + '://' + \
+                account + self.host_base + '/' + source
+
+        request = HTTPRequest()
+        request.method = 'PUT'
+        request.host = self._get_host()
+        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
+        request.headers = [
+            ('x-ms-copy-source', _str_or_none(x_ms_copy_source)),
+            ('x-ms-meta-name-values', x_ms_meta_name_values),
+            ('x-ms-source-if-modified-since',
+             _str_or_none(x_ms_source_if_modified_since)),
+            ('x-ms-source-if-unmodified-since',
+             _str_or_none(x_ms_source_if_unmodified_since)),
+            ('x-ms-source-if-match', _str_or_none(x_ms_source_if_match)),
+            ('x-ms-source-if-none-match',
+             _str_or_none(x_ms_source_if_none_match)),
+            ('If-Modified-Since', _str_or_none(if_modified_since)),
+            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
+            ('If-Match', _str_or_none(if_match)),
+            ('If-None-Match', _str_or_none(if_none_match)),
+            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
+            ('x-ms-source-lease-id', _str_or_none(x_ms_source_lease_id))
+        ]
+        request.path, request.query = _update_request_uri_query_local_storage(
+            request, self.use_local_storage)
+        request.headers = _update_storage_blob_header(
+            request, self.account_name, self.account_key)
+        response = self._perform_request(request)
+
+        return _parse_response_for_dict(response)
+
+    def abort_copy_blob(self, container_name, blob_name, x_ms_copy_id,
+                        x_ms_lease_id=None):
+        '''
+        Aborts a pending copy_blob operation, and leaves a destination blob
+        with zero length and full
metadata.
+
+        container_name: Name of destination container.
+        blob_name: Name of destination blob.
+        x_ms_copy_id:
+            Copy identifier provided in the x-ms-copy-id header of the
+            original copy_blob operation.
+        x_ms_lease_id:
+            Required if the destination blob has an active infinite lease.
+        '''
+        _validate_not_none('container_name', container_name)
+        _validate_not_none('blob_name', blob_name)
+        _validate_not_none('x_ms_copy_id', x_ms_copy_id)
+        request = HTTPRequest()
+        request.method = 'PUT'
+        request.host = self._get_host()
+        request.path = '/' + _str(container_name) + '/' + \
+            _str(blob_name) + '?comp=copy&copyid=' + \
+            _str(x_ms_copy_id)
+        request.headers = [
+            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
+            ('x-ms-copy-action', 'abort'),
+        ]
+        request.path, request.query = _update_request_uri_query_local_storage(
+            request, self.use_local_storage)
+        request.headers = _update_storage_blob_header(
+            request, self.account_name, self.account_key)
+        self._perform_request(request)
+
+    def delete_blob(self, container_name, blob_name, snapshot=None,
+                    x_ms_lease_id=None):
+        '''
+        Marks the specified blob or snapshot for deletion. The blob is later
+        deleted during garbage collection.
+
+        To mark a specific snapshot for deletion, provide the date/time of the
+        snapshot via the snapshot parameter.
+
+        container_name: Name of existing container.
+        blob_name: Name of existing blob.
+        snapshot:
+            Optional. The snapshot parameter is an opaque DateTime value that,
+            when present, specifies the blob snapshot to delete.
+        x_ms_lease_id: Required if the blob has an active lease.
+        '''
+        _validate_not_none('container_name', container_name)
+        _validate_not_none('blob_name', blob_name)
+        request = HTTPRequest()
+        request.method = 'DELETE'
+        request.host = self._get_host()
+        request.path = '/' + _str(container_name) + '/' + _str(blob_name) + ''
+        request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
+        request.query = [('snapshot', _str_or_none(snapshot))]
+        request.path, request.query = _update_request_uri_query_local_storage(
+            request, self.use_local_storage)
+        request.headers = _update_storage_blob_header(
+            request, self.account_name, self.account_key)
+        self._perform_request(request)
+
+    def put_block(self, container_name, blob_name, block, blockid,
+                  content_md5=None, x_ms_lease_id=None):
+        '''
+        Creates a new block to be committed as part of a blob.
+
+        container_name: Name of existing container.
+        blob_name: Name of existing blob.
+        block: Content of the block.
+        blockid:
+            Required. A value that identifies the block. The string must be
+            less than or equal to 64 bytes in size.
+        content_md5:
+            Optional. An MD5 hash of the block content. This hash is used to
+            verify the integrity of the blob during transport. When this
+            header is specified, the storage service checks the hash that has
+            arrived with the one that was sent.
+        x_ms_lease_id: Required if the blob has an active lease.
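+
+        Example (an illustrative sketch; names are hypothetical and
+        blob_service is an assumed BlobService instance):
+            block_id = '{0:08d}'.format(0)
+            blob_service.put_block(
+                'mycontainer', 'myblob', b'first chunk', block_id)
+            # The block stays uncommitted until put_block_list commits it.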
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('block', block) + _validate_not_none('blockid', blockid) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=block' + request.headers = [ + ('Content-MD5', _str_or_none(content_md5)), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)) + ] + request.query = [('blockid', _encode_base64(_str_or_none(blockid)))] + request.body = _get_request_body_bytes_only('block', block) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def put_block_list(self, container_name, blob_name, block_list, + content_md5=None, x_ms_blob_cache_control=None, + x_ms_blob_content_type=None, + x_ms_blob_content_encoding=None, + x_ms_blob_content_language=None, + x_ms_blob_content_md5=None, x_ms_meta_name_values=None, + x_ms_lease_id=None): + ''' + Writes a blob by specifying the list of block IDs that make up the + blob. In order to be written as part of a blob, a block must have been + successfully written to the server in a prior Put Block (REST API) + operation. + + container_name: Name of existing container. + blob_name: Name of existing blob. + block_list: A str list containing the block ids. + content_md5: + Optional. An MD5 hash of the block content. This hash is used to + verify the integrity of the blob during transport. When this header + is specified, the storage service checks the hash that has arrived + with the one that was sent. + x_ms_blob_cache_control: + Optional. Sets the blob's cache control. If specified, this + property is stored with the blob and returned with a read request. + x_ms_blob_content_type: + Optional. Sets the blob's content type. If specified, this property + is stored with the blob and returned with a read request. + x_ms_blob_content_encoding: + Optional. Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + x_ms_blob_content_language: + Optional. Set the blob's content language. If specified, this + property is stored with the blob and returned with a read request. + x_ms_blob_content_md5: + Optional. An MD5 hash of the blob content. Note that this hash is + not validated, as the hashes for the individual blocks were + validated when each was uploaded. + x_ms_meta_name_values: Optional. Dict containing name and value pairs. + x_ms_lease_id: Required if the blob has an active lease. 
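+
+        Example (an illustrative sketch continuing the put_block example
+        above; names are hypothetical and blob_service is an assumed
+        BlobService instance):
+            blob_service.put_block_list(
+                'mycontainer', 'myblob', ['00000000', '00000001'])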
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('block_list', block_list) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist' + request.headers = [ + ('Content-MD5', _str_or_none(content_md5)), + ('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)), + ('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)), + ('x-ms-blob-content-encoding', + _str_or_none(x_ms_blob_content_encoding)), + ('x-ms-blob-content-language', + _str_or_none(x_ms_blob_content_language)), + ('x-ms-blob-content-md5', _str_or_none(x_ms_blob_content_md5)), + ('x-ms-meta-name-values', x_ms_meta_name_values), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)) + ] + request.body = _get_request_body( + _convert_block_list_to_xml(block_list)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def get_block_list(self, container_name, blob_name, snapshot=None, + blocklisttype=None, x_ms_lease_id=None): + ''' + Retrieves the list of blocks that have been uploaded as part of a + block blob. + + container_name: Name of existing container. + blob_name: Name of existing blob. + snapshot: + Optional. Datetime to determine the time to retrieve the blocks. + blocklisttype: + Specifies whether to return the list of committed blocks, the list + of uncommitted blocks, or both lists together. Valid values are: + committed, uncommitted, or all. + x_ms_lease_id: Required if the blob has an active lease. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=blocklist' + request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))] + request.query = [ + ('snapshot', _str_or_none(snapshot)), + ('blocklisttype', _str_or_none(blocklisttype)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _convert_response_to_block_list(response) + + def put_page(self, container_name, blob_name, page, x_ms_range, + x_ms_page_write, timeout=None, content_md5=None, + x_ms_lease_id=None, x_ms_if_sequence_number_lte=None, + x_ms_if_sequence_number_lt=None, + x_ms_if_sequence_number_eq=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None): + ''' + Writes a range of pages to a page blob. + + container_name: Name of existing container. + blob_name: Name of existing blob. + page: Content of the page. + x_ms_range: + Required. Specifies the range of bytes to be written as a page. + Both the start and end of the range must be specified. Must be in + format: bytes=startByte-endByte. Given that pages must be aligned + with 512-byte boundaries, the start offset must be a modulus of + 512 and the end offset must be a modulus of 512-1. Examples of + valid byte ranges are 0-511, 512-1023, etc. + x_ms_page_write: + Required. 
You may specify one of the following options: + update (lower case): + Writes the bytes specified by the request body into the + specified range. The Range and Content-Length headers must + match to perform the update. + clear (lower case): + Clears the specified range and releases the space used in + storage for that range. To clear a range, set the + Content-Length header to zero, and the Range header to a + value that indicates the range to clear, up to maximum + blob size. + timeout: the timeout parameter is expressed in seconds. + content_md5: + Optional. An MD5 hash of the page content. This hash is used to + verify the integrity of the page during transport. When this header + is specified, the storage service compares the hash of the content + that has arrived with the header value that was sent. If the two + hashes do not match, the operation will fail with error code 400 + (Bad Request). + x_ms_lease_id: Required if the blob has an active lease. + x_ms_if_sequence_number_lte: + Optional. If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + x_ms_if_sequence_number_lt: + Optional. If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + x_ms_if_sequence_number_eq: + Optional. If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + if_modified_since: + Optional. A DateTime value. Specify this conditional header to + write the page only if the blob has been modified since the + specified date/time. If the blob has not been modified, the Blob + service fails. + if_unmodified_since: + Optional. A DateTime value. Specify this conditional header to + write the page only if the blob has not been modified since the + specified date/time. If the blob has been modified, the Blob + service fails. + if_match: + Optional. An ETag value. Specify an ETag value for this conditional + header to write the page only if the blob's ETag value matches the + value specified. If the values do not match, the Blob service fails. + if_none_match: + Optional. An ETag value. Specify an ETag value for this conditional + header to write the page only if the blob's ETag value does not + match the value specified. If the values are identical, the Blob + service fails. 
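
The update/clear semantics above can be exercised with 512-byte-aligned ranges. A hedged sketch, assuming `blob_service` is a configured BlobService and the page blob has already been created:

    # Illustrative sketch, not part of this patch.
    page = b'\x00' * 512
    blob_service.put_page('mycontainer', 'mypageblob', page,
                          x_ms_range='bytes=0-511',    # must be 512-aligned
                          x_ms_page_write='update')

    # Clearing releases the storage for the range; an empty body keeps the
    # Content-Length at zero, as the docstring above requires.
    blob_service.put_page('mycontainer', 'mypageblob', b'',
                          x_ms_range='bytes=0-511',
                          x_ms_page_write='clear')
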
+        '''
+        _validate_not_none('container_name', container_name)
+        _validate_not_none('blob_name', blob_name)
+        _validate_not_none('page', page)
+        _validate_not_none('x_ms_range', x_ms_range)
+        _validate_not_none('x_ms_page_write', x_ms_page_write)
+        request = HTTPRequest()
+        request.method = 'PUT'
+        request.host = self._get_host()
+        request.path = '/' + \
+            _str(container_name) + '/' + _str(blob_name) + '?comp=page'
+        request.headers = [
+            ('x-ms-range', _str_or_none(x_ms_range)),
+            ('Content-MD5', _str_or_none(content_md5)),
+            ('x-ms-page-write', _str_or_none(x_ms_page_write)),
+            ('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
+            ('x-ms-if-sequence-number-le',
+             _str_or_none(x_ms_if_sequence_number_lte)),
+            ('x-ms-if-sequence-number-lt',
+             _str_or_none(x_ms_if_sequence_number_lt)),
+            ('x-ms-if-sequence-number-eq',
+             _str_or_none(x_ms_if_sequence_number_eq)),
+            ('If-Modified-Since', _str_or_none(if_modified_since)),
+            ('If-Unmodified-Since', _str_or_none(if_unmodified_since)),
+            ('If-Match', _str_or_none(if_match)),
+            ('If-None-Match', _str_or_none(if_none_match))
+        ]
+        request.query = [('timeout', _int_or_none(timeout))]
+        request.body = _get_request_body_bytes_only('page', page)
+        request.path, request.query = _update_request_uri_query_local_storage(
+            request, self.use_local_storage)
+        request.headers = _update_storage_blob_header(
+            request, self.account_name, self.account_key)
+        self._perform_request(request)
+
+    def get_page_ranges(self, container_name, blob_name, snapshot=None,
+                        range=None, x_ms_range=None, x_ms_lease_id=None):
+        '''
+        Retrieves the page ranges for a blob.
+
+        container_name: Name of existing container.
+        blob_name: Name of existing blob.
+        snapshot:
+            Optional. The snapshot parameter is an opaque DateTime value that,
+            when present, specifies the blob snapshot to retrieve information
+            from.
+        range:
+            Optional. Specifies the range of bytes over which to list ranges,
+            inclusively. If omitted, then all ranges for the blob are returned.
+        x_ms_range:
+            Optional. Specifies the range of bytes over which to list ranges,
+            inclusively. Both the start and end of the range must be
+            specified. Must be in format: bytes=startByte-endByte. Given that
+            pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the end offset must be a modulus of
+            512-1. Examples of valid byte ranges are 0-511, 512-1023, etc.
+        x_ms_lease_id: Required if the blob has an active lease.
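
A short sketch of reading the ranges back. The attribute names on the parsed result (`page_ranges` on the PageList, `start`/`end` on each PageRange) are assumptions inferred from the parser call just below, not shown in this file:

    # Illustrative sketch, not part of this patch.
    page_list = blob_service.get_page_ranges('mycontainer', 'mypageblob')
    for page_range in page_list.page_ranges:   # assumed attribute name
        print(page_range.start, page_range.end)
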
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + \ + _str(container_name) + '/' + _str(blob_name) + '?comp=pagelist' + request.headers = [ + ('Range', _str_or_none(range)), + ('x-ms-range', _str_or_none(x_ms_range)), + ('x-ms-lease-id', _str_or_none(x_ms_lease_id)) + ] + request.query = [('snapshot', _str_or_none(snapshot))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_blob_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_simple_list(response, PageList, PageRange, "page_ranges") diff --git a/awx/lib/site-packages/azure/storage/cloudstorageaccount.py b/awx/lib/site-packages/azure/storage/cloudstorageaccount.py new file mode 100644 index 0000000000..e043f898cd --- /dev/null +++ b/awx/lib/site-packages/azure/storage/cloudstorageaccount.py @@ -0,0 +1,39 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +from azure.storage.blobservice import BlobService +from azure.storage.tableservice import TableService +from azure.storage.queueservice import QueueService + + +class CloudStorageAccount(object): + + """ + Provides a factory for creating the blob, queue, and table services + with a common account name and account key. Users can either use the + factory or can construct the appropriate service directly. + """ + + def __init__(self, account_name=None, account_key=None): + self.account_name = account_name + self.account_key = account_key + + def create_blob_service(self): + return BlobService(self.account_name, self.account_key) + + def create_table_service(self): + return TableService(self.account_name, self.account_key) + + def create_queue_service(self): + return QueueService(self.account_name, self.account_key) diff --git a/awx/lib/site-packages/azure/storage/queueservice.py b/awx/lib/site-packages/azure/storage/queueservice.py new file mode 100644 index 0000000000..fdde5fafd9 --- /dev/null +++ b/awx/lib/site-packages/azure/storage/queueservice.py @@ -0,0 +1,458 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +from azure import ( + WindowsAzureConflictError, + WindowsAzureError, + DEV_QUEUE_HOST, + QUEUE_SERVICE_HOST_BASE, + xml_escape, + _convert_class_to_xml, + _dont_fail_not_exist, + _dont_fail_on_exist, + _get_request_body, + _int_or_none, + _parse_enum_results_list, + _parse_response, + _parse_response_for_dict_filter, + _parse_response_for_dict_prefix, + _str, + _str_or_none, + _update_request_uri_query_local_storage, + _validate_not_none, + _ERROR_CONFLICT, + ) +from azure.http import ( + HTTPRequest, + HTTP_RESPONSE_NO_CONTENT, + ) +from azure.storage import ( + Queue, + QueueEnumResults, + QueueMessagesList, + StorageServiceProperties, + _update_storage_queue_header, + ) +from azure.storage.storageclient import _StorageClient + + +class QueueService(_StorageClient): + + ''' + This is the main class managing queue resources. + ''' + + def __init__(self, account_name=None, account_key=None, protocol='https', + host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST): + ''' + account_name: your storage account name, required for all operations. + account_key: your storage account key, required for all operations. + protocol: Optional. Protocol. Defaults to http. + host_base: + Optional. Live host base url. Defaults to Azure url. Override this + for on-premise. + dev_host: Optional. Dev host url. Defaults to localhost. + ''' + super(QueueService, self).__init__( + account_name, account_key, protocol, host_base, dev_host) + + def get_queue_service_properties(self, timeout=None): + ''' + Gets the properties of a storage account's Queue Service, including + Windows Azure Storage Analytics. + + timeout: Optional. The timeout parameter is expressed in seconds. + ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/?restype=service&comp=properties' + request.query = [('timeout', _int_or_none(timeout))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response(response, StorageServiceProperties) + + def list_queues(self, prefix=None, marker=None, maxresults=None, + include=None): + ''' + Lists all of the queues in a given storage account. + + prefix: + Filters the results to return only queues with names that begin + with the specified prefix. + marker: + A string value that identifies the portion of the list to be + returned with the next list operation. The operation returns a + NextMarker element within the response body if the list returned + was not complete. This value may then be used as a query parameter + in a subsequent call to request the next portion of the list of + queues. The marker value is opaque to the client. + maxresults: + Specifies the maximum number of queues to return. If maxresults is + not specified, the server will return up to 5,000 items. + include: + Optional. Include this parameter to specify that the container's + metadata be returned as part of the response body. 
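
The marker handshake described above pairs each response's NextMarker with the next request. A sketch, assuming the parsed QueueEnumResults is iterable and exposes the continuation token as `next_marker` (names assumed, not shown in this file); ACCOUNT_KEY is a placeholder:

    # Illustrative sketch, not part of this patch.
    queue_service = CloudStorageAccount('myaccount',
                                        ACCOUNT_KEY).create_queue_service()
    marker = None
    while True:
        queues = queue_service.list_queues(marker=marker, maxresults=100)
        for queue in queues:
            print(queue.name)                  # Queue.name is assumed
        marker = getattr(queues, 'next_marker', None)
        if not marker:
            break
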
+ ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/?comp=list' + request.query = [ + ('prefix', _str_or_none(prefix)), + ('marker', _str_or_none(marker)), + ('maxresults', _int_or_none(maxresults)), + ('include', _str_or_none(include)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_enum_results_list( + response, QueueEnumResults, "Queues", Queue) + + def create_queue(self, queue_name, x_ms_meta_name_values=None, + fail_on_exist=False): + ''' + Creates a queue under the given account. + + queue_name: name of the queue. + x_ms_meta_name_values: + Optional. A dict containing name-value pairs to associate with the + queue as metadata. + fail_on_exist: Specify whether throw exception when queue exists. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '' + request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + if not fail_on_exist: + try: + response = self._perform_request(request) + if response.status == HTTP_RESPONSE_NO_CONTENT: + return False + return True + except WindowsAzureError as ex: + _dont_fail_on_exist(ex) + return False + else: + response = self._perform_request(request) + if response.status == HTTP_RESPONSE_NO_CONTENT: + raise WindowsAzureConflictError( + _ERROR_CONFLICT.format(response.message)) + return True + + def delete_queue(self, queue_name, fail_not_exist=False): + ''' + Permanently deletes the specified queue. + + queue_name: Name of the queue. + fail_not_exist: + Specify whether throw exception when queue doesn't exist. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '' + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + if not fail_not_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_queue_metadata(self, queue_name): + ''' + Retrieves user-defined metadata and queue properties on the specified + queue. Metadata is associated with the queue as name-values pairs. + + queue_name: Name of the queue. 
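
Per the code above, create_queue and delete_queue fold the conflict and not-found cases into boolean results unless asked to raise. For instance (`queue_service` as before):

    # Illustrative sketch, not part of this patch.
    queue_service.create_queue('taskqueue')         # True; False if it existed
    queue_service.create_queue('taskqueue',
                               fail_on_exist=True)  # raises WindowsAzureConflictError
    queue_service.delete_queue('taskqueue')         # True once deleted
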
+ ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '?comp=metadata' + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict_prefix( + response, + prefixes=['x-ms-meta', 'x-ms-approximate-messages-count']) + + def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None): + ''' + Sets user-defined metadata on the specified queue. Metadata is + associated with the queue as name-value pairs. + + queue_name: Name of the queue. + x_ms_meta_name_values: + Optional. A dict containing name-value pairs to associate with the + queue as metadata. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '?comp=metadata' + request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def put_message(self, queue_name, message_text, visibilitytimeout=None, + messagettl=None): + ''' + Adds a new message to the back of the message queue. A visibility + timeout can also be specified to make the message invisible until the + visibility timeout expires. A message must be in a format that can be + included in an XML request with UTF-8 encoding. The encoded message can + be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size + for previous versions. + + queue_name: Name of the queue. + message_text: Message content. + visibilitytimeout: + Optional. If not specified, the default value is 0. Specifies the + new visibility timeout value, in seconds, relative to server time. + The new value must be larger than or equal to 0, and cannot be + larger than 7 days. The visibility timeout of a message cannot be + set to a value later than the expiry time. visibilitytimeout + should be set to a value smaller than the time-to-live value. + messagettl: + Optional. Specifies the time-to-live interval for the message, in + seconds. The maximum time-to-live allowed is 7 days. If this + parameter is omitted, the default time-to-live is 7 days. + ''' + _validate_not_none('queue_name', queue_name) + _validate_not_none('message_text', message_text) + request = HTTPRequest() + request.method = 'POST' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '/messages' + request.query = [ + ('visibilitytimeout', _str_or_none(visibilitytimeout)), + ('messagettl', _str_or_none(messagettl)) + ] + request.body = _get_request_body( + '<?xml version="1.0" encoding="utf-8"?> \ +<QueueMessage> \ + <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \ +</QueueMessage>') + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def get_messages(self, queue_name, numofmessages=None, + visibilitytimeout=None): + ''' + Retrieves one or more messages from the front of the queue. 
+ + queue_name: Name of the queue. + numofmessages: + Optional. A nonzero integer value that specifies the number of + messages to retrieve from the queue, up to a maximum of 32. If + fewer are visible, the visible messages are returned. By default, + a single message is retrieved from the queue with this operation. + visibilitytimeout: + Specifies the new visibility timeout value, in seconds, relative + to server time. The new value must be larger than or equal to 1 + second, and cannot be larger than 7 days, or larger than 2 hours + on REST protocol versions prior to version 2011-08-18. The + visibility timeout of a message can be set to a value later than + the expiry time. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '/messages' + request.query = [ + ('numofmessages', _str_or_none(numofmessages)), + ('visibilitytimeout', _str_or_none(visibilitytimeout)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response(response, QueueMessagesList) + + def peek_messages(self, queue_name, numofmessages=None): + ''' + Retrieves one or more messages from the front of the queue, but does + not alter the visibility of the message. + + queue_name: Name of the queue. + numofmessages: + Optional. A nonzero integer value that specifies the number of + messages to peek from the queue, up to a maximum of 32. By default, + a single message is peeked from the queue with this operation. + ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '/messages?peekonly=true' + request.query = [('numofmessages', _str_or_none(numofmessages))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response(response, QueueMessagesList) + + def delete_message(self, queue_name, message_id, popreceipt): + ''' + Deletes the specified message. + + queue_name: Name of the queue. + message_id: Message to delete. + popreceipt: + Required. A valid pop receipt value returned from an earlier call + to the Get Messages or Update Message operation. + ''' + _validate_not_none('queue_name', queue_name) + _validate_not_none('message_id', message_id) + _validate_not_none('popreceipt', popreceipt) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + \ + _str(queue_name) + '/messages/' + _str(message_id) + '' + request.query = [('popreceipt', _str_or_none(popreceipt))] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def clear_messages(self, queue_name): + ''' + Deletes all messages from the specified queue. + + queue_name: Name of the queue. 
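
A round-trip sketch of the message operations above. The attribute names on the parsed messages (`message_id`, `pop_receipt`, `message_text`) are assumed from the QueueMessagesList parser rather than shown in this file:

    # Illustrative sketch, not part of this patch.
    queue_service.put_message('taskqueue', 'process order 42',
                              messagettl=3600)
    messages = queue_service.get_messages('taskqueue', numofmessages=16,
                                          visibilitytimeout=60)
    for message in messages:
        print(message.message_text)
        queue_service.delete_message('taskqueue', message.message_id,
                                     message.pop_receipt)
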
+ ''' + _validate_not_none('queue_name', queue_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + _str(queue_name) + '/messages' + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + self._perform_request(request) + + def update_message(self, queue_name, message_id, message_text, popreceipt, + visibilitytimeout): + ''' + Updates the visibility timeout of a message. You can also use this + operation to update the contents of a message. + + queue_name: Name of the queue. + message_id: Message to update. + message_text: Content of message. + popreceipt: + Required. A valid pop receipt value returned from an earlier call + to the Get Messages or Update Message operation. + visibilitytimeout: + Required. Specifies the new visibility timeout value, in seconds, + relative to server time. The new value must be larger than or equal + to 0, and cannot be larger than 7 days. The visibility timeout of a + message cannot be set to a value later than the expiry time. A + message can be updated until it has been deleted or has expired. + ''' + _validate_not_none('queue_name', queue_name) + _validate_not_none('message_id', message_id) + _validate_not_none('message_text', message_text) + _validate_not_none('popreceipt', popreceipt) + _validate_not_none('visibilitytimeout', visibilitytimeout) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(queue_name) + '/messages/' + _str(message_id) + '' + request.query = [ + ('popreceipt', _str_or_none(popreceipt)), + ('visibilitytimeout', _str_or_none(visibilitytimeout)) + ] + request.body = _get_request_body( + '<?xml version="1.0" encoding="utf-8"?> \ +<QueueMessage> \ + <MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \ +</QueueMessage>') + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + response = self._perform_request(request) + + return _parse_response_for_dict_filter( + response, + filter=['x-ms-popreceipt', 'x-ms-time-next-visible']) + + def set_queue_service_properties(self, storage_service_properties, + timeout=None): + ''' + Sets the properties of a storage account's Queue service, including + Windows Azure Storage Analytics. + + storage_service_properties: StorageServiceProperties object. + timeout: Optional. The timeout parameter is expressed in seconds. 
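
update_message above both re-hides a message and rotates its pop receipt; the new receipt comes back in the parsed header dict. A sketch, assuming `message` came from get_messages as in the previous example:

    # Illustrative sketch, not part of this patch.
    props = queue_service.update_message('taskqueue', message.message_id,
                                         'still working on order 42',
                                         message.pop_receipt,
                                         visibilitytimeout=120)
    fresh_receipt = props['x-ms-popreceipt']   # needed for a later delete
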
+ ''' + _validate_not_none('storage_service_properties', + storage_service_properties) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/?restype=service&comp=properties' + request.query = [('timeout', _int_or_none(timeout))] + request.body = _get_request_body( + _convert_class_to_xml(storage_service_properties)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_queue_header( + request, self.account_name, self.account_key) + self._perform_request(request) diff --git a/awx/lib/site-packages/azure/storage/sharedaccesssignature.py b/awx/lib/site-packages/azure/storage/sharedaccesssignature.py new file mode 100644 index 0000000000..a882461cd4 --- /dev/null +++ b/awx/lib/site-packages/azure/storage/sharedaccesssignature.py @@ -0,0 +1,230 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +from azure import url_quote +from azure.storage import _sign_string, X_MS_VERSION + +#------------------------------------------------------------------------- +# Constants for the share access signature +SIGNED_START = 'st' +SIGNED_EXPIRY = 'se' +SIGNED_RESOURCE = 'sr' +SIGNED_PERMISSION = 'sp' +SIGNED_IDENTIFIER = 'si' +SIGNED_SIGNATURE = 'sig' +SIGNED_VERSION = 'sv' +RESOURCE_BLOB = 'b' +RESOURCE_CONTAINER = 'c' +SIGNED_RESOURCE_TYPE = 'resource' +SHARED_ACCESS_PERMISSION = 'permission' + +#-------------------------------------------------------------------------- + + +class WebResource(object): + + ''' + Class that stands for the resource to get the share access signature + + path: the resource path. + properties: dict of name and values. Contains 2 item: resource type and + permission + request_url: the url of the webresource include all the queries. + ''' + + def __init__(self, path=None, request_url=None, properties=None): + self.path = path + self.properties = properties or {} + self.request_url = request_url + + +class Permission(object): + + ''' + Permission class. Contains the path and query_string for the path. + + path: the resource path + query_string: dict of name, values. Contains SIGNED_START, SIGNED_EXPIRY + SIGNED_RESOURCE, SIGNED_PERMISSION, SIGNED_IDENTIFIER, + SIGNED_SIGNATURE name values. + ''' + + def __init__(self, path=None, query_string=None): + self.path = path + self.query_string = query_string + + +class SharedAccessPolicy(object): + + ''' SharedAccessPolicy class. ''' + + def __init__(self, access_policy, signed_identifier=None): + self.id = signed_identifier + self.access_policy = access_policy + + +class SharedAccessSignature(object): + + ''' + The main class used to do the signing and generating the signature. 
+
+    account_name:
+        the storage account name used to generate the shared access signature
+    account_key: the access key used to generate the shared access signature
+    permission_set: the permission cache used to sign the request url.
+    '''
+
+    def __init__(self, account_name, account_key, permission_set=None):
+        self.account_name = account_name
+        self.account_key = account_key
+        self.permission_set = permission_set
+
+    def generate_signed_query_string(self, path, resource_type,
+                                     shared_access_policy,
+                                     version=X_MS_VERSION):
+        '''
+        Generates the query string for path, resource type and shared access
+        policy.
+
+        path: the resource path
+        resource_type: could be blob or container
+        shared_access_policy: shared access policy
+        version:
+            x-ms-version for storage service, or None to get a signed query
+            string compatible with pre 2012-02-12 clients, where the version
+            is not included in the query string.
+        '''
+
+        query_string = {}
+        if shared_access_policy.access_policy.start:
+            query_string[
+                SIGNED_START] = shared_access_policy.access_policy.start
+
+        if version:
+            query_string[SIGNED_VERSION] = version
+        query_string[SIGNED_EXPIRY] = shared_access_policy.access_policy.expiry
+        query_string[SIGNED_RESOURCE] = resource_type
+        query_string[
+            SIGNED_PERMISSION] = shared_access_policy.access_policy.permission
+
+        if shared_access_policy.id:
+            query_string[SIGNED_IDENTIFIER] = shared_access_policy.id
+
+        query_string[SIGNED_SIGNATURE] = self._generate_signature(
+            path, shared_access_policy, version)
+        return query_string
+
+    def sign_request(self, web_resource):
+        ''' Signs the request, appending shared access signature query info
+        to the request_url of web_resource.'''
+
+        if self.permission_set:
+            for shared_access_signature in self.permission_set:
+                if self._permission_matches_request(
+                        shared_access_signature, web_resource,
+                        web_resource.properties[
+                            SIGNED_RESOURCE_TYPE],
+                        web_resource.properties[SHARED_ACCESS_PERMISSION]):
+                    if web_resource.request_url.find('?') == -1:
+                        web_resource.request_url += '?'
+                    else:
+                        web_resource.request_url += '&'
+
+                    web_resource.request_url += self._convert_query_string(
+                        shared_access_signature.query_string)
+                    break
+        return web_resource
+
+    def _convert_query_string(self, query_string):
+        ''' Converts the query string dict to a str. The order of the
+        name/value pairs is significant and must be preserved.'''
+
+        convert_str = ''
+        if SIGNED_START in query_string:
+            convert_str += SIGNED_START + '=' + \
+                url_quote(query_string[SIGNED_START]) + '&'
+        convert_str += SIGNED_EXPIRY + '=' + \
+            url_quote(query_string[SIGNED_EXPIRY]) + '&'
+        convert_str += SIGNED_PERMISSION + '=' + \
+            query_string[SIGNED_PERMISSION] + '&'
+        convert_str += SIGNED_RESOURCE + '=' + \
+            query_string[SIGNED_RESOURCE] + '&'
+
+        if SIGNED_IDENTIFIER in query_string:
+            convert_str += SIGNED_IDENTIFIER + '=' + \
+                query_string[SIGNED_IDENTIFIER] + '&'
+        if SIGNED_VERSION in query_string:
+            convert_str += SIGNED_VERSION + '=' + \
+                query_string[SIGNED_VERSION] + '&'
+        convert_str += SIGNED_SIGNATURE + '=' + \
+            url_quote(query_string[SIGNED_SIGNATURE]) + '&'
+        return convert_str
+
+    def _generate_signature(self, path, shared_access_policy, version):
+        ''' Generates signature for a given path and shared access policy.
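
A sketch of producing the SAS query-string fields for a container. It assumes azure.storage.AccessPolicy (defined elsewhere in the package) is the object whose start/expiry/permission attributes generate_signed_query_string reads; ACCOUNT_KEY is a placeholder:

    # Illustrative sketch, not part of this patch. AccessPolicy's keyword
    # names are assumed from the attribute reads above.
    from azure.storage import AccessPolicy

    policy = SharedAccessPolicy(AccessPolicy(start='2014-01-01',
                                             expiry='2014-01-02',
                                             permission='r'))
    sas = SharedAccessSignature('myaccount', ACCOUNT_KEY)
    fields = sas.generate_signed_query_string('/mycontainer',
                                              RESOURCE_CONTAINER, policy)
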
''' + + def get_value_to_append(value, no_new_line=False): + return_value = '' + if value: + return_value = value + if not no_new_line: + return_value += '\n' + return return_value + + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/' + self.account_name + path + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. + string_to_sign = \ + (get_value_to_append(shared_access_policy.access_policy.permission) + + get_value_to_append(shared_access_policy.access_policy.start) + + get_value_to_append(shared_access_policy.access_policy.expiry) + + get_value_to_append(canonicalized_resource)) + + if version: + string_to_sign += get_value_to_append(shared_access_policy.id) + string_to_sign += get_value_to_append(version, True) + else: + string_to_sign += get_value_to_append(shared_access_policy.id, True) + + return self._sign(string_to_sign) + + def _permission_matches_request(self, shared_access_signature, + web_resource, resource_type, + required_permission): + ''' Check whether requested permission matches given + shared_access_signature, web_resource and resource type. ''' + + required_resource_type = resource_type + if required_resource_type == RESOURCE_BLOB: + required_resource_type += RESOURCE_CONTAINER + + for name, value in shared_access_signature.query_string.items(): + if name == SIGNED_RESOURCE and \ + required_resource_type.find(value) == -1: + return False + elif name == SIGNED_PERMISSION and \ + required_permission.find(value) == -1: + return False + + return web_resource.path.find(shared_access_signature.path) != -1 + + def _sign(self, string_to_sign): + ''' use HMAC-SHA256 to sign the string and convert it as base64 + encoded string. ''' + + return _sign_string(self.account_key, string_to_sign) diff --git a/awx/lib/site-packages/azure/storage/storageclient.py b/awx/lib/site-packages/azure/storage/storageclient.py new file mode 100644 index 0000000000..7f160faff9 --- /dev/null +++ b/awx/lib/site-packages/azure/storage/storageclient.py @@ -0,0 +1,152 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#--------------------------------------------------------------------------
+import os
+import sys
+
+from azure import (
+    WindowsAzureError,
+    DEV_ACCOUNT_NAME,
+    DEV_ACCOUNT_KEY,
+    _ERROR_STORAGE_MISSING_INFO,
+    )
+from azure.http import HTTPError
+from azure.http.httpclient import _HTTPClient
+from azure.storage import _storage_error_handler
+
+#--------------------------------------------------------------------------
+# constants for azure app setting environment variables
+AZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'
+AZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'
+EMULATED = 'EMULATED'
+
+#--------------------------------------------------------------------------
+
+
+class _StorageClient(object):
+
+    '''
+    This is the base class for BlobManager, TableManager and QueueManager.
+    '''
+
+    def __init__(self, account_name=None, account_key=None, protocol='https',
+                 host_base='', dev_host=''):
+        '''
+        account_name: your storage account name, required for all operations.
+        account_key: your storage account key, required for all operations.
+        protocol: Optional. Protocol. Defaults to https.
+        host_base:
+            Optional. Live host base url. Defaults to Azure url. Override this
+            for on-premise.
+        dev_host: Optional. Dev host url. Defaults to localhost.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.requestid = None
+        self.protocol = protocol
+        self.host_base = host_base
+        self.dev_host = dev_host
+
+        # assume development storage is not used; overridden below when the
+        # app is run in the emulator.
+        self.use_local_storage = False
+
+        # check whether it is run in emulator.
+        if EMULATED in os.environ:
+            self.is_emulated = os.environ[EMULATED].lower() != 'false'
+        else:
+            self.is_emulated = False
+
+        # get account_name and account key. If they are not set when
+        # constructing, get the account and key from environment variables if
+        # the app is not run in the azure emulator, or use the default
+        # development storage account and key if the app is run in the
+        # emulator.
+        if not self.account_name or not self.account_key:
+            if self.is_emulated:
+                self.account_name = DEV_ACCOUNT_NAME
+                self.account_key = DEV_ACCOUNT_KEY
+                self.protocol = 'http'
+                self.use_local_storage = True
+            else:
+                self.account_name = os.environ.get(AZURE_STORAGE_ACCOUNT)
+                self.account_key = os.environ.get(AZURE_STORAGE_ACCESS_KEY)
+
+        if not self.account_name or not self.account_key:
+            raise WindowsAzureError(_ERROR_STORAGE_MISSING_INFO)
+
+        self._httpclient = _HTTPClient(
+            service_instance=self,
+            account_key=self.account_key,
+            account_name=self.account_name,
+            protocol=self.protocol)
+        self._batchclient = None
+        self._filter = self._perform_request_worker
+
+    def with_filter(self, filter):
+        '''
+        Returns a new service which will process requests with the specified
+        filter. Filtering operations can include logging, automatic retrying,
+        etc... The filter is a lambda which receives the HTTPRequest and
+        another lambda. The filter can perform any pre-processing on the
+        request, pass it off to the next lambda, and then perform any
+        post-processing on the response.
+        '''
+        res = type(self)(self.account_name, self.account_key, self.protocol)
+        old_filter = self._filter
+
+        def new_filter(request):
+            return filter(request, old_filter)
+
+        res._filter = new_filter
+        return res
+
+    def set_proxy(self, host, port, user=None, password=None):
+        '''
+        Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
+
+        host: Address of the proxy.
Ex: '192.168.0.100' + port: Port of the proxy. Ex: 6000 + user: User for proxy authorization. + password: Password for proxy authorization. + ''' + self._httpclient.set_proxy(host, port, user, password) + + def _get_host(self): + if self.use_local_storage: + return self.dev_host + else: + return self.account_name + self.host_base + + def _perform_request_worker(self, request): + return self._httpclient.perform_request(request) + + def _perform_request(self, request, text_encoding='utf-8'): + ''' + Sends the request and return response. Catches HTTPError and hand it + to error handler + ''' + try: + if self._batchclient is not None: + return self._batchclient.insert_request_to_batch(request) + else: + resp = self._filter(request) + + if sys.version_info >= (3,) and isinstance(resp, bytes) and \ + text_encoding: + resp = resp.decode(text_encoding) + + except HTTPError as ex: + _storage_error_handler(ex) + + return resp diff --git a/awx/lib/site-packages/azure/storage/tableservice.py b/awx/lib/site-packages/azure/storage/tableservice.py new file mode 100644 index 0000000000..3fe58a73f1 --- /dev/null +++ b/awx/lib/site-packages/azure/storage/tableservice.py @@ -0,0 +1,491 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#-------------------------------------------------------------------------- +from azure import ( + WindowsAzureError, + TABLE_SERVICE_HOST_BASE, + DEV_TABLE_HOST, + _convert_class_to_xml, + _convert_response_to_feeds, + _dont_fail_not_exist, + _dont_fail_on_exist, + _get_request_body, + _int_or_none, + _parse_response, + _parse_response_for_dict, + _parse_response_for_dict_filter, + _str, + _str_or_none, + _update_request_uri_query_local_storage, + _validate_not_none, + ) +from azure.http import HTTPRequest +from azure.http.batchclient import _BatchClient +from azure.storage import ( + StorageServiceProperties, + _convert_entity_to_xml, + _convert_response_to_entity, + _convert_table_to_xml, + _convert_xml_to_entity, + _convert_xml_to_table, + _sign_storage_table_request, + _update_storage_table_header, + ) +from azure.storage.storageclient import _StorageClient + + +class TableService(_StorageClient): + + ''' + This is the main class managing Table resources. + ''' + + def __init__(self, account_name=None, account_key=None, protocol='https', + host_base=TABLE_SERVICE_HOST_BASE, dev_host=DEV_TABLE_HOST): + ''' + account_name: your storage account name, required for all operations. + account_key: your storage account key, required for all operations. + protocol: Optional. Protocol. Defaults to http. + host_base: + Optional. Live host base url. Defaults to Azure url. Override this + for on-premise. + dev_host: Optional. Dev host url. Defaults to localhost. 
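
Per the _StorageClient base class earlier in this patch, the credentials may also come from the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables. A sketch (the key value is a placeholder):

    # Illustrative sketch, not part of this patch.
    import os
    os.environ['AZURE_STORAGE_ACCOUNT'] = 'myaccount'
    os.environ['AZURE_STORAGE_ACCESS_KEY'] = '<base64 key>'
    table_service = TableService()   # falls back to the environment variables
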
+ ''' + super(TableService, self).__init__( + account_name, account_key, protocol, host_base, dev_host) + + def begin_batch(self): + if self._batchclient is None: + self._batchclient = _BatchClient( + service_instance=self, + account_key=self.account_key, + account_name=self.account_name) + return self._batchclient.begin_batch() + + def commit_batch(self): + try: + ret = self._batchclient.commit_batch() + finally: + self._batchclient = None + return ret + + def cancel_batch(self): + self._batchclient = None + + def get_table_service_properties(self): + ''' + Gets the properties of a storage account's Table service, including + Windows Azure Storage Analytics. + ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/?restype=service&comp=properties' + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _parse_response(response, StorageServiceProperties) + + def set_table_service_properties(self, storage_service_properties): + ''' + Sets the properties of a storage account's Table Service, including + Windows Azure Storage Analytics. + + storage_service_properties: StorageServiceProperties object. + ''' + _validate_not_none('storage_service_properties', + storage_service_properties) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/?restype=service&comp=properties' + request.body = _get_request_body( + _convert_class_to_xml(storage_service_properties)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _parse_response_for_dict(response) + + def query_tables(self, table_name=None, top=None, next_table_name=None): + ''' + Returns a list of tables under the specified account. + + table_name: Optional. The specific table to query. + top: Optional. Maximum number of tables to return. + next_table_name: + Optional. When top is used, the next table name is stored in + result.x_ms_continuation['NextTableName'] + ''' + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + if table_name is not None: + uri_part_table_name = "('" + table_name + "')" + else: + uri_part_table_name = "" + request.path = '/Tables' + uri_part_table_name + '' + request.query = [ + ('$top', _int_or_none(top)), + ('NextTableName', _str_or_none(next_table_name)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _convert_response_to_feeds(response, _convert_xml_to_table) + + def create_table(self, table, fail_on_exist=False): + ''' + Creates a new table in the storage account. + + table: + Name of the table to create. Table name may contain only + alphanumeric characters and cannot begin with a numeric character. + It is case-insensitive and must be from 3 to 63 characters long. + fail_on_exist: Specify whether throw exception when table exists. 
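
The begin_batch/commit_batch pair above routes the entity calls made in between through a single entity-group transaction; Azure requires every entity in a batch to share a PartitionKey. A sketch, using the insert_entity operation defined later in this file:

    # Illustrative sketch, not part of this patch.
    table_service.begin_batch()
    try:
        for i in range(3):
            table_service.insert_entity('tasktable', {
                'PartitionKey': 'batch1',
                'RowKey': str(i),
                'state': 'new',
            })
        table_service.commit_batch()
    except WindowsAzureError:
        table_service.cancel_batch()
        raise
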
+ ''' + _validate_not_none('table', table) + request = HTTPRequest() + request.method = 'POST' + request.host = self._get_host() + request.path = '/Tables' + request.body = _get_request_body(_convert_table_to_xml(table)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + if not fail_on_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_on_exist(ex) + return False + else: + self._perform_request(request) + return True + + def delete_table(self, table_name, fail_not_exist=False): + ''' + table_name: Name of the table to delete. + fail_not_exist: + Specify whether throw exception when table doesn't exist. + ''' + _validate_not_none('table_name', table_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/Tables(\'' + _str(table_name) + '\')' + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + if not fail_not_exist: + try: + self._perform_request(request) + return True + except WindowsAzureError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_entity(self, table_name, partition_key, row_key, select=''): + ''' + Get an entity in a table; includes the $select options. + + partition_key: PartitionKey of the entity. + row_key: RowKey of the entity. + select: Property names to select. + ''' + _validate_not_none('table_name', table_name) + _validate_not_none('partition_key', partition_key) + _validate_not_none('row_key', row_key) + _validate_not_none('select', select) + request = HTTPRequest() + request.method = 'GET' + request.host = self._get_host() + request.path = '/' + _str(table_name) + \ + '(PartitionKey=\'' + _str(partition_key) + \ + '\',RowKey=\'' + \ + _str(row_key) + '\')?$select=' + \ + _str(select) + '' + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _convert_response_to_entity(response) + + def query_entities(self, table_name, filter=None, select=None, top=None, + next_partition_key=None, next_row_key=None): + ''' + Get entities in a table; includes the $filter and $select options. + + table_name: Table to query. + filter: + Optional. Filter as described at + http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx + select: Optional. Property names to select from the entities. + top: Optional. Maximum number of entities to return. + next_partition_key: + Optional. When top is used, the next partition key is stored in + result.x_ms_continuation['NextPartitionKey'] + next_row_key: + Optional. 
When top is used, the next row key is stored in
+            result.x_ms_continuation['NextRowKey']
+        '''
+        _validate_not_none('table_name', table_name)
+        request = HTTPRequest()
+        request.method = 'GET'
+        request.host = self._get_host()
+        request.path = '/' + _str(table_name) + '()'
+        request.query = [
+            ('$filter', _str_or_none(filter)),
+            ('$select', _str_or_none(select)),
+            ('$top', _int_or_none(top)),
+            ('NextPartitionKey', _str_or_none(next_partition_key)),
+            ('NextRowKey', _str_or_none(next_row_key))
+        ]
+        request.path, request.query = _update_request_uri_query_local_storage(
+            request, self.use_local_storage)
+        request.headers = _update_storage_table_header(request)
+        response = self._perform_request(request)
+
+        return _convert_response_to_feeds(response, _convert_xml_to_entity)
+
+    def insert_entity(self, table_name, entity,
+                      content_type='application/atom+xml'):
+        '''
+        Inserts a new entity into a table.
+
+        table_name: Table name.
+        entity:
+            Required. The entity object to insert. Could be a dict format or
+            entity object.
+        content_type: Required. Must be set to application/atom+xml
+        '''
+        _validate_not_none('table_name', table_name)
+        _validate_not_none('entity', entity)
+        _validate_not_none('content_type', content_type)
+        request = HTTPRequest()
+        request.method = 'POST'
+        request.host = self._get_host()
+        request.path = '/' + _str(table_name) + ''
+        request.headers = [('Content-Type', _str_or_none(content_type))]
+        request.body = _get_request_body(_convert_entity_to_xml(entity))
+        request.path, request.query = _update_request_uri_query_local_storage(
+            request, self.use_local_storage)
+        request.headers = _update_storage_table_header(request)
+        response = self._perform_request(request)
+
+        return _convert_response_to_entity(response)
+
+    def update_entity(self, table_name, partition_key, row_key, entity,
+                      content_type='application/atom+xml', if_match='*'):
+        '''
+        Updates an existing entity in a table. The Update Entity operation
+        replaces the entire entity and can be used to remove properties.
+
+        table_name: Table name.
+        partition_key: PartitionKey of the entity.
+        row_key: RowKey of the entity.
+        entity:
+            Required. The entity object to update. Could be a dict format or
+            entity object.
+        content_type: Required. Must be set to application/atom+xml
+        if_match:
+            Optional. Specifies the condition for which the update should be
+            performed. To force an unconditional update, set to the wildcard
+            character (*).
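
Large result sets hand back continuation tokens exactly as the query_entities docstring above describes; a paging sketch, assuming the parsed result is list-like and carries the x_ms_continuation dict:

    # Illustrative sketch, not part of this patch.
    next_pk, next_rk = None, None
    while True:
        page = table_service.query_entities('tasktable', top=1000,
                                            next_partition_key=next_pk,
                                            next_row_key=next_rk)
        for entity in page:
            process(entity)              # hypothetical handler
        continuation = getattr(page, 'x_ms_continuation', None) or {}
        next_pk = continuation.get('NextPartitionKey')
        next_rk = continuation.get('NextRowKey')
        if not next_pk:
            break
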
+ ''' + _validate_not_none('table_name', table_name) + _validate_not_none('partition_key', partition_key) + _validate_not_none('row_key', row_key) + _validate_not_none('entity', entity) + _validate_not_none('content_type', content_type) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(table_name) + '(PartitionKey=\'' + \ + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')' + request.headers = [ + ('Content-Type', _str_or_none(content_type)), + ('If-Match', _str_or_none(if_match)) + ] + request.body = _get_request_body(_convert_entity_to_xml(entity)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _parse_response_for_dict_filter(response, filter=['etag']) + + def merge_entity(self, table_name, partition_key, row_key, entity, + content_type='application/atom+xml', if_match='*'): + ''' + Updates an existing entity by updating the entity's properties. This + operation does not replace the existing entity as the Update Entity + operation does. + + table_name: Table name. + partition_key: PartitionKey of the entity. + row_key: RowKey of the entity. + entity: + Required. The entity object to insert. Can be a dict format or + entity object. + content_type: Required. Must be set to application/atom+xml + if_match: + Optional. Specifies the condition for which the merge should be + performed. To force an unconditional merge, set to the wildcard + character (*). + ''' + _validate_not_none('table_name', table_name) + _validate_not_none('partition_key', partition_key) + _validate_not_none('row_key', row_key) + _validate_not_none('entity', entity) + _validate_not_none('content_type', content_type) + request = HTTPRequest() + request.method = 'MERGE' + request.host = self._get_host() + request.path = '/' + \ + _str(table_name) + '(PartitionKey=\'' + \ + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')' + request.headers = [ + ('Content-Type', _str_or_none(content_type)), + ('If-Match', _str_or_none(if_match)) + ] + request.body = _get_request_body(_convert_entity_to_xml(entity)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _parse_response_for_dict_filter(response, filter=['etag']) + + def delete_entity(self, table_name, partition_key, row_key, + content_type='application/atom+xml', if_match='*'): + ''' + Deletes an existing entity in a table. + + table_name: Table name. + partition_key: PartitionKey of the entity. + row_key: RowKey of the entity. + content_type: Required. Must be set to application/atom+xml + if_match: + Optional. Specifies the condition for which the delete should be + performed. To force an unconditional delete, set to the wildcard + character (*). 
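
The practical difference between update_entity and merge_entity is whether properties absent from the payload survive:

    # Illustrative sketch, not part of this patch.
    table_service.update_entity(           # replaces the whole entity: any
        'tasktable', 'batch1', '0',        # property not listed here is gone
        {'PartitionKey': 'batch1', 'RowKey': '0', 'state': 'done'})
    table_service.merge_entity(            # touches only the listed properties
        'tasktable', 'batch1', '0',
        {'PartitionKey': 'batch1', 'RowKey': '0', 'owner': 'worker-7'})
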
+ ''' + _validate_not_none('table_name', table_name) + _validate_not_none('partition_key', partition_key) + _validate_not_none('row_key', row_key) + _validate_not_none('content_type', content_type) + _validate_not_none('if_match', if_match) + request = HTTPRequest() + request.method = 'DELETE' + request.host = self._get_host() + request.path = '/' + \ + _str(table_name) + '(PartitionKey=\'' + \ + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')' + request.headers = [ + ('Content-Type', _str_or_none(content_type)), + ('If-Match', _str_or_none(if_match)) + ] + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + self._perform_request(request) + + def insert_or_replace_entity(self, table_name, partition_key, row_key, + entity, content_type='application/atom+xml'): + ''' + Replaces an existing entity or inserts a new entity if it does not + exist in the table. Because this operation can insert or update an + entity, it is also known as an "upsert" operation. + + table_name: Table name. + partition_key: PartitionKey of the entity. + row_key: RowKey of the entity. + entity: + Required. The entity object to insert. Could be a dict format or + entity object. + content_type: Required. Must be set to application/atom+xml + ''' + _validate_not_none('table_name', table_name) + _validate_not_none('partition_key', partition_key) + _validate_not_none('row_key', row_key) + _validate_not_none('entity', entity) + _validate_not_none('content_type', content_type) + request = HTTPRequest() + request.method = 'PUT' + request.host = self._get_host() + request.path = '/' + \ + _str(table_name) + '(PartitionKey=\'' + \ + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')' + request.headers = [('Content-Type', _str_or_none(content_type))] + request.body = _get_request_body(_convert_entity_to_xml(entity)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _parse_response_for_dict_filter(response, filter=['etag']) + + def insert_or_merge_entity(self, table_name, partition_key, row_key, + entity, content_type='application/atom+xml'): + ''' + Merges an existing entity or inserts a new entity if it does not exist + in the table. Because this operation can insert or update an entity, + it is also known as an "upsert" operation. + + table_name: Table name. + partition_key: PartitionKey of the entity. + row_key: RowKey of the entity. + entity: + Required. The entity object to insert. Could be a dict format or + entity object. + content_type: Required. 
Must be set to application/atom+xml + ''' + _validate_not_none('table_name', table_name) + _validate_not_none('partition_key', partition_key) + _validate_not_none('row_key', row_key) + _validate_not_none('entity', entity) + _validate_not_none('content_type', content_type) + request = HTTPRequest() + request.method = 'MERGE' + request.host = self._get_host() + request.path = '/' + \ + _str(table_name) + '(PartitionKey=\'' + \ + _str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')' + request.headers = [('Content-Type', _str_or_none(content_type))] + request.body = _get_request_body(_convert_entity_to_xml(entity)) + request.path, request.query = _update_request_uri_query_local_storage( + request, self.use_local_storage) + request.headers = _update_storage_table_header(request) + response = self._perform_request(request) + + return _parse_response_for_dict_filter(response, filter=['etag']) + + def _perform_request_worker(self, request): + auth = _sign_storage_table_request(request, + self.account_name, + self.account_key) + request.headers.append(('Authorization', auth)) + return self._httpclient.perform_request(request) diff --git a/requirements/apache-libcloud-0.15.1.tar.gz b/requirements/apache-libcloud-0.15.1.tar.gz new file mode 100644 index 0000000000..93a6d25ce5 Binary files /dev/null and b/requirements/apache-libcloud-0.15.1.tar.gz differ diff --git a/requirements/azure-0.8.1.zip b/requirements/azure-0.8.1.zip new file mode 100644 index 0000000000..7b5aff0264 Binary files /dev/null and b/requirements/azure-0.8.1.zip differ