Adding deps.

This commit is contained in:
Luke Sneeringer 2014-07-28 14:40:51 -05:00
parent e837bbd2d0
commit 0f5263c027
20 changed files with 12009 additions and 0 deletions


@@ -0,0 +1,905 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import base64
import sys
import types
import warnings
if sys.version_info < (3,):
from urllib2 import quote as url_quote
from urllib2 import unquote as url_unquote
_strtype = basestring
else:
from urllib.parse import quote as url_quote
from urllib.parse import unquote as url_unquote
_strtype = str
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
#--------------------------------------------------------------------------
# constants
__author__ = 'Microsoft Corp. <ptvshelp@microsoft.com>'
__version__ = '0.8.1'
# Live ServiceClient URLs
BLOB_SERVICE_HOST_BASE = '.blob.core.windows.net'
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
MANAGEMENT_HOST = 'management.core.windows.net'
# Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
DEV_TABLE_HOST = '127.0.0.1:10002'
# Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# All of our error messages
_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.'
_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
_ERROR_INCORRECT_TABLE_IN_BATCH = \
'Table should be the same in a batch operation'
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \
'Partition Key should be the same in a batch operation'
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \
'Row Keys should not be the same in a batch operation'
_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = \
'Message is not peek locked and cannot be deleted.'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = \
'Message is not peek locked and cannot be unlocked.'
_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'
_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_SERVICEBUS_MISSING_INFO = \
'You need to provide servicebus namespace, access key and Issuer'
_ERROR_STORAGE_MISSING_INFO = \
'You need to provide both account name and access key'
_ERROR_ACCESS_POLICY = \
'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \
'instance'
_WARNING_VALUE_SHOULD_BE_BYTES = \
'Warning: {0} must be bytes data type. It will be converted ' + \
'automatically, with utf-8 text encoding.'
_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_VALUE_NEGATIVE = '{0} should not be negative.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \
'Cannot serialize the specified value ({0}) to an entity. Please use ' + \
'an EntityProperty (which can specify custom types), int, str, bool, ' + \
'or datetime.'
_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
'Invalid page blob size: {0}. ' + \
'The size must be aligned to a 512-byte boundary.'
_USER_AGENT_STRING = 'pyazure/' + __version__
METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
class WindowsAzureData(object):
''' This is the base data class.
It is only used for isinstance checks. '''
pass
class WindowsAzureError(Exception):
''' WindowsAzure Exception base class. '''
def __init__(self, message):
super(WindowsAzureError, self).__init__(message)
class WindowsAzureConflictError(WindowsAzureError):
'''Indicates that the resource could not be created because it already
exists'''
def __init__(self, message):
super(WindowsAzureConflictError, self).__init__(message)
class WindowsAzureMissingResourceError(WindowsAzureError):
'''Indicates that a request for a resource (queue, table,
container, etc.) failed because the specified resource does not exist'''
def __init__(self, message):
super(WindowsAzureMissingResourceError, self).__init__(message)
class WindowsAzureBatchOperationError(WindowsAzureError):
'''Indicates that a batch operation failed'''
def __init__(self, message, code):
super(WindowsAzureBatchOperationError, self).__init__(message)
self.code = code
class Feed(object):
pass
class _Base64String(str):
pass
class HeaderDict(dict):
def __getitem__(self, index):
return super(HeaderDict, self).__getitem__(index.lower())
def _encode_base64(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(data):
decoded_bytes = _decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
def _get_readable_id(id_name, id_prefix_to_skip):
"""simplified an id to be more friendly for us people"""
# id_name is in the form 'https://namespace.host.suffix/name'
# where name may contain a forward slash!
pos = id_name.find('//')
if pos != -1:
pos += 2
if id_prefix_to_skip:
pos = id_name.find(id_prefix_to_skip, pos)
if pos != -1:
pos += len(id_prefix_to_skip)
pos = id_name.find('/', pos)
if pos != -1:
return id_name[pos + 1:]
return id_name
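# Illustrative examples (hypothetical ids, not taken from the service): the
# readable name is whatever follows the host and the optional prefix to skip.
#
#   >>> _get_readable_id('https://myns.servicebus.windows.net/myqueue', None)
#   'myqueue'
#   >>> _get_readable_id(
#   ...     'https://myns.servicebus.windows.net/myqueue/subscriptions/mysub',
#   ...     '/subscriptions')
#   'mysub'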
def _get_entry_properties(xmlstr, include_id, id_prefix_to_skip=None):
''' get properties from entry xml '''
xmldoc = minidom.parseString(xmlstr)
properties = {}
for entry in _get_child_nodes(xmldoc, 'entry'):
etag = entry.getAttributeNS(METADATA_NS, 'etag')
if etag:
properties['etag'] = etag
for updated in _get_child_nodes(entry, 'updated'):
properties['updated'] = updated.firstChild.nodeValue
for name in _get_children_from_path(entry, 'author', 'name'):
if name.firstChild is not None:
properties['author'] = name.firstChild.nodeValue
if include_id:
for id in _get_child_nodes(entry, 'id'):
properties['name'] = _get_readable_id(
id.firstChild.nodeValue, id_prefix_to_skip)
return properties
def _get_first_child_node_value(parent_node, node_name):
xml_attrs = _get_child_nodes(parent_node, node_name)
if xml_attrs:
xml_attr = xml_attrs[0]
if xml_attr.firstChild:
value = xml_attr.firstChild.nodeValue
return value
def _get_child_nodes(node, tagName):
return [childNode for childNode in node.getElementsByTagName(tagName)
if childNode.parentNode == node]
def _get_children_from_path(node, *path):
'''Descends through a hierarchy of nodes, returning the list of children
at the innermost level. Only returns children that share a common parent,
not cousins.'''
cur = node
for index, child in enumerate(path):
if isinstance(child, _strtype):
next = _get_child_nodes(cur, child)
else:
next = _get_child_nodesNS(cur, *child)
if index == len(path) - 1:
return next
elif not next:
break
cur = next[0]
return []
def _get_child_nodesNS(node, ns, tagName):
return [childNode for childNode in node.getElementsByTagNameNS(ns, tagName)
if childNode.parentNode == node]
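# Illustrative sketch: string path segments match plain tag names, while a
# (namespace, tag) tuple matches namespaced children via _get_child_nodesNS
# above (the table batch client later in this commit uses that form).
#
#   >>> from xml.dom import minidom
#   >>> doc = minidom.parseString('<feed><entry><content>x</content></entry></feed>')
#   >>> [n.nodeName for n in _get_children_from_path(doc, 'feed', 'entry', 'content')]
#   ['content']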
def _create_entry(entry_body):
''' Adds the common part of an entry to a given entry body and returns the
whole xml. '''
updated_str = datetime.utcnow().isoformat()
if datetime.utcnow().utcoffset() is None:
updated_str += '+00:00'
entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
_KNOWN_SERIALIZATION_XFORMS = {
'include_apis': 'IncludeAPIs',
'message_id': 'MessageId',
'content_md5': 'Content-MD5',
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
'account_admin_live_email_id': 'AccountAdminLiveEmailId',
'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
'subscription_id': 'SubscriptionID',
'fqdn': 'FQDN',
'private_id': 'PrivateID',
'os_virtual_hard_disk': 'OSVirtualHardDisk',
'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
'logical_size_in_gb': 'LogicalSizeInGB',
'os': 'OS',
'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
'copy_id': 'CopyId',
}
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
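# Illustrative examples derived from the rules above (attribute names are
# hypothetical):
#
#   >>> _get_serialization_name('subscription_id')   # known transform
#   'SubscriptionID'
#   >>> _get_serialization_name('x_ms_blob_type')    # x_ms_ prefix keeps dashes
#   'x-ms-blob-type'
#   >>> _get_serialization_name('content_type')      # content_ prefix keeps a dash
#   'Content-Type'
#   >>> _get_serialization_name('label')             # default: capitalize and join
#   'Label'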
if sys.version_info < (3,):
_unicode_type = unicode
def _str(value):
if isinstance(value, unicode):
return value.encode('utf-8')
return str(value)
else:
_str = str
_unicode_type = str
def _str_or_none(value):
if value is None:
return None
return _str(value)
def _int_or_none(value):
if value is None:
return None
return str(int(value))
def _bool_or_none(value):
if value is None:
return None
if isinstance(value, bool):
if value:
return 'true'
else:
return 'false'
return str(value)
def _convert_class_to_xml(source, xml_prefix=True):
if source is None:
return ''
xmlstr = ''
if xml_prefix:
xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
if isinstance(source, list):
for value in source:
xmlstr += _convert_class_to_xml(value, False)
elif isinstance(source, WindowsAzureData):
class_name = source.__class__.__name__
xmlstr += '<' + class_name + '>'
for name, value in vars(source).items():
if value is not None:
if isinstance(value, list) or \
isinstance(value, WindowsAzureData):
xmlstr += _convert_class_to_xml(value, False)
else:
xmlstr += ('<' + _get_serialization_name(name) + '>' +
xml_escape(str(value)) + '</' +
_get_serialization_name(name) + '>')
xmlstr += '</' + class_name + '>'
return xmlstr
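# Illustrative sketch (hypothetical class, not part of this module): each
# non-None attribute becomes one element named via _get_serialization_name,
# so an instance like
#
#   class LockInfo(WindowsAzureData):       # hypothetical
#       def __init__(self):
#           self.lock_duration = 'PT1M'
#           self.requires_session = None    # skipped because it is None
#
# would serialize (with xml_prefix=False) to
# '<LockInfo><LockDuration>PT1M</LockDuration></LockInfo>'.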
def _find_namespaces_from_child(parent, child, namespaces):
"""Recursively searches from the parent to the child,
gathering all the applicable namespaces along the way"""
for cur_child in parent.childNodes:
if cur_child is child:
return True
if _find_namespaces_from_child(cur_child, child, namespaces):
# we are the parent node
for key in cur_child.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
namespaces[key] = cur_child.attributes[key]
break
return False
def _find_namespaces(parent, child):
res = {}
for key in parent.documentElement.attributes.keys():
if key.startswith('xmlns:') or key == 'xmlns':
res[key] = parent.documentElement.attributes[key]
_find_namespaces_from_child(parent, child, res)
return res
def _clone_node_with_namespaces(node_to_clone, original_doc):
clone = node_to_clone.cloneNode(True)
for key, value in _find_namespaces(original_doc, node_to_clone).items():
clone.attributes[key] = value
return clone
def _convert_response_to_feeds(response, convert_func):
if response is None:
return None
feeds = _list_of(Feed)
x_ms_continuation = HeaderDict()
for name, value in response.headers:
if 'x-ms-continuation' in name:
x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value
if x_ms_continuation:
setattr(feeds, 'x_ms_continuation', x_ms_continuation)
xmldoc = minidom.parseString(response.body)
xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')
if not xml_entries:
# in some cases, response contains only entry but no feed
xml_entries = _get_children_from_path(xmldoc, 'entry')
for xml_entry in xml_entries:
new_node = _clone_node_with_namespaces(xml_entry, xmldoc)
feeds.append(convert_func(new_node.toxml('utf-8')))
return feeds
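# Illustrative note: continuation headers are exposed on the returned list,
# e.g. a response header ('x-ms-continuation-nextpartitionkey', 'pk2') shows
# up as feeds.x_ms_continuation['NextPartitionKey'] == 'pk2' (HeaderDict
# lookups lower-case the key; the HTTP client in this commit lower-cases the
# header names it returns).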
def _validate_type_bytes(param_name, param):
if not isinstance(param, bytes):
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _validate_not_none(param_name, param):
if param is None:
raise TypeError(_ERROR_VALUE_NONE.format(param_name))
def _fill_list_of(xmldoc, element_type, xml_element_name):
xmlelements = _get_child_nodes(xmldoc, xml_element_name)
return [_parse_response_body_from_xml_node(xmlelement, element_type) \
for xmlelement in xmlelements]
def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,
xml_element_name):
'''Converts an xml fragment into a list of scalar types. The parent xml
element contains a flat list of xml elements which are converted into the
specified scalar type and added to the list.
Example:
xmldoc=
<Endpoints>
<Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
<Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>
<Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>
</Endpoints>
element_type=str
parent_xml_element_name='Endpoints'
xml_element_name='Endpoint'
'''
xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)
return [_get_node_value(xmlelement, element_type) \
for xmlelement in xmlelements]
def _fill_dict(xmldoc, element_name):
xmlelements = _get_child_nodes(xmldoc, element_name)
if xmlelements:
return_obj = {}
for child in xmlelements[0].childNodes:
if child.firstChild:
return_obj[child.nodeName] = child.firstChild.nodeValue
return return_obj
def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,
key_xml_element_name, value_xml_element_name):
'''Converts an xml fragment into a dictionary. The parent xml element
contains a list of xml elements where each element has a child element for
the key, and another for the value.
Example:
xmldoc=
<ExtendedProperties>
<ExtendedProperty>
<Name>Ext1</Name>
<Value>Val1</Value>
</ExtendedProperty>
<ExtendedProperty>
<Name>Ext2</Name>
<Value>Val2</Value>
</ExtendedProperty>
</ExtendedProperties>
element_type=str
parent_xml_element_name='ExtendedProperties'
pair_xml_element_name='ExtendedProperty'
key_xml_element_name='Name'
value_xml_element_name='Value'
'''
return_obj = {}
xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)
for pair in xmlelements:
keys = _get_child_nodes(pair, key_xml_element_name)
values = _get_child_nodes(pair, value_xml_element_name)
if keys and values:
key = keys[0].firstChild.nodeValue
value = values[0].firstChild.nodeValue
return_obj[key] = value
return return_obj
def _fill_instance_child(xmldoc, element_name, return_type):
'''Converts a child of the current dom element to the specified type.
'''
xmlelements = _get_child_nodes(
xmldoc, _get_serialization_name(element_name))
if not xmlelements:
return None
return_obj = return_type()
_fill_data_to_return_object(xmlelements[0], return_obj)
return return_obj
def _fill_instance_element(element, return_type):
"""Converts a DOM element into the specified object"""
return _parse_response_body_from_xml_node(element, return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
xmlelements = _get_child_nodes(
xmldoc, _get_serialization_name(element_name))
if not xmlelements or not xmlelements[0].childNodes:
return None
value = xmlelements[0].firstChild.nodeValue
if data_member is None:
return value
elif isinstance(data_member, datetime):
return _to_datetime(value)
elif type(data_member) is bool:
return value.lower() != 'false'
else:
return type(data_member)(value)
def _get_node_value(xmlelement, data_type):
value = xmlelement.firstChild.nodeValue
if data_type is datetime:
return _to_datetime(value)
elif data_type is bool:
return value.lower() != 'false'
else:
return data_type(value)
def _get_request_body_bytes_only(param_name, param_value):
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
if param_value is None:
return b''
if isinstance(param_value, bytes):
return param_value
# Previous versions of the SDK allowed data types other than bytes to be
# passed in, and they would be auto-converted to bytes. We preserve this
# behavior when running under 2.7, but issue a warning.
# Python 3 support is new, so we reject anything that's not bytes.
if sys.version_info < (3,):
warnings.warn(_WARNING_VALUE_SHOULD_BE_BYTES.format(param_name))
return _get_request_body(param_value)
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we return empty bytes, if it's one of our objects we
convert it to XML and return it. Otherwise we convert the
object to a string and return it as bytes.'''
if request_body is None:
return b''
if isinstance(request_body, WindowsAzureData):
request_body = _convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
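# Illustrative examples (Python 3 byte literals shown):
#
#   >>> _get_request_body(None)
#   b''
#   >>> _get_request_body(u'<Entity/>')
#   b'<Entity/>'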
def _parse_enum_results_list(response, return_type, resp_type, item_type):
"""resp_body is the XML we received
resp_type is a string, such as Containers,
return_type is the type we're constructing, such as ContainerEnumResults
item_type is the type object of the item to be created, such as Container
This function then returns a ContainerEnumResults object with the
containers member populated with the results.
"""
# parsing something like:
# <EnumerationResults ... >
# <Queues>
# <Queue>
# <Something />
# <SomethingElse />
# </Queue>
# </Queues>
# </EnumerationResults>
respbody = response.body
return_obj = return_type()
doc = minidom.parseString(respbody)
items = []
for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
# path is something like Queues, Queue
for child in _get_children_from_path(enum_results,
resp_type,
resp_type[:-1]):
items.append(_fill_instance_element(child, item_type))
for name, value in vars(return_obj).items():
# queues, Queues, this is the list itself, which we populated
# above
if name == resp_type.lower():
# the list itself.
continue
value = _fill_data_minidom(enum_results, name, value)
if value is not None:
setattr(return_obj, name, value)
setattr(return_obj, resp_type.lower(), items)
return return_obj
def _parse_simple_list(response, type, item_type, list_name):
respbody = response.body
res = type()
res_items = []
doc = minidom.parseString(respbody)
type_name = type.__name__
item_name = item_type.__name__
for item in _get_children_from_path(doc, type_name, item_name):
res_items.append(_fill_instance_element(item, item_type))
setattr(res, list_name, res_items)
return res
def _parse_response(response, return_type):
'''
Parse the HTTPResponse's body and fill all the data into a class of
return_type.
'''
return _parse_response_body_from_xml_text(response.body, return_type)
def _fill_data_to_return_object(node, return_obj):
members = dict(vars(return_obj))
for name, value in members.items():
if isinstance(value, _list_of):
setattr(return_obj,
name,
_fill_list_of(node,
value.list_type,
value.xml_element_name))
elif isinstance(value, _scalar_list_of):
setattr(return_obj,
name,
_fill_scalar_list_of(node,
value.list_type,
_get_serialization_name(name),
value.xml_element_name))
elif isinstance(value, _dict_of):
setattr(return_obj,
name,
_fill_dict_of(node,
_get_serialization_name(name),
value.pair_xml_element_name,
value.key_xml_element_name,
value.value_xml_element_name))
elif isinstance(value, WindowsAzureData):
setattr(return_obj,
name,
_fill_instance_child(node, name, value.__class__))
elif isinstance(value, dict):
setattr(return_obj,
name,
_fill_dict(node, _get_serialization_name(name)))
elif isinstance(value, _Base64String):
value = _fill_data_minidom(node, name, '')
if value is not None:
value = _decode_base64_to_text(value)
# always set the attribute, so we don't end up returning an object
# with type _Base64String
setattr(return_obj, name, value)
else:
value = _fill_data_minidom(node, name, value)
if value is not None:
setattr(return_obj, name, value)
def _parse_response_body_from_xml_node(node, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
return_obj = return_type()
_fill_data_to_return_object(node, return_obj)
return return_obj
def _parse_response_body_from_xml_text(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
doc = minidom.parseString(respbody)
return_obj = return_type()
for node in _get_child_nodes(doc, return_type.__name__):
_fill_data_to_return_object(node, return_obj)
return return_obj
class _dict_of(dict):
"""a dict which carries with it the xml element names for key,val.
Used for deserializaion and construction of the lists"""
def __init__(self, pair_xml_element_name, key_xml_element_name,
value_xml_element_name):
self.pair_xml_element_name = pair_xml_element_name
self.key_xml_element_name = key_xml_element_name
self.value_xml_element_name = value_xml_element_name
super(_dict_of, self).__init__()
class _list_of(list):
"""a list which carries with it the type that's expected to go in it.
Used for deserialization and construction of the lists"""
def __init__(self, list_type, xml_element_name=None):
self.list_type = list_type
if xml_element_name is None:
self.xml_element_name = list_type.__name__
else:
self.xml_element_name = xml_element_name
super(_list_of, self).__init__()
class _scalar_list_of(list):
"""a list of scalar types which carries with it the type that's
expected to go in it along with its xml element name.
Used for deserialization and construction of the lists"""
def __init__(self, list_type, xml_element_name):
self.list_type = list_type
self.xml_element_name = xml_element_name
super(_scalar_list_of, self).__init__()
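# Illustrative sketch (hypothetical class, not part of this module): a
# WindowsAzureData subclass declares these marker collections so that
# _fill_data_to_return_object above knows how to deserialize each member:
#
#   class ServiceProperties(WindowsAzureData):             # hypothetical
#       def __init__(self):
#           # filled from <Endpoints><Endpoint>..</Endpoint>..</Endpoints>
#           self.endpoints = _scalar_list_of(str, 'Endpoint')
#           # filled from <ExtendedProperties><ExtendedProperty>
#           #   <Name>..</Name><Value>..</Value></ExtendedProperty>..
#           self.extended_properties = _dict_of(
#               'ExtendedProperty', 'Name', 'Value')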
def _update_request_uri_query_local_storage(request, use_local_storage):
''' create correct uri and query for the request '''
uri, query = _update_request_uri_query(request)
if use_local_storage:
return '/' + DEV_ACCOUNT_NAME + uri, query
return uri, query
def _update_request_uri_query(request):
'''pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters'''
if '?' in request.path:
request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
name, _, value = query.partition('=')
request.query.append((name, value))
request.path = url_quote(request.path, '/()$=\',')
# add encoded queries to request.path.
if request.query:
request.path += '?'
for name, value in request.query:
if value is not None:
request.path += name + '=' + url_quote(value, '/()$=\',') + '&'
request.path = request.path[:-1]
return request.path, request.query
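# Illustrative example (hypothetical queue path): given a request whose path
# is '/myqueue/messages?visibilitytimeout=60&timeout=30' and whose query list
# is empty, after the call:
#   request.query -> [('visibilitytimeout', '60'), ('timeout', '30')]
#   request.path  -> '/myqueue/messages?visibilitytimeout=60&timeout=30'
# (the parameters are re-appended to the path URL-quoted).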
def _dont_fail_on_exist(error):
''' don't throw exception if the resource exists.
This is called by create_* APIs with fail_on_exist=False'''
if isinstance(error, WindowsAzureConflictError):
return False
else:
raise error
def _dont_fail_not_exist(error):
''' don't throw exception if the resource doesn't exist.
This is called by delete_* APIs with fail_not_exist=False'''
if isinstance(error, WindowsAzureMissingResourceError):
return False
else:
raise error
def _general_error_handler(http_error):
''' Simple error handler for azure.'''
if http_error.status == 409:
raise WindowsAzureConflictError(
_ERROR_CONFLICT.format(str(http_error)))
elif http_error.status == 404:
raise WindowsAzureMissingResourceError(
_ERROR_NOT_FOUND.format(str(http_error)))
else:
if http_error.respbody is not None:
raise WindowsAzureError(
_ERROR_UNKNOWN.format(str(http_error)) + '\n' + \
http_error.respbody.decode('utf-8'))
else:
raise WindowsAzureError(_ERROR_UNKNOWN.format(str(http_error)))
def _parse_response_for_dict(response):
''' Extracts name-value pairs from the response headers. Filters out the
standard http headers.'''
if response is None:
return None
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'connection']
return_dict = HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict
def _parse_response_for_dict_prefix(response, prefixes):
''' Extracts name-value pairs for names starting with one of the prefixes
from the response headers. Filters out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
for prefix_value in prefixes:
if name.lower().startswith(prefix_value.lower()):
return_dict[name] = value
break
return return_dict
else:
return None
def _parse_response_for_dict_filter(response, filter):
''' Extracts name-value pairs for names in filter from the response
headers. Filters out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
if name.lower() in filter:
return_dict[name] = value
return return_dict
else:
return None


@@ -0,0 +1,73 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
HTTP_RESPONSE_NO_CONTENT = 204
class HTTPError(Exception):
''' HTTP Exception when response status code >= 300 '''
def __init__(self, status, message, respheader, respbody):
'''Creates a new HTTPError with the specified status, message,
response headers and body'''
self.status = status
self.respheader = respheader
self.respbody = respbody
Exception.__init__(self, message)
class HTTPResponse(object):
"""Represents a response from an HTTP request. An HTTPResponse has the
following attributes:
status: the status code of the response
message: the message
headers: the returned headers, as a list of (name, value) pairs
body: the body of the response
"""
def __init__(self, status, message, headers, body):
self.status = status
self.message = message
self.headers = headers
self.body = body
class HTTPRequest(object):
'''Represents an HTTP Request. An HTTP Request consists of the following
attributes:
host: the host name to connect to
method: the method to use to connect (string such as GET, POST, PUT, etc.)
path: the uri fragment
query: query parameters specified as a list of (name, value) pairs
headers: header values specified as (name, value) pairs
body: the body of the request.
protocol_override:
specify to use this protocol instead of the global one stored in
_HTTPClient.
'''
def __init__(self):
self.host = ''
self.method = ''
self.path = ''
self.query = [] # list of (name, value)
self.headers = [] # list of (header name, header value)
self.body = ''
self.protocol_override = None


@@ -0,0 +1,339 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import uuid
from azure import (
_update_request_uri_query,
WindowsAzureError,
WindowsAzureBatchOperationError,
_get_children_from_path,
url_unquote,
_ERROR_CANNOT_FIND_PARTITION_KEY,
_ERROR_CANNOT_FIND_ROW_KEY,
_ERROR_INCORRECT_TABLE_IN_BATCH,
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
_ERROR_BATCH_COMMIT_FAIL,
)
from azure.http import HTTPError, HTTPRequest, HTTPResponse
from azure.http.httpclient import _HTTPClient
from azure.storage import (
_update_storage_table_header,
METADATA_NS,
_sign_storage_table_request,
)
from xml.dom import minidom
_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'
if sys.version_info < (3,):
def _new_boundary():
return str(uuid.uuid1())
else:
def _new_boundary():
return str(uuid.uuid1()).encode('utf-8')
class _BatchClient(_HTTPClient):
'''
This is the class that is used for batch operations for the storage table
service. It only supports one changeset.
'''
def __init__(self, service_instance, account_key, account_name,
protocol='http'):
_HTTPClient.__init__(self, service_instance, account_name=account_name,
account_key=account_key, protocol=protocol)
self.is_batch = False
self.batch_requests = []
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
def get_request_table(self, request):
'''
Extracts the table name from request.path. The request.path has either
"/mytable(...)" or "/mytable" format.
request: the request to insert, update or delete entity
'''
if '(' in request.path:
pos = request.path.find('(')
return request.path[1:pos]
else:
return request.path[1:]
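# Illustrative examples (hypothetical paths):
#   "/mytable(PartitionKey='pk',RowKey='rk')"  ->  'mytable'
#   '/mytable'                                 ->  'mytable'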
def get_request_partition_key(self, request):
'''
Extracts the PartitionKey from request.body if it is a POST request, or
from request.path otherwise. Only an insert operation uses a POST
request, and in that case the PartitionKey is in the request body.
request: the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = minidom.parseString(request.body)
part_key = _get_children_from_path(
doc, 'entry', 'content', (METADATA_NS, 'properties'),
(_DATASERVICES_NS, 'PartitionKey'))
if not part_key:
raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return part_key[0].firstChild.nodeValue
else:
uri = url_unquote(request.path)
pos1 = uri.find('PartitionKey=\'')
pos2 = uri.find('\',', pos1)
if pos1 == -1 or pos2 == -1:
raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return uri[pos1 + len('PartitionKey=\''):pos2]
def get_request_row_key(self, request):
'''
Extracts the RowKey from request.body if it is a POST request, or from
request.path otherwise. Only an insert operation uses a POST request,
and in that case the RowKey is in the request body.
request: the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = minidom.parseString(request.body)
row_key = _get_children_from_path(
doc, 'entry', 'content', (METADATA_NS, 'properties'),
(_DATASERVICES_NS, 'RowKey'))
if not row_key:
raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
return row_key[0].firstChild.nodeValue
else:
uri = url_unquote(request.path)
pos1 = uri.find('RowKey=\'')
pos2 = uri.find('\')', pos1)
if pos1 == -1 or pos2 == -1:
raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
row_key = uri[pos1 + len('RowKey=\''):pos2]
return row_key
def validate_request_table(self, request):
'''
Validates that all requests have the same table name. Sets the table
name if it is the first request of the batch operation.
request: the request to insert, update or delete entity
'''
if self.batch_table:
if self.get_request_table(request) != self.batch_table:
raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH)
else:
self.batch_table = self.get_request_table(request)
def validate_request_partition_key(self, request):
'''
Validates that all requests have the same PartitionKey. Sets the
PartitionKey if it is the first request of the batch operation.
request: the request to insert, update or delete entity
'''
if self.batch_partition_key:
if self.get_request_partition_key(request) != \
self.batch_partition_key:
raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
else:
self.batch_partition_key = self.get_request_partition_key(request)
def validate_request_row_key(self, request):
'''
Validates that all requests have different RowKeys and adds the RowKey
to the existing RowKey list.
request: the request to insert, update or delete entity
'''
if self.batch_row_keys:
if self.get_request_row_key(request) in self.batch_row_keys:
raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
else:
self.batch_row_keys.append(self.get_request_row_key(request))
def begin_batch(self):
'''
Starts the batch operation. Initializes the batch variables
is_batch: batch operation flag.
batch_table: the table name of the batch operation
batch_partition_key: the PartitionKey of the batch requests.
batch_row_keys: the RowKey list of adding requests.
batch_requests: the list of the requests.
'''
self.is_batch = True
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
self.batch_requests = []
def insert_request_to_batch(self, request):
'''
Adds request to batch operation.
request: the request to insert, update or delete entity
'''
self.validate_request_table(request)
self.validate_request_partition_key(request)
self.validate_request_row_key(request)
self.batch_requests.append(request)
def commit_batch(self):
''' Resets batch flag and commits the batch requests. '''
if self.is_batch:
self.is_batch = False
self.commit_batch_requests()
def commit_batch_requests(self):
''' Commits the batch requests. '''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()
# Commit the batch only if the requests list is not empty.
if self.batch_requests:
request = HTTPRequest()
request.method = 'POST'
request.host = self.batch_requests[0].host
request.path = '/$batch'
request.headers = [
('Content-Type', 'multipart/mixed; boundary=' + \
batch_boundary.decode('utf-8')),
('Accept', 'application/atom+xml,application/xml'),
('Accept-Charset', 'UTF-8')]
request.body = b'--' + batch_boundary + b'\n'
request.body += b'Content-Type: multipart/mixed; boundary='
request.body += changeset_boundary + b'\n\n'
content_id = 1
# Adds each request body to the POST data.
for batch_request in self.batch_requests:
request.body += b'--' + changeset_boundary + b'\n'
request.body += b'Content-Type: application/http\n'
request.body += b'Content-Transfer-Encoding: binary\n\n'
request.body += batch_request.method.encode('utf-8')
request.body += b' http://'
request.body += batch_request.host.encode('utf-8')
request.body += batch_request.path.encode('utf-8')
request.body += b' HTTP/1.1\n'
request.body += b'Content-ID: '
request.body += str(content_id).encode('utf-8') + b'\n'
content_id += 1
# Add different headers for different type requests.
if batch_request.method != 'DELETE':
request.body += \
b'Content-Type: application/atom+xml;type=entry\n'
for name, value in batch_request.headers:
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n'
break
request.body += b'Content-Length: '
request.body += str(len(batch_request.body)).encode('utf-8')
request.body += b'\n\n'
request.body += batch_request.body + b'\n'
else:
for name, value in batch_request.headers:
# If-Match should be already included in
# batch_request.headers, but in case it is missing,
# just add it.
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n\n'
break
else:
request.body += b'If-Match: *\n\n'
request.body += b'--' + changeset_boundary + b'--' + b'\n'
request.body += b'--' + batch_boundary + b'--'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_storage_table_header(request)
auth = _sign_storage_table_request(request,
self.account_name,
self.account_key)
request.headers.append(('Authorization', auth))
# Submit the whole request as batch request.
response = self.perform_request(request)
if response.status >= 300:
raise HTTPError(response.status,
_ERROR_BATCH_COMMIT_FAIL,
self.respheader,
response.body)
# http://www.odata.org/documentation/odata-version-2-0/batch-processing/
# The body of a ChangeSet response is either a response for all the
# successfully processed change request within the ChangeSet,
# formatted exactly as it would have appeared outside of a batch,
# or a single response indicating a failure of the entire ChangeSet.
responses = self._parse_batch_response(response.body)
if responses and responses[0].status >= 300:
self._report_batch_error(responses[0])
def cancel_batch(self):
''' Resets the batch flag. '''
self.is_batch = False
def _parse_batch_response(self, body):
parts = body.split(b'--changesetresponse_')
responses = []
for part in parts:
httpLocation = part.find(b'HTTP/')
if httpLocation > 0:
response = self._parse_batch_response_part(part[httpLocation:])
responses.append(response)
return responses
def _parse_batch_response_part(self, part):
lines = part.splitlines()
# First line is the HTTP status/reason
status, _, reason = lines[0].partition(b' ')[2].partition(b' ')
# Followed by headers and body
headers = []
body = b''
isBody = False
for line in lines[1:]:
if line == b'' and not isBody:
isBody = True
elif isBody:
body += line
else:
headerName, _, headerVal = line.partition(b':')
headers.append((headerName.lower(), headerVal))
return HTTPResponse(int(status), reason.strip(), headers, body)
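# Illustrative sketch (hypothetical changeset fragment): given the part
#   b'HTTP/1.1 204 No Content\nContent-ID: 1\nDataServiceVersion: 1.0;\n\n'
# this returns an HTTPResponse with status 204, reason b'No Content', the two
# headers (names lower-cased, values kept as the raw bytes after the colon,
# leading space included) and an empty body.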
def _report_batch_error(self, response):
xml = response.body.decode('utf-8')
doc = minidom.parseString(xml)
n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'code')
code = n[0].firstChild.nodeValue if n and n[0].firstChild else ''
n = _get_children_from_path(doc, (METADATA_NS, 'error'), 'message')
message = n[0].firstChild.nodeValue if n and n[0].firstChild else xml
raise WindowsAzureBatchOperationError(message, code)


@@ -0,0 +1,223 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import base64
import os
import sys
if sys.version_info < (3,):
from httplib import (
HTTPSConnection,
HTTPConnection,
HTTP_PORT,
HTTPS_PORT,
)
from urlparse import urlparse
else:
from http.client import (
HTTPSConnection,
HTTPConnection,
HTTP_PORT,
HTTPS_PORT,
)
from urllib.parse import urlparse
from azure.http import HTTPError, HTTPResponse
from azure import _USER_AGENT_STRING, _update_request_uri_query
class _HTTPClient(object):
'''
Takes a request, sends it to the cloud service, and returns the response.
'''
def __init__(self, service_instance, cert_file=None, account_name=None,
account_key=None, service_namespace=None, issuer=None,
protocol='https'):
'''
service_instance: service client instance.
cert_file:
certificate file name/location. This is only used in hosted
service management.
account_name: the storage account.
account_key:
the storage account access key for storage services or servicebus
access key for service bus service.
service_namespace: the service namespace for service bus.
issuer: the issuer for service bus service.
'''
self.service_instance = service_instance
self.status = None
self.respheader = None
self.message = None
self.cert_file = cert_file
self.account_name = account_name
self.account_key = account_key
self.service_namespace = service_namespace
self.issuer = issuer
self.protocol = protocol
self.proxy_host = None
self.proxy_port = None
self.proxy_user = None
self.proxy_password = None
self.use_httplib = self.should_use_httplib()
def should_use_httplib(self):
if sys.platform.lower().startswith('win') and self.cert_file:
# On Windows, auto-detect between Windows Store Certificate
# (winhttp) and OpenSSL .pem certificate file (httplib).
#
# We used to only support certificates installed in the Windows
# Certificate Store.
# cert_file example: CURRENT_USER\my\CertificateName
#
# We now support using an OpenSSL .pem certificate file,
# for a consistent experience across all platforms.
# cert_file example: account\certificate.pem
#
# When using OpenSSL .pem certificate file on Windows, make sure
# you are on CPython 2.7.4 or later.
# If it's not an existing file on disk, then treat it as a path in
# the Windows Certificate Store, which means we can't use httplib.
if not os.path.isfile(self.cert_file):
return False
return True
def set_proxy(self, host, port, user, password):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self.proxy_host = host
self.proxy_port = port
self.proxy_user = user
self.proxy_password = password
def get_connection(self, request):
''' Create connection for the request. '''
protocol = request.protocol_override \
if request.protocol_override else self.protocol
target_host = request.host
target_port = HTTP_PORT if protocol == 'http' else HTTPS_PORT
if not self.use_httplib:
import azure.http.winhttp
connection = azure.http.winhttp._HTTPConnection(
target_host, cert_file=self.cert_file, protocol=protocol)
proxy_host = self.proxy_host
proxy_port = self.proxy_port
else:
if ':' in target_host:
target_host, _, target_port = target_host.rpartition(':')
if self.proxy_host:
proxy_host = target_host
proxy_port = target_port
host = self.proxy_host
port = self.proxy_port
else:
host = target_host
port = target_port
if protocol == 'http':
connection = HTTPConnection(host, int(port))
else:
connection = HTTPSConnection(
host, int(port), cert_file=self.cert_file)
if self.proxy_host:
headers = None
if self.proxy_user and self.proxy_password:
auth = base64.b64encode("{0}:{1}".format(
self.proxy_user, self.proxy_password).encode('utf-8')).decode('ascii')
headers = {'Proxy-Authorization': 'Basic {0}'.format(auth)}
connection.set_tunnel(proxy_host, int(proxy_port), headers)
return connection
def send_request_headers(self, connection, request_headers):
if self.use_httplib:
if self.proxy_host:
for i in connection._buffer:
if i.startswith("Host: "):
connection._buffer.remove(i)
connection.putheader(
'Host', "{0}:{1}".format(connection._tunnel_host,
connection._tunnel_port))
for name, value in request_headers:
if value:
connection.putheader(name, value)
connection.putheader('User-Agent', _USER_AGENT_STRING)
connection.endheaders()
def send_request_body(self, connection, request_body):
if request_body:
assert isinstance(request_body, bytes)
connection.send(request_body)
elif (not isinstance(connection, HTTPSConnection) and
not isinstance(connection, HTTPConnection)):
connection.send(None)
def perform_request(self, request):
''' Sends the request to the cloud service and returns the response. '''
connection = self.get_connection(request)
try:
connection.putrequest(request.method, request.path)
if not self.use_httplib:
if self.proxy_host and self.proxy_user:
connection.set_proxy_credentials(
self.proxy_user, self.proxy_password)
self.send_request_headers(connection, request.headers)
self.send_request_body(connection, request.body)
resp = connection.getresponse()
self.status = int(resp.status)
self.message = resp.reason
self.respheader = headers = resp.getheaders()
# for consistency across platforms, make header names lowercase
for i, value in enumerate(headers):
headers[i] = (value[0].lower(), value[1])
respbody = None
if resp.length is None:
respbody = resp.read()
elif resp.length > 0:
respbody = resp.read(resp.length)
response = HTTPResponse(
int(resp.status), resp.reason, headers, respbody)
if self.status == 307:
new_url = urlparse(dict(headers)['location'])
request.host = new_url.hostname
request.path = new_url.path
request.path, request.query = _update_request_uri_query(request)
return self.perform_request(request)
if self.status >= 300:
raise HTTPError(self.status, self.message,
self.respheader, respbody)
return response
finally:
connection.close()


@@ -0,0 +1,471 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from ctypes import (
c_void_p,
c_long,
c_ulong,
c_longlong,
c_ulonglong,
c_short,
c_ushort,
c_wchar_p,
c_byte,
byref,
Structure,
Union,
POINTER,
WINFUNCTYPE,
HRESULT,
oledll,
WinDLL,
)
import ctypes
import sys
if sys.version_info >= (3,):
def unicode(text):
return text
#------------------------------------------------------------------------------
# Constants that are used in COM operations
VT_EMPTY = 0
VT_NULL = 1
VT_I2 = 2
VT_I4 = 3
VT_BSTR = 8
VT_BOOL = 11
VT_I1 = 16
VT_UI1 = 17
VT_UI2 = 18
VT_UI4 = 19
VT_I8 = 20
VT_UI8 = 21
VT_ARRAY = 8192
HTTPREQUEST_PROXYSETTING_PROXY = 2
HTTPREQUEST_SETCREDENTIALS_FOR_PROXY = 1
HTTPREQUEST_PROXY_SETTING = c_long
HTTPREQUEST_SETCREDENTIALS_FLAGS = c_long
#------------------------------------------------------------------------------
# Com related APIs that are used.
_ole32 = oledll.ole32
_oleaut32 = WinDLL('oleaut32')
_CLSIDFromString = _ole32.CLSIDFromString
_CoInitialize = _ole32.CoInitialize
_CoInitialize.argtypes = [c_void_p]
_CoCreateInstance = _ole32.CoCreateInstance
_SysAllocString = _oleaut32.SysAllocString
_SysAllocString.restype = c_void_p
_SysAllocString.argtypes = [c_wchar_p]
_SysFreeString = _oleaut32.SysFreeString
_SysFreeString.argtypes = [c_void_p]
# SAFEARRAY*
# SafeArrayCreateVector(_In_ VARTYPE vt,_In_ LONG lLbound,_In_ ULONG
# cElements);
_SafeArrayCreateVector = _oleaut32.SafeArrayCreateVector
_SafeArrayCreateVector.restype = c_void_p
_SafeArrayCreateVector.argtypes = [c_ushort, c_long, c_ulong]
# HRESULT
# SafeArrayAccessData(_In_ SAFEARRAY *psa, _Out_ void **ppvData);
_SafeArrayAccessData = _oleaut32.SafeArrayAccessData
_SafeArrayAccessData.argtypes = [c_void_p, POINTER(c_void_p)]
# HRESULT
# SafeArrayUnaccessData(_In_ SAFEARRAY *psa);
_SafeArrayUnaccessData = _oleaut32.SafeArrayUnaccessData
_SafeArrayUnaccessData.argtypes = [c_void_p]
# HRESULT
# SafeArrayGetUBound(_In_ SAFEARRAY *psa, _In_ UINT nDim, _Out_ LONG
# *plUbound);
_SafeArrayGetUBound = _oleaut32.SafeArrayGetUBound
_SafeArrayGetUBound.argtypes = [c_void_p, c_ulong, POINTER(c_long)]
#------------------------------------------------------------------------------
class BSTR(c_wchar_p):
''' BSTR class in python. '''
def __init__(self, value):
super(BSTR, self).__init__(_SysAllocString(value))
def __del__(self):
_SysFreeString(self)
class VARIANT(Structure):
'''
VARIANT structure in python. Does not match the definition in
MSDN exactly; it only maps the fields that are used. Field names are
also slightly different.
'''
class _tagData(Union):
class _tagRecord(Structure):
_fields_ = [('pvoid', c_void_p), ('precord', c_void_p)]
_fields_ = [('llval', c_longlong),
('ullval', c_ulonglong),
('lval', c_long),
('ulval', c_ulong),
('ival', c_short),
('boolval', c_ushort),
('bstrval', BSTR),
('parray', c_void_p),
('record', _tagRecord)]
_fields_ = [('vt', c_ushort),
('wReserved1', c_ushort),
('wReserved2', c_ushort),
('wReserved3', c_ushort),
('vdata', _tagData)]
@staticmethod
def create_empty():
variant = VARIANT()
variant.vt = VT_EMPTY
variant.vdata.llval = 0
return variant
@staticmethod
def create_safearray_from_str(text):
variant = VARIANT()
variant.vt = VT_ARRAY | VT_UI1
length = len(text)
variant.vdata.parray = _SafeArrayCreateVector(VT_UI1, 0, length)
pvdata = c_void_p()
_SafeArrayAccessData(variant.vdata.parray, byref(pvdata))
ctypes.memmove(pvdata, text, length)
_SafeArrayUnaccessData(variant.vdata.parray)
return variant
@staticmethod
def create_bstr_from_str(text):
variant = VARIANT()
variant.vt = VT_BSTR
variant.vdata.bstrval = BSTR(text)
return variant
@staticmethod
def create_bool_false():
variant = VARIANT()
variant.vt = VT_BOOL
variant.vdata.boolval = 0
return variant
def is_safearray_of_bytes(self):
return self.vt == VT_ARRAY | VT_UI1
def str_from_safearray(self):
assert self.vt == VT_ARRAY | VT_UI1
pvdata = c_void_p()
count = c_long()
_SafeArrayGetUBound(self.vdata.parray, 1, byref(count))
count = c_long(count.value + 1)
_SafeArrayAccessData(self.vdata.parray, byref(pvdata))
text = ctypes.string_at(pvdata, count)
_SafeArrayUnaccessData(self.vdata.parray)
return text
def __del__(self):
_VariantClear(self)
# HRESULT VariantClear(_Inout_ VARIANTARG *pvarg);
_VariantClear = _oleaut32.VariantClear
_VariantClear.argtypes = [POINTER(VARIANT)]
class GUID(Structure):
''' GUID structure in python. '''
_fields_ = [("data1", c_ulong),
("data2", c_ushort),
("data3", c_ushort),
("data4", c_byte * 8)]
def __init__(self, name=None):
if name is not None:
_CLSIDFromString(unicode(name), byref(self))
class _WinHttpRequest(c_void_p):
'''
Maps the Com API to Python class functions. Not all methods in
IWinHttpWebRequest are mapped - only the methods we use.
'''
_AddRef = WINFUNCTYPE(c_long) \
(1, 'AddRef')
_Release = WINFUNCTYPE(c_long) \
(2, 'Release')
_SetProxy = WINFUNCTYPE(HRESULT,
HTTPREQUEST_PROXY_SETTING,
VARIANT,
VARIANT) \
(7, 'SetProxy')
_SetCredentials = WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
HTTPREQUEST_SETCREDENTIALS_FLAGS) \
(8, 'SetCredentials')
_Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT) \
(9, 'Open')
_SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR) \
(10, 'SetRequestHeader')
_GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p)) \
(11, 'GetResponseHeader')
_GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \
(12, 'GetAllResponseHeaders')
_Send = WINFUNCTYPE(HRESULT, VARIANT) \
(13, 'Send')
_Status = WINFUNCTYPE(HRESULT, POINTER(c_long)) \
(14, 'Status')
_StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \
(15, 'StatusText')
_ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p)) \
(16, 'ResponseText')
_ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \
(17, 'ResponseBody')
_ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT)) \
(18, 'ResponseStream')
_WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort)) \
(21, 'WaitForResponse')
_Abort = WINFUNCTYPE(HRESULT) \
(22, 'Abort')
_SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long) \
(23, 'SetTimeouts')
_SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR) \
(24, 'SetClientCertificate')
def open(self, method, url):
'''
Opens the request.
method: the request VERB 'GET', 'POST', etc.
url: the url to connect
'''
_WinHttpRequest._SetTimeouts(self, 0, 65000, 65000, 65000)
flag = VARIANT.create_bool_false()
_method = BSTR(method)
_url = BSTR(url)
_WinHttpRequest._Open(self, _method, _url, flag)
def set_request_header(self, name, value):
''' Sets the request header. '''
_name = BSTR(name)
_value = BSTR(value)
_WinHttpRequest._SetRequestHeader(self, _name, _value)
def get_all_response_headers(self):
''' Gets back all response headers. '''
bstr_headers = c_void_p()
_WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers))
bstr_headers = ctypes.cast(bstr_headers, c_wchar_p)
headers = bstr_headers.value
_SysFreeString(bstr_headers)
return headers
def send(self, request=None):
''' Sends the request body. '''
# Send VT_EMPTY if it is a GET or HEAD request.
if request is None:
var_empty = VARIANT.create_empty()
_WinHttpRequest._Send(self, var_empty)
else: # Sends request body as SAFEArray.
_request = VARIANT.create_safearray_from_str(request)
_WinHttpRequest._Send(self, _request)
def status(self):
''' Gets status of response. '''
status = c_long()
_WinHttpRequest._Status(self, byref(status))
return int(status.value)
def status_text(self):
''' Gets status text of response. '''
bstr_status_text = c_void_p()
_WinHttpRequest._StatusText(self, byref(bstr_status_text))
bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)
status_text = bstr_status_text.value
_SysFreeString(bstr_status_text)
return status_text
def response_body(self):
'''
Gets the response body as a SAFEARRAY and converts the SAFEARRAY to str.
If it is an xml payload, it starts with a 3-byte UTF-8 BOM before <?xml,
so we remove it.
'''
var_respbody = VARIANT()
_WinHttpRequest._ResponseBody(self, byref(var_respbody))
if var_respbody.is_safearray_of_bytes():
respbody = var_respbody.str_from_safearray()
if respbody[3:].startswith(b'<?xml') and\
respbody.startswith(b'\xef\xbb\xbf'):
respbody = respbody[3:]
return respbody
else:
return ''
def set_client_certificate(self, certificate):
'''Sets client certificate for the request. '''
_certificate = BSTR(certificate)
_WinHttpRequest._SetClientCertificate(self, _certificate)
def set_tunnel(self, host, port):
''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''
url = host
if port:
url = url + u':' + port
var_host = VARIANT.create_bstr_from_str(url)
var_empty = VARIANT.create_empty()
_WinHttpRequest._SetProxy(
self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)
def set_proxy_credentials(self, user, password):
_WinHttpRequest._SetCredentials(
self, BSTR(user), BSTR(password),
HTTPREQUEST_SETCREDENTIALS_FOR_PROXY)
def __del__(self):
if self.value is not None:
_WinHttpRequest._Release(self)
class _Response(object):
''' Response class corresponding to the response returned from httplib
HTTPConnection. '''
def __init__(self, _status, _status_text, _length, _headers, _respbody):
self.status = _status
self.reason = _status_text
self.length = _length
self.headers = _headers
self.respbody = _respbody
def getheaders(self):
'''Returns response headers.'''
return self.headers
def read(self, _length):
'''Returns the response body. '''
return self.respbody[:_length]
class _HTTPConnection(object):
''' Class corresponding to httplib HTTPConnection class. '''
def __init__(self, host, cert_file=None, key_file=None, protocol='http'):
''' Initializes the IWinHttpWebRequest COM object.'''
self.host = unicode(host)
self.cert_file = cert_file
self._httprequest = _WinHttpRequest()
self.protocol = protocol
clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')
iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')
_CoInitialize(None)
_CoCreateInstance(byref(clsid), 0, 1, byref(iid),
byref(self._httprequest))
def close(self):
pass
def set_tunnel(self, host, port=None, headers=None):
''' Sets up the host and the port for the HTTP CONNECT Tunnelling. '''
self._httprequest.set_tunnel(unicode(host), unicode(str(port)))
def set_proxy_credentials(self, user, password):
self._httprequest.set_proxy_credentials(
unicode(user), unicode(password))
def putrequest(self, method, uri):
''' Connects to host and sends the request. '''
protocol = unicode(self.protocol + '://')
url = protocol + self.host + unicode(uri)
self._httprequest.open(unicode(method), url)
# sets certificate for the connection if cert_file is set.
if self.cert_file is not None:
self._httprequest.set_client_certificate(unicode(self.cert_file))
def putheader(self, name, value):
''' Sends the headers of request. '''
if sys.version_info < (3,):
name = str(name).decode('utf-8')
value = str(value).decode('utf-8')
self._httprequest.set_request_header(name, value)
def endheaders(self):
        ''' No operation. Exists only to provide the same interface as httplib
        HTTPConnection.'''
pass
def send(self, request_body):
''' Sends request body. '''
if not request_body:
self._httprequest.send()
else:
self._httprequest.send(request_body)
def getresponse(self):
        ''' Gets the response and generates the _Response object. '''
status = self._httprequest.status()
status_text = self._httprequest.status_text()
resp_headers = self._httprequest.get_all_response_headers()
fixed_headers = []
for resp_header in resp_headers.split('\n'):
if (resp_header.startswith('\t') or\
resp_header.startswith(' ')) and fixed_headers:
# append to previous header
fixed_headers[-1] += resp_header
else:
fixed_headers.append(resp_header)
headers = []
for resp_header in fixed_headers:
if ':' in resp_header:
pos = resp_header.find(':')
headers.append(
(resp_header[:pos].lower(), resp_header[pos + 1:].strip()))
body = self._httprequest.response_body()
length = len(body)
return _Response(status, status_text, length, headers, body)
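# The sketch below is an editorial illustration, not part of the original
# diff. It shows how _HTTPConnection mirrors the httplib HTTPConnection
# interface. It is only meaningful on Windows, where the WinHttpRequest COM
# object used above is available; the host, path and header values are
# placeholders.
def _example_winhttp_roundtrip():
    conn = _HTTPConnection(u'example.blob.core.windows.net',
                           protocol='https')
    conn.putrequest('GET', '/?comp=list')
    conn.putheader('x-ms-version', '2011-08-18')
    conn.endheaders()
    conn.send('')
    resp = conn.getresponse()
    # resp mimics httplib's response: status, reason, getheaders(), read().
    return resp.status, resp.reason, resp.read(resp.length)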

View File

@ -0,0 +1,851 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import sys
from datetime import datetime
from xml.dom import minidom
from azure import (
WindowsAzureData,
WindowsAzureError,
xml_escape,
_create_entry,
_general_error_handler,
_get_entry_properties,
_get_child_nodes,
_get_children_from_path,
_get_first_child_node_value,
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE,
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK,
_ERROR_QUEUE_NOT_FOUND,
_ERROR_TOPIC_NOT_FOUND,
)
from azure.http import HTTPError
# default rule name for subscription
DEFAULT_RULE_NAME = '$Default'
#-----------------------------------------------------------------------------
# Constants for Azure app environment settings.
AZURE_SERVICEBUS_NAMESPACE = 'AZURE_SERVICEBUS_NAMESPACE'
AZURE_SERVICEBUS_ACCESS_KEY = 'AZURE_SERVICEBUS_ACCESS_KEY'
AZURE_SERVICEBUS_ISSUER = 'AZURE_SERVICEBUS_ISSUER'
# namespace used for converting rules to objects
XML_SCHEMA_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
class Queue(WindowsAzureData):
''' Queue class corresponding to Queue Description:
http://msdn.microsoft.com/en-us/library/windowsazure/hh780773'''
def __init__(self, lock_duration=None, max_size_in_megabytes=None,
requires_duplicate_detection=None, requires_session=None,
default_message_time_to_live=None,
dead_lettering_on_message_expiration=None,
duplicate_detection_history_time_window=None,
max_delivery_count=None, enable_batched_operations=None,
size_in_bytes=None, message_count=None):
self.lock_duration = lock_duration
self.max_size_in_megabytes = max_size_in_megabytes
self.requires_duplicate_detection = requires_duplicate_detection
self.requires_session = requires_session
self.default_message_time_to_live = default_message_time_to_live
self.dead_lettering_on_message_expiration = \
dead_lettering_on_message_expiration
self.duplicate_detection_history_time_window = \
duplicate_detection_history_time_window
self.max_delivery_count = max_delivery_count
self.enable_batched_operations = enable_batched_operations
self.size_in_bytes = size_in_bytes
self.message_count = message_count
class Topic(WindowsAzureData):
''' Topic class corresponding to Topic Description:
http://msdn.microsoft.com/en-us/library/windowsazure/hh780749. '''
def __init__(self, default_message_time_to_live=None,
max_size_in_megabytes=None, requires_duplicate_detection=None,
duplicate_detection_history_time_window=None,
enable_batched_operations=None, size_in_bytes=None):
self.default_message_time_to_live = default_message_time_to_live
self.max_size_in_megabytes = max_size_in_megabytes
self.requires_duplicate_detection = requires_duplicate_detection
self.duplicate_detection_history_time_window = \
duplicate_detection_history_time_window
self.enable_batched_operations = enable_batched_operations
self.size_in_bytes = size_in_bytes
@property
def max_size_in_mega_bytes(self):
import warnings
warnings.warn(
'This attribute has been changed to max_size_in_megabytes.')
return self.max_size_in_megabytes
@max_size_in_mega_bytes.setter
def max_size_in_mega_bytes(self, value):
self.max_size_in_megabytes = value
class Subscription(WindowsAzureData):
''' Subscription class corresponding to Subscription Description:
http://msdn.microsoft.com/en-us/library/windowsazure/hh780763. '''
def __init__(self, lock_duration=None, requires_session=None,
default_message_time_to_live=None,
dead_lettering_on_message_expiration=None,
dead_lettering_on_filter_evaluation_exceptions=None,
enable_batched_operations=None, max_delivery_count=None,
message_count=None):
self.lock_duration = lock_duration
self.requires_session = requires_session
self.default_message_time_to_live = default_message_time_to_live
self.dead_lettering_on_message_expiration = \
dead_lettering_on_message_expiration
self.dead_lettering_on_filter_evaluation_exceptions = \
dead_lettering_on_filter_evaluation_exceptions
self.enable_batched_operations = enable_batched_operations
self.max_delivery_count = max_delivery_count
self.message_count = message_count
class Rule(WindowsAzureData):
''' Rule class corresponding to Rule Description:
http://msdn.microsoft.com/en-us/library/windowsazure/hh780753. '''
def __init__(self, filter_type=None, filter_expression=None,
action_type=None, action_expression=None):
self.filter_type = filter_type
self.filter_expression = filter_expression
self.action_type = action_type
        self.action_expression = action_expression
class Message(WindowsAzureData):
    ''' Message class used in the send message / get message APIs. '''
def __init__(self, body=None, service_bus_service=None, location=None,
custom_properties=None,
type='application/atom+xml;type=entry;charset=utf-8',
broker_properties=None):
self.body = body
self.location = location
self.broker_properties = broker_properties
self.custom_properties = custom_properties
self.type = type
self.service_bus_service = service_bus_service
self._topic_name = None
self._subscription_name = None
self._queue_name = None
if not service_bus_service:
return
        # If location is set, extract the queue name for a queue message, or
        # the topic and subscription names for a topic message.
if location:
if '/subscriptions/' in location:
pos = location.find('/subscriptions/')
pos1 = location.rfind('/', 0, pos - 1)
self._topic_name = location[pos1 + 1:pos]
pos += len('/subscriptions/')
pos1 = location.find('/', pos)
self._subscription_name = location[pos:pos1]
elif '/messages/' in location:
pos = location.find('/messages/')
pos1 = location.rfind('/', 0, pos - 1)
self._queue_name = location[pos1 + 1:pos]
def delete(self):
        ''' Deletes the message if the queue name, or the topic and
        subscription names, can be determined. '''
if self._queue_name:
self.service_bus_service.delete_queue_message(
self._queue_name,
self.broker_properties['SequenceNumber'],
self.broker_properties['LockToken'])
elif self._topic_name and self._subscription_name:
self.service_bus_service.delete_subscription_message(
self._topic_name,
self._subscription_name,
self.broker_properties['SequenceNumber'],
self.broker_properties['LockToken'])
else:
raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE)
def unlock(self):
        ''' Unlocks the message if the queue name, or the topic and
        subscription names, can be determined. '''
if self._queue_name:
self.service_bus_service.unlock_queue_message(
self._queue_name,
self.broker_properties['SequenceNumber'],
self.broker_properties['LockToken'])
elif self._topic_name and self._subscription_name:
self.service_bus_service.unlock_subscription_message(
self._topic_name,
self._subscription_name,
self.broker_properties['SequenceNumber'],
self.broker_properties['LockToken'])
else:
raise WindowsAzureError(_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK)
def add_headers(self, request):
        ''' Adds additional headers to a message request. '''
# Adds custom properties
if self.custom_properties:
for name, value in self.custom_properties.items():
if sys.version_info < (3,) and isinstance(value, unicode):
request.headers.append(
(name, '"' + value.encode('utf-8') + '"'))
elif isinstance(value, str):
request.headers.append((name, '"' + str(value) + '"'))
elif isinstance(value, datetime):
request.headers.append(
(name, '"' + value.strftime('%a, %d %b %Y %H:%M:%S GMT') + '"'))
else:
request.headers.append((name, str(value).lower()))
# Adds content-type
request.headers.append(('Content-Type', self.type))
# Adds BrokerProperties
if self.broker_properties:
request.headers.append(
('BrokerProperties', str(self.broker_properties)))
return request.headers
def _create_message(response, service_instance):
    ''' Creates a message from the response.
    response: the response from the service bus server.
    service_instance: the service bus client.
'''
respbody = response.body
custom_properties = {}
broker_properties = None
message_type = None
message_location = None
    # Gets all information from the response headers.
for name, value in response.headers:
if name.lower() == 'brokerproperties':
broker_properties = ast.literal_eval(value)
elif name.lower() == 'content-type':
message_type = value
elif name.lower() == 'location':
message_location = value
elif name.lower() not in ['content-type',
'brokerproperties',
'transfer-encoding',
'server',
'location',
'date']:
if '"' in value:
value = value[1:-1]
try:
custom_properties[name] = datetime.strptime(
value, '%a, %d %b %Y %H:%M:%S GMT')
except ValueError:
custom_properties[name] = value
else: # only int, float or boolean
if value.lower() == 'true':
custom_properties[name] = True
elif value.lower() == 'false':
custom_properties[name] = False
                # int('3.1') raises ValueError, so convert via float() first
elif str(int(float(value))) == value:
custom_properties[name] = int(value)
else:
custom_properties[name] = float(value)
    if message_type is None:
message = Message(
respbody, service_instance, message_location, custom_properties,
'application/atom+xml;type=entry;charset=utf-8', broker_properties)
else:
message = Message(respbody, service_instance, message_location,
custom_properties, message_type, broker_properties)
return message
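# Editorial illustration, not part of the original diff: a minimal sketch of
# how _create_message interprets response headers, using a stand-in response
# object. The header names and values below are hypothetical.
def _example_create_message():
    class _StubResponse(object):
        def __init__(self, body, headers):
            self.body = body
            self.headers = headers

    stub = _StubResponse(
        b'<payload/>',
        [('brokerproperties', "{'SequenceNumber': 7, 'LockToken': 'abc'}"),
         ('content-type', 'text/plain'),
         ('priority', '3'),      # coerced to int(3)
         ('urgent', 'false')])   # coerced to False
    return _create_message(stub, None)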
# convert functions
def _convert_response_to_rule(response):
return _convert_xml_to_rule(response.body)
def _convert_xml_to_rule(xmlstr):
''' Converts response xml to rule object.
The format of xml for rule:
<entry xmlns='http://www.w3.org/2005/Atom'>
<content type='application/xml'>
<RuleDescription
xmlns:i="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<Filter i:type="SqlFilterExpression">
<SqlExpression>MyProperty='XYZ'</SqlExpression>
</Filter>
<Action i:type="SqlFilterAction">
<SqlExpression>set MyProperty2 = 'ABC'</SqlExpression>
</Action>
</RuleDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
rule = Rule()
for rule_desc in _get_children_from_path(xmldoc,
'entry',
'content',
'RuleDescription'):
for xml_filter in _get_child_nodes(rule_desc, 'Filter'):
filter_type = xml_filter.getAttributeNS(
XML_SCHEMA_NAMESPACE, 'type')
setattr(rule, 'filter_type', str(filter_type))
if xml_filter.childNodes:
for expr in _get_child_nodes(xml_filter, 'SqlExpression'):
setattr(rule, 'filter_expression',
expr.firstChild.nodeValue)
for xml_action in _get_child_nodes(rule_desc, 'Action'):
action_type = xml_action.getAttributeNS(
XML_SCHEMA_NAMESPACE, 'type')
setattr(rule, 'action_type', str(action_type))
if xml_action.childNodes:
action_expression = xml_action.childNodes[0].firstChild
if action_expression:
setattr(rule, 'action_expression',
action_expression.nodeValue)
    # Extract id, updated and name values from the feed entry and set them
    # on the rule.
for name, value in _get_entry_properties(xmlstr, True, '/rules').items():
setattr(rule, name, value)
return rule
def _convert_response_to_queue(response):
return _convert_xml_to_queue(response.body)
def _parse_bool(value):
if value.lower() == 'true':
return True
return False
def _convert_xml_to_queue(xmlstr):
''' Converts xml response to queue object.
The format of xml response for queue:
<QueueDescription
xmlns=\"http://schemas.microsoft.com/netservices/2010/10/servicebus/connect\">
<MaxSizeInBytes>10000</MaxSizeInBytes>
<DefaultMessageTimeToLive>PT5M</DefaultMessageTimeToLive>
<LockDuration>PT2M</LockDuration>
<RequiresGroupedReceives>False</RequiresGroupedReceives>
<SupportsDuplicateDetection>False</SupportsDuplicateDetection>
...
</QueueDescription>
'''
xmldoc = minidom.parseString(xmlstr)
queue = Queue()
invalid_queue = True
# get node for each attribute in Queue class, if nothing found then the
# response is not valid xml for Queue.
for desc in _get_children_from_path(xmldoc,
'entry',
'content',
'QueueDescription'):
node_value = _get_first_child_node_value(desc, 'LockDuration')
if node_value is not None:
queue.lock_duration = node_value
invalid_queue = False
node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')
if node_value is not None:
queue.max_size_in_megabytes = int(node_value)
invalid_queue = False
node_value = _get_first_child_node_value(
desc, 'RequiresDuplicateDetection')
if node_value is not None:
queue.requires_duplicate_detection = _parse_bool(node_value)
invalid_queue = False
node_value = _get_first_child_node_value(desc, 'RequiresSession')
if node_value is not None:
queue.requires_session = _parse_bool(node_value)
invalid_queue = False
node_value = _get_first_child_node_value(
desc, 'DefaultMessageTimeToLive')
if node_value is not None:
queue.default_message_time_to_live = node_value
invalid_queue = False
node_value = _get_first_child_node_value(
desc, 'DeadLetteringOnMessageExpiration')
if node_value is not None:
queue.dead_lettering_on_message_expiration = _parse_bool(node_value)
invalid_queue = False
node_value = _get_first_child_node_value(
desc, 'DuplicateDetectionHistoryTimeWindow')
if node_value is not None:
queue.duplicate_detection_history_time_window = node_value
invalid_queue = False
node_value = _get_first_child_node_value(
desc, 'EnableBatchedOperations')
if node_value is not None:
queue.enable_batched_operations = _parse_bool(node_value)
invalid_queue = False
node_value = _get_first_child_node_value(desc, 'MaxDeliveryCount')
if node_value is not None:
queue.max_delivery_count = int(node_value)
invalid_queue = False
node_value = _get_first_child_node_value(desc, 'MessageCount')
if node_value is not None:
queue.message_count = int(node_value)
invalid_queue = False
node_value = _get_first_child_node_value(desc, 'SizeInBytes')
if node_value is not None:
queue.size_in_bytes = int(node_value)
invalid_queue = False
if invalid_queue:
raise WindowsAzureError(_ERROR_QUEUE_NOT_FOUND)
    # Extract id, updated and name values from the feed entry and set them
    # on the queue.
for name, value in _get_entry_properties(xmlstr, True).items():
setattr(queue, name, value)
return queue
def _convert_response_to_topic(response):
return _convert_xml_to_topic(response.body)
def _convert_xml_to_topic(xmlstr):
'''Converts xml response to topic
The xml format for topic:
<entry xmlns='http://www.w3.org/2005/Atom'>
<content type='application/xml'>
<TopicDescription
xmlns:i="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>
<MaxSizeInMegabytes>1024</MaxSizeInMegabytes>
<RequiresDuplicateDetection>false</RequiresDuplicateDetection>
<DuplicateDetectionHistoryTimeWindow>P7D</DuplicateDetectionHistoryTimeWindow>
<DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>
</TopicDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
topic = Topic()
invalid_topic = True
# get node for each attribute in Topic class, if nothing found then the
# response is not valid xml for Topic.
for desc in _get_children_from_path(xmldoc,
'entry',
'content',
'TopicDescription'):
invalid_topic = True
node_value = _get_first_child_node_value(
desc, 'DefaultMessageTimeToLive')
if node_value is not None:
topic.default_message_time_to_live = node_value
invalid_topic = False
node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')
if node_value is not None:
topic.max_size_in_megabytes = int(node_value)
invalid_topic = False
node_value = _get_first_child_node_value(
desc, 'RequiresDuplicateDetection')
if node_value is not None:
topic.requires_duplicate_detection = _parse_bool(node_value)
invalid_topic = False
node_value = _get_first_child_node_value(
desc, 'DuplicateDetectionHistoryTimeWindow')
if node_value is not None:
topic.duplicate_detection_history_time_window = node_value
invalid_topic = False
node_value = _get_first_child_node_value(
desc, 'EnableBatchedOperations')
if node_value is not None:
topic.enable_batched_operations = _parse_bool(node_value)
invalid_topic = False
node_value = _get_first_child_node_value(desc, 'SizeInBytes')
if node_value is not None:
topic.size_in_bytes = int(node_value)
invalid_topic = False
if invalid_topic:
raise WindowsAzureError(_ERROR_TOPIC_NOT_FOUND)
    # Extract id, updated and name values from the feed entry and set them
    # on the topic.
for name, value in _get_entry_properties(xmlstr, True).items():
setattr(topic, name, value)
return topic
def _convert_response_to_subscription(response):
return _convert_xml_to_subscription(response.body)
def _convert_xml_to_subscription(xmlstr):
'''Converts xml response to subscription
The xml format for subscription:
<entry xmlns='http://www.w3.org/2005/Atom'>
<content type='application/xml'>
<SubscriptionDescription
xmlns:i="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">
<LockDuration>PT5M</LockDuration>
<RequiresSession>false</RequiresSession>
<DefaultMessageTimeToLive>P10675199DT2H48M5.4775807S</DefaultMessageTimeToLive>
<DeadLetteringOnMessageExpiration>false</DeadLetteringOnMessageExpiration>
<DeadLetteringOnFilterEvaluationExceptions>true</DeadLetteringOnFilterEvaluationExceptions>
</SubscriptionDescription>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
subscription = Subscription()
for desc in _get_children_from_path(xmldoc,
'entry',
'content',
'SubscriptionDescription'):
node_value = _get_first_child_node_value(desc, 'LockDuration')
if node_value is not None:
subscription.lock_duration = node_value
node_value = _get_first_child_node_value(
desc, 'RequiresSession')
if node_value is not None:
subscription.requires_session = _parse_bool(node_value)
node_value = _get_first_child_node_value(
desc, 'DefaultMessageTimeToLive')
if node_value is not None:
subscription.default_message_time_to_live = node_value
node_value = _get_first_child_node_value(
desc, 'DeadLetteringOnFilterEvaluationExceptions')
if node_value is not None:
subscription.dead_lettering_on_filter_evaluation_exceptions = \
_parse_bool(node_value)
node_value = _get_first_child_node_value(
desc, 'DeadLetteringOnMessageExpiration')
if node_value is not None:
subscription.dead_lettering_on_message_expiration = \
_parse_bool(node_value)
node_value = _get_first_child_node_value(
desc, 'EnableBatchedOperations')
if node_value is not None:
subscription.enable_batched_operations = _parse_bool(node_value)
node_value = _get_first_child_node_value(
desc, 'MaxDeliveryCount')
if node_value is not None:
subscription.max_delivery_count = int(node_value)
node_value = _get_first_child_node_value(
desc, 'MessageCount')
if node_value is not None:
subscription.message_count = int(node_value)
for name, value in _get_entry_properties(xmlstr,
True,
'/subscriptions').items():
setattr(subscription, name, value)
return subscription
def _convert_subscription_to_xml(subscription):
'''
Converts a subscription object to xml to send. The order of each field of
    subscription in xml is very important, so we can't simply call
    convert_class_to_xml.
    subscription: the subscription object to be converted.
'''
subscription_body = '<SubscriptionDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if subscription:
if subscription.lock_duration is not None:
subscription_body += ''.join(
['<LockDuration>',
str(subscription.lock_duration),
'</LockDuration>'])
if subscription.requires_session is not None:
subscription_body += ''.join(
['<RequiresSession>',
str(subscription.requires_session).lower(),
'</RequiresSession>'])
if subscription.default_message_time_to_live is not None:
subscription_body += ''.join(
['<DefaultMessageTimeToLive>',
str(subscription.default_message_time_to_live),
'</DefaultMessageTimeToLive>'])
if subscription.dead_lettering_on_message_expiration is not None:
subscription_body += ''.join(
['<DeadLetteringOnMessageExpiration>',
str(subscription.dead_lettering_on_message_expiration).lower(),
'</DeadLetteringOnMessageExpiration>'])
if subscription.dead_lettering_on_filter_evaluation_exceptions is not None:
subscription_body += ''.join(
['<DeadLetteringOnFilterEvaluationExceptions>',
str(subscription.dead_lettering_on_filter_evaluation_exceptions).lower(),
'</DeadLetteringOnFilterEvaluationExceptions>'])
if subscription.enable_batched_operations is not None:
subscription_body += ''.join(
['<EnableBatchedOperations>',
str(subscription.enable_batched_operations).lower(),
'</EnableBatchedOperations>'])
if subscription.max_delivery_count is not None:
subscription_body += ''.join(
['<MaxDeliveryCount>',
str(subscription.max_delivery_count),
'</MaxDeliveryCount>'])
if subscription.message_count is not None:
subscription_body += ''.join(
['<MessageCount>',
str(subscription.message_count),
'</MessageCount>'])
subscription_body += '</SubscriptionDescription>'
return _create_entry(subscription_body)
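# Editorial illustration, not part of the original diff: a minimal sketch of
# serializing a Subscription with a couple of fields set. The values are
# placeholders; unset fields are simply omitted from the XML.
def _example_subscription_to_xml():
    sub = Subscription(lock_duration='PT1M', requires_session=False)
    return _convert_subscription_to_xml(sub)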
def _convert_rule_to_xml(rule):
'''
Converts a rule object to xml to send. The order of each field of rule
    in xml is very important, so we can't simply call convert_class_to_xml.
rule: the rule object to be converted.
'''
rule_body = '<RuleDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if rule:
if rule.filter_type:
rule_body += ''.join(
['<Filter i:type="',
xml_escape(rule.filter_type),
'">'])
if rule.filter_type == 'CorrelationFilter':
rule_body += ''.join(
['<CorrelationId>',
xml_escape(rule.filter_expression),
'</CorrelationId>'])
else:
rule_body += ''.join(
['<SqlExpression>',
xml_escape(rule.filter_expression),
'</SqlExpression>'])
rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'
rule_body += '</Filter>'
if rule.action_type:
rule_body += ''.join(
['<Action i:type="',
xml_escape(rule.action_type),
'">'])
if rule.action_type == 'SqlRuleAction':
rule_body += ''.join(
['<SqlExpression>',
xml_escape(rule.action_expression),
'</SqlExpression>'])
rule_body += '<CompatibilityLevel>20</CompatibilityLevel>'
rule_body += '</Action>'
rule_body += '</RuleDescription>'
return _create_entry(rule_body)
def _convert_topic_to_xml(topic):
'''
Converts a topic object to xml to send. The order of each field of topic
    in xml is very important, so we can't simply call convert_class_to_xml.
topic: the topic object to be converted.
'''
topic_body = '<TopicDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if topic:
if topic.default_message_time_to_live is not None:
topic_body += ''.join(
['<DefaultMessageTimeToLive>',
str(topic.default_message_time_to_live),
'</DefaultMessageTimeToLive>'])
if topic.max_size_in_megabytes is not None:
topic_body += ''.join(
['<MaxSizeInMegabytes>',
str(topic.max_size_in_megabytes),
'</MaxSizeInMegabytes>'])
if topic.requires_duplicate_detection is not None:
topic_body += ''.join(
['<RequiresDuplicateDetection>',
str(topic.requires_duplicate_detection).lower(),
'</RequiresDuplicateDetection>'])
if topic.duplicate_detection_history_time_window is not None:
topic_body += ''.join(
['<DuplicateDetectionHistoryTimeWindow>',
str(topic.duplicate_detection_history_time_window),
'</DuplicateDetectionHistoryTimeWindow>'])
if topic.enable_batched_operations is not None:
topic_body += ''.join(
['<EnableBatchedOperations>',
str(topic.enable_batched_operations).lower(),
'</EnableBatchedOperations>'])
if topic.size_in_bytes is not None:
topic_body += ''.join(
['<SizeInBytes>',
str(topic.size_in_bytes),
'</SizeInBytes>'])
topic_body += '</TopicDescription>'
return _create_entry(topic_body)
def _convert_queue_to_xml(queue):
'''
Converts a queue object to xml to send. The order of each field of queue
    in xml is very important, so we can't simply call convert_class_to_xml.
queue: the queue object to be converted.
'''
queue_body = '<QueueDescription xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect">'
if queue:
if queue.lock_duration:
queue_body += ''.join(
['<LockDuration>',
str(queue.lock_duration),
'</LockDuration>'])
if queue.max_size_in_megabytes is not None:
queue_body += ''.join(
['<MaxSizeInMegabytes>',
str(queue.max_size_in_megabytes),
'</MaxSizeInMegabytes>'])
if queue.requires_duplicate_detection is not None:
queue_body += ''.join(
['<RequiresDuplicateDetection>',
str(queue.requires_duplicate_detection).lower(),
'</RequiresDuplicateDetection>'])
if queue.requires_session is not None:
queue_body += ''.join(
['<RequiresSession>',
str(queue.requires_session).lower(),
'</RequiresSession>'])
if queue.default_message_time_to_live is not None:
queue_body += ''.join(
['<DefaultMessageTimeToLive>',
str(queue.default_message_time_to_live),
'</DefaultMessageTimeToLive>'])
if queue.dead_lettering_on_message_expiration is not None:
queue_body += ''.join(
['<DeadLetteringOnMessageExpiration>',
str(queue.dead_lettering_on_message_expiration).lower(),
'</DeadLetteringOnMessageExpiration>'])
if queue.duplicate_detection_history_time_window is not None:
queue_body += ''.join(
['<DuplicateDetectionHistoryTimeWindow>',
str(queue.duplicate_detection_history_time_window),
'</DuplicateDetectionHistoryTimeWindow>'])
if queue.max_delivery_count is not None:
queue_body += ''.join(
['<MaxDeliveryCount>',
str(queue.max_delivery_count),
'</MaxDeliveryCount>'])
if queue.enable_batched_operations is not None:
queue_body += ''.join(
['<EnableBatchedOperations>',
str(queue.enable_batched_operations).lower(),
'</EnableBatchedOperations>'])
if queue.size_in_bytes is not None:
queue_body += ''.join(
['<SizeInBytes>',
str(queue.size_in_bytes),
'</SizeInBytes>'])
if queue.message_count is not None:
queue_body += ''.join(
['<MessageCount>',
str(queue.message_count),
'</MessageCount>'])
queue_body += '</QueueDescription>'
return _create_entry(queue_body)
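# Editorial illustration, not part of the original diff: the same
# serialization pattern applied to a Queue; the values are placeholders.
def _example_queue_to_xml():
    q = Queue(max_size_in_megabytes=1024, requires_duplicate_detection=False)
    return _convert_queue_to_xml(q)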
def _service_bus_error_handler(http_error):
''' Simple error handler for service bus service. '''
return _general_error_handler(http_error)
from azure.servicebus.servicebusservice import ServiceBusService
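# Editorial illustration, not part of the original diff: a sketch of the
# headers Message.add_headers emits, using a stand-in request object. The
# property names and values are hypothetical.
def _example_message_headers():
    class _StubRequest(object):
        def __init__(self):
            self.headers = []

    msg = Message(b'payload',
                  custom_properties={'Priority': 3},
                  broker_properties={'Label': 'example'})
    # Roughly: [('Priority', '3'),
    #           ('Content-Type',
    #            'application/atom+xml;type=entry;charset=utf-8'),
    #           ('BrokerProperties', "{'Label': 'example'}")]
    return msg.add_headers(_StubRequest())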

View File

@ -0,0 +1,914 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
import time
from azure import (
WindowsAzureError,
SERVICE_BUS_HOST_BASE,
_convert_response_to_feeds,
_dont_fail_not_exist,
_dont_fail_on_exist,
_get_request_body,
_get_request_body_bytes_only,
_int_or_none,
_str,
_update_request_uri_query,
url_quote,
url_unquote,
_validate_not_none,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicebus import (
AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY,
AZURE_SERVICEBUS_ISSUER,
_convert_topic_to_xml,
_convert_response_to_topic,
_convert_queue_to_xml,
_convert_response_to_queue,
_convert_subscription_to_xml,
_convert_response_to_subscription,
_convert_rule_to_xml,
_convert_response_to_rule,
_convert_xml_to_queue,
_convert_xml_to_topic,
_convert_xml_to_subscription,
_convert_xml_to_rule,
_create_message,
_service_bus_error_handler,
)
# Token cache for Authentication
# Shared by the different instances of ServiceBusService
_tokens = {}
class ServiceBusService(object):
def __init__(self, service_namespace=None, account_key=None, issuer=None,
x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE):
# x_ms_version is not used, but the parameter is kept for backwards
# compatibility
self.requestid = None
self.service_namespace = service_namespace
self.account_key = account_key
self.issuer = issuer
self.host_base = host_base
# Get service namespace, account key and issuer.
# If they are set when constructing, then use them, else find them
# from environment variables.
if not self.service_namespace:
self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)
if not self.account_key:
self.account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)
if not self.issuer:
self.issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)
if not self.service_namespace or \
not self.account_key or not self.issuer:
raise WindowsAzureError(
'You need to provide servicebus namespace, access key and Issuer')
self._httpclient = _HTTPClient(service_instance=self,
service_namespace=self.service_namespace,
account_key=self.account_key,
issuer=self.issuer)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
'''
res = ServiceBusService(self.service_namespace, self.account_key,
self.issuer)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
'''
Creates a new queue. Once created, this queue's resource manifest is
immutable.
queue_name: Name of the queue to create.
queue: Queue object to create.
fail_on_exist:
Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.body = _get_request_body(_convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Deletes an existing queue. This operation will also remove all
associated state including messages in the queue.
queue_name: Name of the queue to delete.
fail_not_exist:
Specify whether to throw an exception if the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue(self, queue_name):
'''
Retrieves an existing queue.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_queue(response)
def list_queues(self):
'''
Enumerates the queues in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_queue)
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
Creates a new topic. Once created, this topic resource manifest is
immutable.
topic_name: Name of the topic to create.
topic: Topic object to create.
fail_on_exist:
Specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.body = _get_request_body(_convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_topic(self, topic_name, fail_not_exist=False):
'''
Deletes an existing topic. This operation will also remove all
associated state including associated subscriptions.
topic_name: Name of the topic to delete.
fail_not_exist:
            Specify whether to throw an exception when the topic doesn't
            exist.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_topic(self, topic_name):
'''
Retrieves the description for the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_topic(response)
def list_topics(self):
'''
Retrieves the topics in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_topic)
def create_rule(self, topic_name, subscription_name, rule_name, rule=None,
fail_on_exist=False):
'''
Creates a new rule. Once created, this rule's resource manifest is
immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
fail_on_exist:
Specify whether to throw an exception when the rule exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.body = _get_request_body(_convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_rule(self, topic_name, subscription_name, rule_name,
fail_not_exist=False):
'''
Deletes an existing rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name:
Name of the rule to delete. DEFAULT_RULE_NAME=$Default.
Use DEFAULT_RULE_NAME to delete default rule for the subscription.
fail_not_exist:
            Specify whether to throw an exception when the rule doesn't
            exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_rule(self, topic_name, subscription_name, rule_name):
'''
Retrieves the description for the specified rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_rule(response)
def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_rule)
def create_subscription(self, topic_name, subscription_name,
subscription=None, fail_on_exist=False):
'''
Creates a new subscription. Once created, this subscription resource
manifest is immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
fail_on_exist:
            Specify whether to throw an exception when the subscription
            exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.body = _get_request_body(
_convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_subscription(self, topic_name, subscription_name,
fail_not_exist=False):
'''
Deletes an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription to delete.
fail_not_exist:
Specify whether to throw an exception when the subscription
doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_subscription(response)
def list_subscriptions(self, topic_name):
'''
Retrieves the subscriptions in the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response,
_convert_xml_to_subscription)
def send_topic_message(self, topic_name, message=None):
'''
Enqueues a message into the specified topic. The limit to the number
of messages which may be present in the topic is governed by the
message size in MaxTopicSizeInBytes. If this message causes the topic
to exceed its quota, a quota exceeded error is returned and the
message will be rejected.
topic_name: Name of the topic.
message: Message object containing message body and properties.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only(
'message.body', message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
        Atomically retrieves and locks a message from the subscription for
        processing. The message is guaranteed not to be delivered to other
        receivers (on the same subscription only) during the lock duration
        period specified in the subscription description. Once the lock
        expires, the message becomes available to other receivers. In order
        to complete processing of the message, the receiver should issue a
        delete command with the lock ID received from this operation. To
        abandon processing of the message and unlock it for other receivers,
        an Unlock Message command should be issued, or the lock duration
        period can expire.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
Unlock a message for processing by other receivers on a given
subscription. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
            '/subscriptions/' + _str(subscription_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
Read and delete a message from a subscription as an atomic operation.
This operation should be used when a best-effort guarantee is
sufficient for an application; that is, using this operation it is
possible for messages to be lost if processing fails.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
        Completes processing on a locked message and deletes it from the
subscription. This operation should only be called after processing a
previously locked message is successful to maintain At-Least-Once
delivery assurances.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def send_queue_message(self, queue_name, message=None):
'''
Sends a message into the specified queue. The limit to the number of
        messages which may be present in the queue is governed by the message
        size and the MaxSizeInMegabytes setting. If this message causes the
        queue to exceed its quota, a quota exceeded error is returned and the
        message will be rejected.
queue_name: Name of the queue.
message: Message object containing message body and properties.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only('message.body',
message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_queue_message(self, queue_name, timeout='60'):
'''
        Atomically retrieves and locks a message from a queue for processing.
The message is guaranteed not to be delivered to other receivers (on
the same subscription only) during the lock duration period specified
in the queue description. Once the lock expires, the message will be
available to other receivers. In order to complete processing of the
message, the receiver should issue a delete command with the lock ID
received from this operation. To abandon processing of the message and
unlock it for other receivers, an Unlock Message command should be
issued, or the lock duration period can expire.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
'''
        Unlocks a message for processing by other receivers on a given
        queue. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_queue_message(self, queue_name, timeout='60'):
'''
Reads and deletes a message from a queue as an atomic operation. This
operation should be used when a best-effort guarantee is sufficient
for an application; that is, using this operation it is possible for
messages to be lost if processing fails.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_queue_message(self, queue_name, sequence_number, lock_token):
'''
        Completes processing on a locked message and deletes it from the queue.
This operation should only be called after processing a previously
locked message is successful to maintain At-Least-Once delivery
assurances.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
'''
Receive a message from a queue for processing.
queue_name: Name of the queue.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_queue_message(queue_name, timeout)
else:
return self.read_delete_queue_message(queue_name, timeout)
def receive_subscription_message(self, topic_name, subscription_name,
peek_lock=True, timeout=60):
'''
Receive a message from a subscription for processing.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_subscription_message(topic_name,
subscription_name,
timeout)
else:
return self.read_delete_subscription_message(topic_name,
subscription_name,
timeout)
def _get_host(self):
return self.service_namespace + self.host_base
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as ex:
return _service_bus_error_handler(ex)
return resp
def _update_service_bus_header(self, request):
''' Add additional headers for service bus. '''
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
        # If it is not a GET or HEAD request, content-type must be set.
        if request.method not in ['GET', 'HEAD']:
for name, _ in request.headers:
if 'content-type' == name.lower():
break
else:
request.headers.append(
('Content-Type',
'application/atom+xml;type=entry;charset=utf-8'))
        # Adds authorization header for authentication.
request.headers.append(
('Authorization', self._sign_service_bus_request(request)))
return request.headers
def _sign_service_bus_request(self, request):
        ''' Returns the signed string with the token. '''
return 'WRAP access_token="' + \
self._get_token(request.host, request.path) + '"'
def _token_is_expired(self, token):
        ''' Checks whether the token has expired or is about to expire. '''
time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')
time_pos_end = token.find('&', time_pos_begin)
token_expire_time = int(token[time_pos_begin:time_pos_end])
time_now = time.mktime(time.localtime())
# Add a 30-second buffer so the token is not already expired by the time
# we send it to the server.
return (token_expire_time - time_now) < 30
def _get_token(self, host, path):
'''
Returns a token for the request.
host: the host of the service bus service request.
path: the path of the service bus service request.
'''
wrap_scope = 'http://' + host + path + self.issuer + self.account_key
# Check whether there is an unexpired cached token; return the cached
# token if it is still usable.
if wrap_scope in _tokens:
token = _tokens[wrap_scope]
if not self._token_is_expired(token):
return token
# Get a new token from the access control server.
request = HTTPRequest()
request.protocol_override = 'https'
request.host = host.replace('.servicebus.', '-sb.accesscontrol.')
request.method = 'POST'
request.path = '/WRAPv0.9'
request.body = ('wrap_name=' + url_quote(self.issuer) +
'&wrap_password=' + url_quote(self.account_key) +
'&wrap_scope=' +
url_quote('http://' + host + path)).encode('utf-8')
request.headers.append(('Content-Length', str(len(request.body))))
resp = self._httpclient.perform_request(request)
token = resp.body.decode('utf-8')
token = url_unquote(token[token.find('=') + 1:token.rfind('&')])
_tokens[wrap_scope] = token
return token
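# Minimal sketch of the ExpiresOn check performed above, assuming a WRAP
# token of the form '...&ExpiresOn=<unix-timestamp>&...' (the sample values
# are made up):
#
#   import time
#   token = 'wrap_access_token=abc&ExpiresOn=1406570000&other=value'
#   begin = token.find('ExpiresOn=') + len('ExpiresOn=')
#   end = token.find('&', begin)
#   expires_on = int(token[begin:end])
#   about_to_expire = (expires_on - time.mktime(time.localtime())) < 30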

File diff suppressed because it is too large

View File

@ -0,0 +1,113 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
MANAGEMENT_HOST,
_convert_response_to_feeds,
_str,
_validate_not_none,
)
from azure.servicemanagement import (
_ServiceBusManagementXmlSerializer,
)
from azure.servicemanagement.servicemanagementclient import (
_ServiceManagementClient,
)
class ServiceBusManagementService(_ServiceManagementClient):
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST):
super(ServiceBusManagementService, self).__init__(
subscription_id, cert_file, host)
#--Operations for service bus ----------------------------------------
def get_regions(self):
'''
Get list of available service bus regions.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Regions/', None),
None)
return _convert_response_to_feeds(
response,
_ServiceBusManagementXmlSerializer.xml_to_region)
def list_namespaces(self):
'''
List the service bus namespaces defined on the account.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Namespaces/', None),
None)
return _convert_response_to_feeds(
response,
_ServiceBusManagementXmlSerializer.xml_to_namespace)
def get_namespace(self, name):
'''
Get details about a specific namespace.
name: Name of the service bus namespace.
'''
response = self._perform_get(
self._get_path('services/serviceBus/Namespaces', name),
None)
return _ServiceBusManagementXmlSerializer.xml_to_namespace(
response.body)
def create_namespace(self, name, region):
'''
Create a new service bus namespace.
name: Name of the service bus namespace to create.
region: Region to create the namespace in.
'''
_validate_not_none('name', name)
return self._perform_put(
self._get_path('services/serviceBus/Namespaces', name),
_ServiceBusManagementXmlSerializer.namespace_to_xml(region))
def delete_namespace(self, name):
'''
Delete a service bus namespace.
name: Name of the service bus namespace to delete.
'''
_validate_not_none('name', name)
return self._perform_delete(
self._get_path('services/serviceBus/Namespaces', name),
None)
def check_namespace_availability(self, name):
'''
Checks to see if the specified service bus namespace is available, or
if it has already been taken.
name: Name of the service bus namespace to validate.
'''
_validate_not_none('name', name)
response = self._perform_get(
self._get_path('services/serviceBus/CheckNamespaceAvailability',
None) + '/?namespace=' + _str(name), None)
return _ServiceBusManagementXmlSerializer.xml_to_namespace_availability(
response.body)
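# Usage sketch (illustrative only): listing namespaces and checking a name
# before creating it. The subscription id, certificate path, namespace name
# and region are placeholders, and the 'name' and 'result' attributes on the
# deserialized feed objects are assumptions, not guaranteed by this file:
#
#   sms = ServiceBusManagementService('<subscription-id>', 'mycert.pem')
#   for namespace in sms.list_namespaces():
#       print(namespace.name)
#   availability = sms.check_namespace_availability('mynamespace')
#   if availability.result:
#       sms.create_namespace('mynamespace', 'West US')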

View File

@ -0,0 +1,166 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
from azure import (
WindowsAzureError,
MANAGEMENT_HOST,
_get_request_body,
_parse_response,
_str,
_update_request_uri_query,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicemanagement import (
AZURE_MANAGEMENT_CERTFILE,
AZURE_MANAGEMENT_SUBSCRIPTIONID,
_management_error_handler,
_parse_response_for_async_op,
_update_management_header,
)
class _ServiceManagementClient(object):
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST):
self.requestid = None
self.subscription_id = subscription_id
self.cert_file = cert_file
self.host = host
if not self.cert_file:
if AZURE_MANAGEMENT_CERTFILE in os.environ:
self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]
if not self.subscription_id:
if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ:
self.subscription_id = os.environ[
AZURE_MANAGEMENT_SUBSCRIPTIONID]
if not self.cert_file or not self.subscription_id:
raise WindowsAzureError(
'You need to provide subscription id and certificate file')
self._httpclient = _HTTPClient(
service_instance=self, cert_file=self.cert_file)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
'''Returns a new service which will process requests with the
specified filter. Filtering operations can include logging, automatic
retrying, etc... The filter is a lambda which receives the HTTPRequest
and another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.'''
res = type(self)(self.subscription_id, self.cert_file, self.host)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
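# Minimal sketch of a logging filter as described in the docstring above.
# The filter receives the HTTPRequest and the next filter in the chain and
# returns the response; 'client' stands for any service client instance:
#
#   def log_filter(request, next_filter):
#       print('>> {0} {1}'.format(request.method, request.path))
#       response = next_filter(request)
#       print('<< {0}'.format(response.status))
#       return response
#
#   logged_client = client.with_filter(log_filter)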
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
#--Helper functions --------------------------------------------------
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as ex:
return _management_error_handler(ex)
return resp
def _perform_get(self, path, response_type):
request = HTTPRequest()
request.method = 'GET'
request.host = self.host
request.path = path
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return _parse_response(response, response_type)
return response
def _perform_put(self, path, body, async=False):
request = HTTPRequest()
request.method = 'PUT'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if async:
return _parse_response_for_async_op(response)
return None
def _perform_post(self, path, body, response_type=None, async=False):
request = HTTPRequest()
request.method = 'POST'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return _parse_response(response, response_type)
if async:
return _parse_response_for_async_op(response)
return None
def _perform_delete(self, path, async=False):
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.host
request.path = path
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if async:
return _parse_response_for_async_op(response)
return None
def _get_path(self, resource, name):
path = '/' + self.subscription_id + '/' + resource
if name is not None:
path += '/' + _str(name)
return path

File diff suppressed because it is too large

View File

@ -0,0 +1,913 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import hashlib
import hmac
import sys
import types
from datetime import datetime
from xml.dom import minidom
from azure import (WindowsAzureData,
WindowsAzureError,
METADATA_NS,
xml_escape,
_create_entry,
_decode_base64_to_text,
_decode_base64_to_bytes,
_encode_base64,
_fill_data_minidom,
_fill_instance_element,
_get_child_nodes,
_get_child_nodesNS,
_get_children_from_path,
_get_entry_properties,
_general_error_handler,
_list_of,
_parse_response_for_dict,
_unicode_type,
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
)
# x-ms-version for storage service.
X_MS_VERSION = '2012-02-12'
class EnumResultsBase(object):
''' base class for EnumResults. '''
def __init__(self):
self.prefix = u''
self.marker = u''
self.max_results = 0
self.next_marker = u''
class ContainerEnumResults(EnumResultsBase):
''' Blob Container list. '''
def __init__(self):
EnumResultsBase.__init__(self)
self.containers = _list_of(Container)
def __iter__(self):
return iter(self.containers)
def __len__(self):
return len(self.containers)
def __getitem__(self, index):
return self.containers[index]
class Container(WindowsAzureData):
''' Blob container class. '''
def __init__(self):
self.name = u''
self.url = u''
self.properties = Properties()
self.metadata = {}
class Properties(WindowsAzureData):
''' Blob container's properties class. '''
def __init__(self):
self.last_modified = u''
self.etag = u''
class RetentionPolicy(WindowsAzureData):
''' RetentionPolicy in service properties. '''
def __init__(self):
self.enabled = False
self.__dict__['days'] = None
def get_days(self):
# convert days to int value
return int(self.__dict__['days'])
def set_days(self, value):
''' Set the days value (converted to int when read back). '''
self.__dict__['days'] = value
days = property(fget=get_days, fset=set_days)
class Logging(WindowsAzureData):
''' Logging class in service properties. '''
def __init__(self):
self.version = u'1.0'
self.delete = False
self.read = False
self.write = False
self.retention_policy = RetentionPolicy()
class Metrics(WindowsAzureData):
''' Metrics class in service properties. '''
def __init__(self):
self.version = u'1.0'
self.enabled = False
self.include_apis = None
self.retention_policy = RetentionPolicy()
class StorageServiceProperties(WindowsAzureData):
''' Storage Service Properties class. '''
def __init__(self):
self.logging = Logging()
self.metrics = Metrics()
class AccessPolicy(WindowsAzureData):
''' Access Policy class in service properties. '''
def __init__(self, start=u'', expiry=u'', permission=u''):
self.start = start
self.expiry = expiry
self.permission = permission
class SignedIdentifier(WindowsAzureData):
''' Signed Identifier class for service properties. '''
def __init__(self):
self.id = u''
self.access_policy = AccessPolicy()
class SignedIdentifiers(WindowsAzureData):
''' SignedIdentifier list. '''
def __init__(self):
self.signed_identifiers = _list_of(SignedIdentifier)
def __iter__(self):
return iter(self.signed_identifiers)
def __len__(self):
return len(self.signed_identifiers)
def __getitem__(self, index):
return self.signed_identifiers[index]
class BlobEnumResults(EnumResultsBase):
''' Blob list.'''
def __init__(self):
EnumResultsBase.__init__(self)
self.blobs = _list_of(Blob)
self.prefixes = _list_of(BlobPrefix)
self.delimiter = ''
def __iter__(self):
return iter(self.blobs)
def __len__(self):
return len(self.blobs)
def __getitem__(self, index):
return self.blobs[index]
class BlobResult(bytes):
def __new__(cls, blob, properties):
return bytes.__new__(cls, blob if blob else b'')
def __init__(self, blob, properties):
self.properties = properties
class Blob(WindowsAzureData):
''' Blob class. '''
def __init__(self):
self.name = u''
self.snapshot = u''
self.url = u''
self.properties = BlobProperties()
self.metadata = {}
class BlobProperties(WindowsAzureData):
''' Blob Properties '''
def __init__(self):
self.last_modified = u''
self.etag = u''
self.content_length = 0
self.content_type = u''
self.content_encoding = u''
self.content_language = u''
self.content_md5 = u''
self.xms_blob_sequence_number = 0
self.blob_type = u''
self.lease_status = u''
self.lease_state = u''
self.lease_duration = u''
self.copy_id = u''
self.copy_source = u''
self.copy_status = u''
self.copy_progress = u''
self.copy_completion_time = u''
self.copy_status_description = u''
class BlobPrefix(WindowsAzureData):
''' BlobPrefix in Blob. '''
def __init__(self):
self.name = ''
class BlobBlock(WindowsAzureData):
''' BlobBlock class '''
def __init__(self, id=None, size=None):
self.id = id
self.size = size
class BlobBlockList(WindowsAzureData):
''' BlobBlockList class '''
def __init__(self):
self.committed_blocks = []
self.uncommitted_blocks = []
class PageRange(WindowsAzureData):
''' Page Range for page blob. '''
def __init__(self):
self.start = 0
self.end = 0
class PageList(object):
''' Page list for page blob. '''
def __init__(self):
self.page_ranges = _list_of(PageRange)
def __iter__(self):
return iter(self.page_ranges)
def __len__(self):
return len(self.page_ranges)
def __getitem__(self, index):
return self.page_ranges[index]
class QueueEnumResults(EnumResultsBase):
''' Queue list'''
def __init__(self):
EnumResultsBase.__init__(self)
self.queues = _list_of(Queue)
def __iter__(self):
return iter(self.queues)
def __len__(self):
return len(self.queues)
def __getitem__(self, index):
return self.queues[index]
class Queue(WindowsAzureData):
''' Queue class '''
def __init__(self):
self.name = u''
self.url = u''
self.metadata = {}
class QueueMessagesList(WindowsAzureData):
''' Queue message list. '''
def __init__(self):
self.queue_messages = _list_of(QueueMessage)
def __iter__(self):
return iter(self.queue_messages)
def __len__(self):
return len(self.queue_messages)
def __getitem__(self, index):
return self.queue_messages[index]
class QueueMessage(WindowsAzureData):
''' Queue message class. '''
def __init__(self):
self.message_id = u''
self.insertion_time = u''
self.expiration_time = u''
self.pop_receipt = u''
self.time_next_visible = u''
self.dequeue_count = u''
self.message_text = u''
class Entity(WindowsAzureData):
''' Entity class. The attributes of entity will be created dynamically. '''
pass
class EntityProperty(WindowsAzureData):
''' Entity property. Contains type and value. '''
def __init__(self, type=None, value=None):
self.type = type
self.value = value
class Table(WindowsAzureData):
''' Only for intellisense and for telling the user the return type. '''
pass
def _parse_blob_enum_results_list(response):
respbody = response.body
return_obj = BlobEnumResults()
doc = minidom.parseString(respbody)
for enum_results in _get_child_nodes(doc, 'EnumerationResults'):
for child in _get_children_from_path(enum_results, 'Blobs', 'Blob'):
return_obj.blobs.append(_fill_instance_element(child, Blob))
for child in _get_children_from_path(enum_results,
'Blobs',
'BlobPrefix'):
return_obj.prefixes.append(
_fill_instance_element(child, BlobPrefix))
for name, value in vars(return_obj).items():
if name == 'blobs' or name == 'prefixes':
continue
value = _fill_data_minidom(enum_results, name, value)
if value is not None:
setattr(return_obj, name, value)
return return_obj
def _update_storage_header(request):
''' add additional headers for storage request. '''
if request.body:
assert isinstance(request.body, bytes)
# If it is a PUT, POST, MERGE or DELETE request, the content-length header must be added.
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
# Append additional headers based on the service.
request.headers.append(('x-ms-version', X_MS_VERSION))
# append x-ms-meta name, values to header
for name, value in request.headers:
if 'x-ms-meta-name-values' in name and value:
for meta_name, meta_value in value.items():
request.headers.append(('x-ms-meta-' + meta_name, meta_value))
request.headers.remove((name, value))
break
return request
def _update_storage_blob_header(request, account_name, account_key):
''' add additional headers for storage blob request. '''
request = _update_storage_header(request)
current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
request.headers.append(('x-ms-date', current_time))
request.headers.append(
('Content-Type', 'application/octet-stream Charset=UTF-8'))
request.headers.append(('Authorization',
_sign_storage_blob_request(request,
account_name,
account_key)))
return request.headers
def _update_storage_queue_header(request, account_name, account_key):
''' add additional headers for storage queue request. '''
return _update_storage_blob_header(request, account_name, account_key)
def _update_storage_table_header(request):
''' add additional headers for storage table request. '''
request = _update_storage_header(request)
for name, _ in request.headers:
if name.lower() == 'content-type':
break
else:
request.headers.append(('Content-Type', 'application/atom+xml'))
request.headers.append(('DataServiceVersion', '2.0;NetFx'))
request.headers.append(('MaxDataServiceVersion', '2.0;NetFx'))
current_time = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
request.headers.append(('x-ms-date', current_time))
request.headers.append(('Date', current_time))
return request.headers
def _sign_storage_blob_request(request, account_name, account_key):
'''
Returns the signed string for a blob request, which is used to set the
Authorization header. This is also used to sign queue requests.
'''
uri_path = request.path.split('?')[0]
# method to sign
string_to_sign = request.method + '\n'
# get headers to sign
headers_to_sign = [
'content-encoding', 'content-language', 'content-length',
'content-md5', 'content-type', 'date', 'if-modified-since',
'if-match', 'if-none-match', 'if-unmodified-since', 'range']
request_header_dict = dict((name.lower(), value)
for name, value in request.headers if value)
string_to_sign += '\n'.join(request_header_dict.get(x, '')
for x in headers_to_sign) + '\n'
# get x-ms header to sign
x_ms_headers = []
for name, value in request.headers:
if 'x-ms' in name:
x_ms_headers.append((name.lower(), value))
x_ms_headers.sort()
for name, value in x_ms_headers:
if value:
string_to_sign += ''.join([name, ':', value, '\n'])
# get account_name and uri path to sign
string_to_sign += '/' + account_name + uri_path
# get query string to sign if it is not table service
query_to_sign = request.query
query_to_sign.sort()
current_name = ''
for name, value in query_to_sign:
if value:
if current_name != name:
string_to_sign += '\n' + name + ':' + value
else:
string_to_sign += '\n' + ',' + value
# sign the request
auth_string = 'SharedKey ' + account_name + ':' + \
_sign_string(account_key, string_to_sign)
return auth_string
def _sign_storage_table_request(request, account_name, account_key):
uri_path = request.path.split('?')[0]
string_to_sign = request.method + '\n'
headers_to_sign = ['content-md5', 'content-type', 'date']
request_header_dict = dict((name.lower(), value)
for name, value in request.headers if value)
string_to_sign += '\n'.join(request_header_dict.get(x, '')
for x in headers_to_sign) + '\n'
# get account_name and uri path to sign
string_to_sign += ''.join(['/', account_name, uri_path])
for name, value in request.query:
if name == 'comp' and uri_path == '/':
string_to_sign += '?comp=' + value
break
# sign the request
auth_string = 'SharedKey ' + account_name + ':' + \
_sign_string(account_key, string_to_sign)
return auth_string
def _sign_string(account_key, string_to_sign):
decoded_account_key = _decode_base64_to_bytes(account_key)
if isinstance(string_to_sign, _unicode_type):
string_to_sign = string_to_sign.encode('utf-8')
signed_hmac_sha256 = hmac.HMAC(
decoded_account_key, string_to_sign, hashlib.sha256)
digest = signed_hmac_sha256.digest()
encoded_digest = _encode_base64(digest)
return encoded_digest
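# Equivalent stdlib-only sketch of the signing step above; the key and the
# string to sign are made-up sample values:
#
#   import base64, hashlib, hmac
#   key = base64.b64decode('bXlhY2NvdW50a2V5')  # decoded account key
#   mac = hmac.HMAC(key, b'GET\n...\n/myaccount/mycontainer', hashlib.sha256)
#   signature = base64.b64encode(mac.digest())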
def _to_python_bool(value):
if value.lower() == 'true':
return True
return False
def _to_entity_int(data):
int_max = (2 << 30) - 1
if data > (int_max) or data < (int_max + 1) * (-1):
return 'Edm.Int64', str(data)
else:
return 'Edm.Int32', str(data)
def _to_entity_bool(value):
if value:
return 'Edm.Boolean', 'true'
return 'Edm.Boolean', 'false'
def _to_entity_datetime(value):
return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%S')
def _to_entity_float(value):
return 'Edm.Double', str(value)
def _to_entity_property(value):
if value.type == 'Edm.Binary':
return value.type, _encode_base64(value.value)
return value.type, str(value.value)
def _to_entity_none(value):
return None, None
def _to_entity_str(value):
return 'Edm.String', value
# Tables of conversions to and from entity types. We support specific
# datatypes, and beyond that the user can use an EntityProperty to get
# custom data type support.
def _from_entity_binary(value):
return EntityProperty('Edm.Binary', _decode_base64_to_bytes(value))
def _from_entity_int(value):
return int(value)
def _from_entity_datetime(value):
format = '%Y-%m-%dT%H:%M:%S'
if '.' in value:
format = format + '.%f'
if value.endswith('Z'):
format = format + 'Z'
return datetime.strptime(value, format)
_ENTITY_TO_PYTHON_CONVERSIONS = {
'Edm.Binary': _from_entity_binary,
'Edm.Int32': _from_entity_int,
'Edm.Int64': _from_entity_int,
'Edm.Double': float,
'Edm.Boolean': _to_python_bool,
'Edm.DateTime': _from_entity_datetime,
}
# Conversion from Python type to a function which returns a tuple of the
# type string and content string.
_PYTHON_TO_ENTITY_CONVERSIONS = {
int: _to_entity_int,
bool: _to_entity_bool,
datetime: _to_entity_datetime,
float: _to_entity_float,
EntityProperty: _to_entity_property,
str: _to_entity_str,
}
if sys.version_info < (3,):
_PYTHON_TO_ENTITY_CONVERSIONS.update({
long: _to_entity_int,
types.NoneType: _to_entity_none,
unicode: _to_entity_str,
})
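# Examples of what the conversion tables above produce (values are
# illustrative):
#
#   _PYTHON_TO_ENTITY_CONVERSIONS[int](3)                 -> ('Edm.Int32', '3')
#   _PYTHON_TO_ENTITY_CONVERSIONS[bool](True)             -> ('Edm.Boolean', 'true')
#   _ENTITY_TO_PYTHON_CONVERSIONS['Edm.Int64']('42')      -> 42
#   _ENTITY_TO_PYTHON_CONVERSIONS['Edm.Boolean']('true')  -> True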
def _convert_entity_to_xml(source):
''' Converts an entity object to xml to send.
The entity format is:
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
<title />
<updated>2008-09-18T23:46:19.3857256Z</updated>
<author>
<name />
</author>
<id />
<content type="application/xml">
<m:properties>
<d:Address>Mountain View</d:Address>
<d:Age m:type="Edm.Int32">23</d:Age>
<d:AmountDue m:type="Edm.Double">200.23</d:AmountDue>
<d:BinaryData m:type="Edm.Binary" m:null="true" />
<d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>
<d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince>
<d:IsActive m:type="Edm.Boolean">true</d:IsActive>
<d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders>
<d:PartitionKey>mypartitionkey</d:PartitionKey>
<d:RowKey>myrowkey1</d:RowKey>
<d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp>
</m:properties>
</content>
</entry>
'''
# construct the entity body included in <m:properties> and </m:properties>
entity_body = '<m:properties xml:space="preserve">{properties}</m:properties>'
if isinstance(source, WindowsAzureData):
source = vars(source)
properties_str = ''
# Set the property type for types we know about when the value carries no
# type info; if the value has type info, use value.type.
for name, value in source.items():
mtype = ''
conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))
if conv is None and sys.version_info >= (3,) and value is None:
conv = _to_entity_none
if conv is None:
raise WindowsAzureError(
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(
type(value).__name__))
mtype, value = conv(value)
# form the property node
properties_str += ''.join(['<d:', name])
if value is None:
properties_str += ' m:null="true" />'
else:
if mtype:
properties_str += ''.join([' m:type="', mtype, '"'])
properties_str += ''.join(['>',
xml_escape(value), '</d:', name, '>'])
if sys.version_info < (3,):
if isinstance(properties_str, unicode):
properties_str = properties_str.encode(encoding='utf-8')
# generate the entity_body
entity_body = entity_body.format(properties=properties_str)
xmlstr = _create_entry(entity_body)
return xmlstr
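# Example (illustrative): converting a plain dict produces an Atom entry
# whose <m:properties> element contains, among others, a line such as
# <d:Age m:type="Edm.Int32">23</d:Age>:
#
#   xmlstr = _convert_entity_to_xml(
#       {'PartitionKey': 'mypartitionkey', 'RowKey': 'myrowkey1', 'Age': 23})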
def _convert_table_to_xml(table_name):
'''
Create the xml to send for a given table name. The xml format for a table
is the same as for an entity; the only difference is that a table has a
single property, 'TableName', so we just call _convert_entity_to_xml.
table_name: the name of the table
'''
return _convert_entity_to_xml({'TableName': table_name})
def _convert_block_list_to_xml(block_id_list):
'''
Convert a block list to xml to send.
block_id_list:
a str list containing the block ids that are used in put_block_list.
Each block id is emitted as a <Latest> element.
'''
if block_id_list is None:
return ''
xml = '<?xml version="1.0" encoding="utf-8"?><BlockList>'
for value in block_id_list:
xml += '<Latest>{0}</Latest>'.format(_encode_base64(value))
return xml + '</BlockList>'
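# Example (illustrative): for two made-up block ids the generated body is
#
#   _convert_block_list_to_xml(['block-000000', 'block-000001'])
#   # '<?xml version="1.0" encoding="utf-8"?><BlockList>'
#   # '<Latest>YmxvY2stMDAwMDAw</Latest><Latest>YmxvY2stMDAwMDAx</Latest>'
#   # '</BlockList>'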
def _create_blob_result(response):
blob_properties = _parse_response_for_dict(response)
return BlobResult(response.body, blob_properties)
def _convert_response_to_block_list(response):
'''
Converts xml response to block list class.
'''
blob_block_list = BlobBlockList()
xmldoc = minidom.parseString(response.body)
for xml_block in _get_children_from_path(xmldoc,
'BlockList',
'CommittedBlocks',
'Block'):
xml_block_id = _decode_base64_to_text(
_get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue)
xml_block_size = int(
_get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)
blob_block_list.committed_blocks.append(
BlobBlock(xml_block_id, xml_block_size))
for xml_block in _get_children_from_path(xmldoc,
'BlockList',
'UncommittedBlocks',
'Block'):
xml_block_id = _decode_base64_to_text(
_get_child_nodes(xml_block, 'Name')[0].firstChild.nodeValue)
xml_block_size = int(
_get_child_nodes(xml_block, 'Size')[0].firstChild.nodeValue)
blob_block_list.uncommitted_blocks.append(
BlobBlock(xml_block_id, xml_block_size))
return blob_block_list
def _remove_prefix(name):
colon = name.find(':')
if colon != -1:
return name[colon + 1:]
return name
def _convert_response_to_entity(response):
if response is None:
return response
return _convert_xml_to_entity(response.body)
def _convert_xml_to_entity(xmlstr):
''' Convert xml response to entity.
The format of entity:
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom">
<title />
<updated>2008-09-18T23:46:19.3857256Z</updated>
<author>
<name />
</author>
<id />
<content type="application/xml">
<m:properties>
<d:Address>Mountain View</d:Address>
<d:Age m:type="Edm.Int32">23</d:Age>
<d:AmountDue m:type="Edm.Double">200.23</d:AmountDue>
<d:BinaryData m:type="Edm.Binary" m:null="true" />
<d:CustomerCode m:type="Edm.Guid">c9da6455-213d-42c9-9a79-3e9149a57833</d:CustomerCode>
<d:CustomerSince m:type="Edm.DateTime">2008-07-10T00:00:00</d:CustomerSince>
<d:IsActive m:type="Edm.Boolean">true</d:IsActive>
<d:NumOfOrders m:type="Edm.Int64">255</d:NumOfOrders>
<d:PartitionKey>mypartitionkey</d:PartitionKey>
<d:RowKey>myrowkey1</d:RowKey>
<d:Timestamp m:type="Edm.DateTime">0001-01-01T00:00:00</d:Timestamp>
</m:properties>
</content>
</entry>
'''
xmldoc = minidom.parseString(xmlstr)
xml_properties = None
for entry in _get_child_nodes(xmldoc, 'entry'):
for content in _get_child_nodes(entry, 'content'):
# TODO: Namespace
xml_properties = _get_child_nodesNS(
content, METADATA_NS, 'properties')
if not xml_properties:
return None
entity = Entity()
# extract each property node and get the type from attribute and node value
for xml_property in xml_properties[0].childNodes:
name = _remove_prefix(xml_property.nodeName)
# exclude the Timestamp since it is auto added by azure when
# inserting entity. We don't want this to mix with real properties
if name in ['Timestamp']:
continue
if xml_property.firstChild:
value = xml_property.firstChild.nodeValue
else:
value = ''
isnull = xml_property.getAttributeNS(METADATA_NS, 'null')
mtype = xml_property.getAttributeNS(METADATA_NS, 'type')
# if not isnull and no type info, then it is a string and we just
# need the str type to hold the property.
if not isnull and not mtype:
_set_entity_attr(entity, name, value)
elif isnull == 'true':
if mtype:
property = EntityProperty(mtype, None)
else:
property = EntityProperty('Edm.String', None)
else: # need an object to hold the property
conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
if conv is not None:
property = conv(value)
else:
property = EntityProperty(mtype, value)
_set_entity_attr(entity, name, property)
# Extract the entry properties (e.g. etag) from the feed entry and set
# them on the entity.
for name, value in _get_entry_properties(xmlstr, True).items():
if name in ['etag']:
_set_entity_attr(entity, name, value)
return entity
def _set_entity_attr(entity, name, value):
try:
setattr(entity, name, value)
except UnicodeEncodeError:
# Python 2 doesn't support unicode attribute names, so we'll
# add them and access them directly through the dictionary
entity.__dict__[name] = value
def _convert_xml_to_table(xmlstr):
''' Converts the xml response to a Table.
Simply calls _convert_xml_to_entity, extracts the table name, and adds
the updated and author info.
'''
table = Table()
entity = _convert_xml_to_entity(xmlstr)
setattr(table, 'name', entity.TableName)
for name, value in _get_entry_properties(xmlstr, False).items():
setattr(table, name, value)
return table
def _storage_error_handler(http_error):
''' Simple error handler for storage service. '''
return _general_error_handler(http_error)
# make these available just from storage.
from azure.storage.blobservice import BlobService
from azure.storage.queueservice import QueueService
from azure.storage.tableservice import TableService
from azure.storage.cloudstorageaccount import CloudStorageAccount
from azure.storage.sharedaccesssignature import (
SharedAccessSignature,
SharedAccessPolicy,
Permission,
WebResource,
)

File diff suppressed because it is too large

View File

@ -0,0 +1,39 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.storage.blobservice import BlobService
from azure.storage.tableservice import TableService
from azure.storage.queueservice import QueueService
class CloudStorageAccount(object):
"""
Provides a factory for creating the blob, queue, and table services
with a common account name and account key. Users can either use the
factory or can construct the appropriate service directly.
"""
def __init__(self, account_name=None, account_key=None):
self.account_name = account_name
self.account_key = account_key
def create_blob_service(self):
return BlobService(self.account_name, self.account_key)
def create_table_service(self):
return TableService(self.account_name, self.account_key)
def create_queue_service(self):
return QueueService(self.account_name, self.account_key)
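# Usage sketch (the account name and key are placeholders):
#
#   account = CloudStorageAccount('myaccount', 'mykey')
#   blob_service = account.create_blob_service()
#   queue_service = account.create_queue_service()
#   table_service = account.create_table_service()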

View File

@ -0,0 +1,458 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
WindowsAzureConflictError,
WindowsAzureError,
DEV_QUEUE_HOST,
QUEUE_SERVICE_HOST_BASE,
xml_escape,
_convert_class_to_xml,
_dont_fail_not_exist,
_dont_fail_on_exist,
_get_request_body,
_int_or_none,
_parse_enum_results_list,
_parse_response,
_parse_response_for_dict_filter,
_parse_response_for_dict_prefix,
_str,
_str_or_none,
_update_request_uri_query_local_storage,
_validate_not_none,
_ERROR_CONFLICT,
)
from azure.http import (
HTTPRequest,
HTTP_RESPONSE_NO_CONTENT,
)
from azure.storage import (
Queue,
QueueEnumResults,
QueueMessagesList,
StorageServiceProperties,
_update_storage_queue_header,
)
from azure.storage.storageclient import _StorageClient
class QueueService(_StorageClient):
'''
This is the main class managing queue resources.
'''
def __init__(self, account_name=None, account_key=None, protocol='https',
host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST):
'''
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
protocol: Optional. Protocol. Defaults to https.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
dev_host: Optional. Dev host url. Defaults to localhost.
'''
super(QueueService, self).__init__(
account_name, account_key, protocol, host_base, dev_host)
def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue Service, including
Windows Azure Storage Analytics.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def list_queues(self, prefix=None, marker=None, maxresults=None,
include=None):
'''
Lists all of the queues in a given storage account.
prefix:
Filters the results to return only queues with names that begin
with the specified prefix.
marker:
A string value that identifies the portion of the list to be
returned with the next list operation. The operation returns a
NextMarker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
maxresults:
Specifies the maximum number of queues to return. If maxresults is
not specified, the server will return up to 5,000 items.
include:
Optional. Include this parameter to specify that the queue's
metadata be returned as part of the response body.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_enum_results_list(
response, QueueEnumResults, "Queues", Queue)
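# Usage sketch (illustrative): paging through all queues with a given prefix
# using the opaque next_marker returned by the service; 'queue_service' is a
# QueueService instance and 'task' is a made-up prefix:
#
#   marker = None
#   while True:
#       result = queue_service.list_queues(prefix='task', marker=marker,
#                                          maxresults=100)
#       for queue in result:
#           print(queue.name)
#       marker = result.next_marker
#       if not marker:
#           break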
def create_queue(self, queue_name, x_ms_meta_name_values=None,
fail_on_exist=False):
'''
Creates a queue under the given account.
queue_name: name of the queue.
x_ms_meta_name_values:
Optional. A dict containing name-value pairs to associate with the
queue as metadata.
fail_on_exist: Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
if not fail_on_exist:
try:
response = self._perform_request(request)
if response.status == HTTP_RESPONSE_NO_CONTENT:
return False
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
response = self._perform_request(request)
if response.status == HTTP_RESPONSE_NO_CONTENT:
raise WindowsAzureConflictError(
_ERROR_CONFLICT.format(response.message))
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Permanently deletes the specified queue.
queue_name: Name of the queue.
fail_not_exist:
Specify whether to throw an exception when the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue_metadata(self, queue_name):
'''
Retrieves user-defined metadata and queue properties on the specified
queue. Metadata is associated with the queue as name-value pairs.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '?comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_prefix(
response,
prefixes=['x-ms-meta', 'x-ms-approximate-messages-count'])
def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
'''
Sets user-defined metadata on the specified queue. Metadata is
associated with the queue as name-value pairs.
queue_name: Name of the queue.
x_ms_meta_name_values:
Optional. A dict containing name-value pairs to associate with the
queue as metadata.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '?comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def put_message(self, queue_name, message_text, visibilitytimeout=None,
messagettl=None):
'''
Adds a new message to the back of the message queue. A visibility
timeout can also be specified to make the message invisible until the
visibility timeout expires. A message must be in a format that can be
included in an XML request with UTF-8 encoding. The encoded message can
be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size
for previous versions.
queue_name: Name of the queue.
message_text: Message content.
visibilitytimeout:
Optional. If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The new value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibilitytimeout
should be set to a value smaller than the time-to-live value.
messagettl:
Optional. Specifies the time-to-live interval for the message, in
seconds. The maximum time-to-live allowed is 7 days. If this
parameter is omitted, the default time-to-live is 7 days.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_text', message_text)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.query = [
('visibilitytimeout', _str_or_none(visibilitytimeout)),
('messagettl', _str_or_none(messagettl))
]
request.body = _get_request_body(
'<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
<MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \
</QueueMessage>')
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def get_messages(self, queue_name, numofmessages=None,
visibilitytimeout=None):
'''
Retrieves one or more messages from the front of the queue.
queue_name: Name of the queue.
numofmessages:
Optional. A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If
fewer are visible, the visible messages are returned. By default,
a single message is retrieved from the queue with this operation.
visibilitytimeout:
Specifies the new visibility timeout value, in seconds, relative
to server time. The new value must be larger than or equal to 1
second, and cannot be larger than 7 days, or larger than 2 hours
on REST protocol versions prior to version 2011-08-18. The
visibility timeout of a message can be set to a value later than
the expiry time.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.query = [
('numofmessages', _str_or_none(numofmessages)),
('visibilitytimeout', _str_or_none(visibilitytimeout))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, QueueMessagesList)
def peek_messages(self, queue_name, numofmessages=None):
'''
Retrieves one or more messages from the front of the queue, but does
not alter the visibility of the message.
queue_name: Name of the queue.
numofmessages:
Optional. A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages?peekonly=true'
request.query = [('numofmessages', _str_or_none(numofmessages))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response(response, QueueMessagesList)
def delete_message(self, queue_name, message_id, popreceipt):
'''
Deletes the specified message.
queue_name: Name of the queue.
message_id: Message to delete.
popreceipt:
Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('popreceipt', popreceipt)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(queue_name) + '/messages/' + _str(message_id) + ''
request.query = [('popreceipt', _str_or_none(popreceipt))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
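# Usage sketch (illustrative): producing, consuming and deleting a message.
# 'taskqueue' is a placeholder name, 'queue_service' is a QueueService
# instance, and handle() stands for application-specific processing:
#
#   queue_service.create_queue('taskqueue')
#   queue_service.put_message('taskqueue', 'hello world')
#   messages = queue_service.get_messages('taskqueue', numofmessages=1,
#                                         visibilitytimeout=60)
#   for message in messages:
#       handle(message.message_text)
#       queue_service.delete_message('taskqueue', message.message_id,
#                                    message.pop_receipt)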
def clear_messages(self, queue_name):
'''
Deletes all messages from the specified queue.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)
def update_message(self, queue_name, message_id, message_text, popreceipt,
visibilitytimeout):
'''
Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
queue_name: Name of the queue.
message_id: Message to update.
message_text: Content of message.
popreceipt:
Required. A valid pop receipt value returned from an earlier call
to the Get Messages or Update Message operation.
visibilitytimeout:
Required. Specifies the new visibility timeout value, in seconds,
relative to server time. The new value must be larger than or equal
to 0, and cannot be larger than 7 days. The visibility timeout of a
message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('message_text', message_text)
_validate_not_none('popreceipt', popreceipt)
_validate_not_none('visibilitytimeout', visibilitytimeout)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(queue_name) + '/messages/' + _str(message_id) + ''
request.query = [
('popreceipt', _str_or_none(popreceipt)),
('visibilitytimeout', _str_or_none(visibilitytimeout))
]
request.body = _get_request_body(
'<?xml version="1.0" encoding="utf-8"?> \
<QueueMessage> \
<MessageText>' + xml_escape(_str(message_text)) + '</MessageText> \
</QueueMessage>')
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
response = self._perform_request(request)
return _parse_response_for_dict_filter(
response,
filter=['x-ms-popreceipt', 'x-ms-time-next-visible'])
def set_queue_service_properties(self, storage_service_properties,
timeout=None):
'''
Sets the properties of a storage account's Queue service, including
Windows Azure Storage Analytics.
storage_service_properties: StorageServiceProperties object.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('storage_service_properties',
storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(
_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.account_name, self.account_key)
self._perform_request(request)

View File

@ -0,0 +1,230 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import url_quote
from azure.storage import _sign_string, X_MS_VERSION
#-------------------------------------------------------------------------
# Constants for the share access signature
SIGNED_START = 'st'
SIGNED_EXPIRY = 'se'
SIGNED_RESOURCE = 'sr'
SIGNED_PERMISSION = 'sp'
SIGNED_IDENTIFIER = 'si'
SIGNED_SIGNATURE = 'sig'
SIGNED_VERSION = 'sv'
RESOURCE_BLOB = 'b'
RESOURCE_CONTAINER = 'c'
SIGNED_RESOURCE_TYPE = 'resource'
SHARED_ACCESS_PERMISSION = 'permission'
#--------------------------------------------------------------------------
class WebResource(object):
'''
Class that represents the resource for which to get the shared access
signature.
path: the resource path.
properties: dict of names and values. Contains 2 items: resource type and
permission.
request_url: the url of the web resource, including all the query
parameters.
'''
def __init__(self, path=None, request_url=None, properties=None):
self.path = path
self.properties = properties or {}
self.request_url = request_url
class Permission(object):
'''
Permission class. Contains the path and query_string for the path.
path: the resource path
query_string: dict of names and values. Contains SIGNED_START,
SIGNED_EXPIRY, SIGNED_RESOURCE, SIGNED_PERMISSION, SIGNED_IDENTIFIER,
and SIGNED_SIGNATURE name values.
'''
def __init__(self, path=None, query_string=None):
self.path = path
self.query_string = query_string
class SharedAccessPolicy(object):
''' SharedAccessPolicy class. '''
def __init__(self, access_policy, signed_identifier=None):
self.id = signed_identifier
self.access_policy = access_policy
class SharedAccessSignature(object):
'''
The main class used to do the signing and generating the signature.
account_name:
the storage account name used to generate the shared access signature
account_key: the access key used to generate the shared access signature
permission_set: the permission cache used to sign the request url.
'''
def __init__(self, account_name, account_key, permission_set=None):
self.account_name = account_name
self.account_key = account_key
self.permission_set = permission_set
def generate_signed_query_string(self, path, resource_type,
shared_access_policy,
version=X_MS_VERSION):
'''
Generates the query string for path, resource type and shared access
policy.
path: the resource
resource_type: could be blob or container
shared_access_policy: shared access policy
version:
x-ms-version for storage service, or None to get a signed query
string compatible with pre 2012-02-12 clients, where the version
is not included in the query string.
'''
query_string = {}
if shared_access_policy.access_policy.start:
query_string[
SIGNED_START] = shared_access_policy.access_policy.start
if version:
query_string[SIGNED_VERSION] = version
query_string[SIGNED_EXPIRY] = shared_access_policy.access_policy.expiry
query_string[SIGNED_RESOURCE] = resource_type
query_string[
SIGNED_PERMISSION] = shared_access_policy.access_policy.permission
if shared_access_policy.id:
query_string[SIGNED_IDENTIFIER] = shared_access_policy.id
query_string[SIGNED_SIGNATURE] = self._generate_signature(
path, shared_access_policy, version)
return query_string
def sign_request(self, web_resource):
''' Sign the request to generate a request_url with shared access
signature info for web_resource.'''
if self.permission_set:
for shared_access_signature in self.permission_set:
if self._permission_matches_request(
shared_access_signature, web_resource,
web_resource.properties[
SIGNED_RESOURCE_TYPE],
web_resource.properties[SHARED_ACCESS_PERMISSION]):
if web_resource.request_url.find('?') == -1:
web_resource.request_url += '?'
else:
web_resource.request_url += '&'
web_resource.request_url += self._convert_query_string(
shared_access_signature.query_string)
break
return web_resource
def _convert_query_string(self, query_string):
''' Converts the query string dict to a str. The order of names and
values is significant and must not change.'''
convert_str = ''
if SIGNED_START in query_string:
convert_str += SIGNED_START + '=' + \
url_quote(query_string[SIGNED_START]) + '&'
convert_str += SIGNED_EXPIRY + '=' + \
url_quote(query_string[SIGNED_EXPIRY]) + '&'
convert_str += SIGNED_PERMISSION + '=' + \
query_string[SIGNED_PERMISSION] + '&'
convert_str += SIGNED_RESOURCE + '=' + \
query_string[SIGNED_RESOURCE] + '&'
if SIGNED_IDENTIFIER in query_string:
convert_str += SIGNED_IDENTIFIER + '=' + \
query_string[SIGNED_IDENTIFIER] + '&'
if SIGNED_VERSION in query_string:
convert_str += SIGNED_VERSION + '=' + \
query_string[SIGNED_VERSION] + '&'
convert_str += SIGNED_SIGNATURE + '=' + \
url_quote(query_string[SIGNED_SIGNATURE]) + '&'
return convert_str
def _generate_signature(self, path, shared_access_policy, version):
''' Generates signature for a given path and shared access policy. '''
def get_value_to_append(value, no_new_line=False):
return_value = ''
if value:
return_value = value
if not no_new_line:
return_value += '\n'
return return_value
if path[0] != '/':
path = '/' + path
canonicalized_resource = '/' + self.account_name + path
# Form the string to sign from shared_access_policy and canonicalized
# resource. The order of values is important.
string_to_sign = \
(get_value_to_append(shared_access_policy.access_policy.permission) +
get_value_to_append(shared_access_policy.access_policy.start) +
get_value_to_append(shared_access_policy.access_policy.expiry) +
get_value_to_append(canonicalized_resource))
if version:
string_to_sign += get_value_to_append(shared_access_policy.id)
string_to_sign += get_value_to_append(version, True)
else:
string_to_sign += get_value_to_append(shared_access_policy.id, True)
return self._sign(string_to_sign)
def _permission_matches_request(self, shared_access_signature,
web_resource, resource_type,
required_permission):
''' Check whether requested permission matches given
shared_access_signature, web_resource and resource type. '''
required_resource_type = resource_type
if required_resource_type == RESOURCE_BLOB:
required_resource_type += RESOURCE_CONTAINER
for name, value in shared_access_signature.query_string.items():
if name == SIGNED_RESOURCE and \
required_resource_type.find(value) == -1:
return False
elif name == SIGNED_PERMISSION and \
required_permission.find(value) == -1:
return False
return web_resource.path.find(shared_access_signature.path) != -1
def _sign(self, string_to_sign):
''' Use HMAC-SHA256 to sign the string and return it as a base64
encoded string. '''
return _sign_string(self.account_key, string_to_sign)

View File

@ -0,0 +1,152 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
import sys
from azure import (
WindowsAzureError,
DEV_ACCOUNT_NAME,
DEV_ACCOUNT_KEY,
_ERROR_STORAGE_MISSING_INFO,
)
from azure.http import HTTPError
from azure.http.httpclient import _HTTPClient
from azure.storage import _storage_error_handler
#--------------------------------------------------------------------------
# constants for azure app setting environment variables
AZURE_STORAGE_ACCOUNT = 'AZURE_STORAGE_ACCOUNT'
AZURE_STORAGE_ACCESS_KEY = 'AZURE_STORAGE_ACCESS_KEY'
EMULATED = 'EMULATED'
#--------------------------------------------------------------------------
class _StorageClient(object):
'''
This is the base class for BlobManager, TableManager and QueueManager.
'''
def __init__(self, account_name=None, account_key=None, protocol='https',
host_base='', dev_host=''):
'''
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
protocol: Optional. Protocol. Defaults to https.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
dev_host: Optional. Dev host url. Defaults to localhost.
'''
self.account_name = account_name
self.account_key = account_key
self.requestid = None
self.protocol = protocol
self.host_base = host_base
self.dev_host = dev_host
# Whether to use the local development storage account and dev host
# instead of the live service.
self.use_local_storage = False
# check whether it is run in emulator.
if EMULATED in os.environ:
self.is_emulated = os.environ[EMULATED].lower() != 'false'
else:
self.is_emulated = False
# Get account_name and account_key. If they were not passed to the
# constructor, fall back to the default development storage account
# and key when running in the emulator, or to the environment
# variables otherwise.
if not self.account_name or not self.account_key:
if self.is_emulated:
self.account_name = DEV_ACCOUNT_NAME
self.account_key = DEV_ACCOUNT_KEY
self.protocol = 'http'
self.use_local_storage = True
else:
self.account_name = os.environ.get(AZURE_STORAGE_ACCOUNT)
self.account_key = os.environ.get(AZURE_STORAGE_ACCESS_KEY)
if not self.account_name or not self.account_key:
raise WindowsAzureError(_ERROR_STORAGE_MISSING_INFO)
self._httpclient = _HTTPClient(
service_instance=self,
account_key=self.account_key,
account_name=self.account_name,
protocol=self.protocol)
self._batchclient = None
self._filter = self._perform_request_worker
def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
'''
res = type(self)(self.account_name, self.account_key, self.protocol)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
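# A minimal usage sketch (hypothetical filter, not part of this module):
# wrap a derived service, e.g. a TableService, with a filter that retries
# transient HTTP errors before giving up.
#
#   def retry_filter(request, next_filter):
#       last_error = None
#       for _ in range(3):
#           try:
#               return next_filter(request)
#           except HTTPError as error:
#               last_error = error
#       raise last_error
#
#   service = TableService(account_name, account_key).with_filter(retry_filter)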
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
def _get_host(self):
if self.use_local_storage:
return self.dev_host
else:
return self.account_name + self.host_base
def _perform_request_worker(self, request):
return self._httpclient.perform_request(request)
def _perform_request(self, request, text_encoding='utf-8'):
'''
Sends the request and returns the response. Catches HTTPError and
hands it to the error handler.
'''
try:
if self._batchclient is not None:
return self._batchclient.insert_request_to_batch(request)
else:
resp = self._filter(request)
if sys.version_info >= (3,) and isinstance(resp, bytes) and \
text_encoding:
resp = resp.decode(text_encoding)
except HTTPError as ex:
_storage_error_handler(ex)
return resp

View File

@ -0,0 +1,491 @@
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
WindowsAzureError,
TABLE_SERVICE_HOST_BASE,
DEV_TABLE_HOST,
_convert_class_to_xml,
_convert_response_to_feeds,
_dont_fail_not_exist,
_dont_fail_on_exist,
_get_request_body,
_int_or_none,
_parse_response,
_parse_response_for_dict,
_parse_response_for_dict_filter,
_str,
_str_or_none,
_update_request_uri_query_local_storage,
_validate_not_none,
)
from azure.http import HTTPRequest
from azure.http.batchclient import _BatchClient
from azure.storage import (
StorageServiceProperties,
_convert_entity_to_xml,
_convert_response_to_entity,
_convert_table_to_xml,
_convert_xml_to_entity,
_convert_xml_to_table,
_sign_storage_table_request,
_update_storage_table_header,
)
from azure.storage.storageclient import _StorageClient
class TableService(_StorageClient):
'''
This is the main class managing Table resources.
'''
def __init__(self, account_name=None, account_key=None, protocol='https',
host_base=TABLE_SERVICE_HOST_BASE, dev_host=DEV_TABLE_HOST):
'''
account_name: your storage account name, required for all operations.
account_key: your storage account key, required for all operations.
protocol: Optional. Protocol. Defaults to https.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
dev_host: Optional. Dev host url. Defaults to localhost.
'''
super(TableService, self).__init__(
account_name, account_key, protocol, host_base, dev_host)
def begin_batch(self):
if self._batchclient is None:
self._batchclient = _BatchClient(
service_instance=self,
account_key=self.account_key,
account_name=self.account_name)
return self._batchclient.begin_batch()
def commit_batch(self):
try:
ret = self._batchclient.commit_batch()
finally:
self._batchclient = None
return ret
def cancel_batch(self):
self._batchclient = None
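# A usage sketch (table and property names are assumptions): operations
# issued between begin_batch() and commit_batch() are collected and sent
# as a single batch request against one partition.
#
#   ts = TableService(account_name, account_key)
#   ts.begin_batch()
#   ts.insert_entity('tasks', {'PartitionKey': 'p1', 'RowKey': '1', 'text': 'a'})
#   ts.insert_entity('tasks', {'PartitionKey': 'p1', 'RowKey': '2', 'text': 'b'})
#   ts.commit_batch()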
def get_table_service_properties(self):
'''
Gets the properties of a storage account's Table service, including
Windows Azure Storage Analytics.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response(response, StorageServiceProperties)
def set_table_service_properties(self, storage_service_properties):
'''
Sets the properties of a storage account's Table Service, including
Windows Azure Storage Analytics.
storage_service_properties: StorageServiceProperties object.
'''
_validate_not_none('storage_service_properties',
storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.body = _get_request_body(
_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response_for_dict(response)
def query_tables(self, table_name=None, top=None, next_table_name=None):
'''
Returns a list of tables under the specified account.
table_name: Optional. The specific table to query.
top: Optional. Maximum number of tables to return.
next_table_name:
Optional. When top is used, the next table name is stored in
result.x_ms_continuation['NextTableName']
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
if table_name is not None:
uri_part_table_name = "('" + table_name + "')"
else:
uri_part_table_name = ""
request.path = '/Tables' + uri_part_table_name + ''
request.query = [
('$top', _int_or_none(top)),
('NextTableName', _str_or_none(next_table_name))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_table)
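# A paging sketch (names assumed): when top is used, fetch the next page
# with the continuation token surfaced on the result, as described in the
# docstring above.
#
#   result = ts.query_tables(top=10)
#   continuation = result.x_ms_continuation
#   if 'NextTableName' in continuation:
#       more = ts.query_tables(
#           top=10, next_table_name=continuation['NextTableName'])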
def create_table(self, table, fail_on_exist=False):
'''
Creates a new table in the storage account.
table:
Name of the table to create. Table name may contain only
alphanumeric characters and cannot begin with a numeric character.
It is case-insensitive and must be from 3 to 63 characters long.
fail_on_exist: Specify whether to throw an exception if the table already exists.
'''
_validate_not_none('table', table)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/Tables'
request.body = _get_request_body(_convert_table_to_xml(table))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_table(self, table_name, fail_not_exist=False):
'''
table_name: Name of the table to delete.
fail_not_exist:
Specify whether to throw an exception if the table doesn't exist.
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/Tables(\'' + _str(table_name) + '\')'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_entity(self, table_name, partition_key, row_key, select=''):
'''
Get an entity in a table; includes the $select options.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
select: Property names to select.
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('select', select)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(table_name) + \
'(PartitionKey=\'' + _str(partition_key) + \
'\',RowKey=\'' + \
_str(row_key) + '\')?$select=' + \
_str(select) + ''
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _convert_response_to_entity(response)
def query_entities(self, table_name, filter=None, select=None, top=None,
next_partition_key=None, next_row_key=None):
'''
Get entities in a table; includes the $filter and $select options.
table_name: Table to query.
filter:
Optional. Filter as described at
http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
select: Optional. Property names to select from the entities.
top: Optional. Maximum number of entities to return.
next_partition_key:
Optional. When top is used, the next partition key is stored in
result.x_ms_continuation['NextPartitionKey']
next_row_key:
Optional. When top is used, the next row key is stored in
result.x_ms_continuation['NextRowKey']
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(table_name) + '()'
request.query = [
('$filter', _str_or_none(filter)),
('$select', _str_or_none(select)),
('$top', _int_or_none(top)),
('NextPartitionKey', _str_or_none(next_partition_key)),
('NextRowKey', _str_or_none(next_row_key))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_entity)
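# A usage sketch (table, filter expression and property names are
# assumptions for illustration):
#
#   tasks = ts.query_entities(
#       'tasks',
#       filter="PartitionKey eq 'p1'",
#       select='RowKey,text',
#       top=100)
#   for task in tasks:
#       print(task.RowKey)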
def insert_entity(self, table_name, entity,
content_type='application/atom+xml'):
'''
Inserts a new entity into a table.
table_name: Table name.
entity:
Required. The entity to insert. Can be a dict or an entity object.
content_type: Required. Must be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(table_name) + ''
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _convert_response_to_entity(response)
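# A minimal sketch (assumed table and property names): the entity may be a
# plain dict as long as it carries PartitionKey and RowKey.
#
#   ts.insert_entity('tasks', {
#       'PartitionKey': 'p1',
#       'RowKey': '42',
#       'text': 'hello',
#   })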
def update_entity(self, table_name, partition_key, row_key, entity,
content_type='application/atom+xml', if_match='*'):
'''
Updates an existing entity in a table. The Update Entity operation
replaces the entire entity and can be used to remove properties.
table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
entity:
Required. The replacement entity. Can be a dict or an entity object.
content_type: Required. Must be set to application/atom+xml
if_match:
Optional. Specifies the condition under which the update should be
performed. To force an unconditional update, set to the wildcard
character (*).
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(table_name) + '(PartitionKey=\'' + \
_str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response_for_dict_filter(response, filter=['etag'])
def merge_entity(self, table_name, partition_key, row_key, entity,
content_type='application/atom+xml', if_match='*'):
'''
Updates an existing entity by updating the entity's properties. This
operation does not replace the existing entity as the Update Entity
operation does.
table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
entity:
Required. The entity to merge. Can be a dict or an entity object.
content_type: Required. Must be set to application/atom+xml
if_match:
Optional. Specifies the condition under which the merge should be
performed. To force an unconditional merge, set to the wildcard
character (*).
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
request.host = self._get_host()
request.path = '/' + \
_str(table_name) + '(PartitionKey=\'' + \
_str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response_for_dict_filter(response, filter=['etag'])
def delete_entity(self, table_name, partition_key, row_key,
content_type='application/atom+xml', if_match='*'):
'''
Deletes an existing entity in a table.
table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
content_type: Required. Must be set to application/atom+xml
if_match:
Optional. Specifies the condition under which the delete should be
performed. To force an unconditional delete, set to the wildcard
character (*).
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('content_type', content_type)
_validate_not_none('if_match', if_match)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(table_name) + '(PartitionKey=\'' + \
_str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
('If-Match', _str_or_none(if_match))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
self._perform_request(request)
def insert_or_replace_entity(self, table_name, partition_key, row_key,
entity, content_type='application/atom+xml'):
'''
Replaces an existing entity or inserts a new entity if it does not
exist in the table. Because this operation can insert or update an
entity, it is also known as an "upsert" operation.
table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
entity:
Required. The entity to insert or replace. Can be a dict or an entity object.
content_type: Required. Must be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(table_name) + '(PartitionKey=\'' + \
_str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response_for_dict_filter(response, filter=['etag'])
def insert_or_merge_entity(self, table_name, partition_key, row_key,
entity, content_type='application/atom+xml'):
'''
Merges an existing entity or inserts a new entity if it does not exist
in the table. Because this operation can insert or update an entity,
it is also known as an "upsert" operation.
table_name: Table name.
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
entity:
Required. The entity to insert or merge. Can be a dict or an entity object.
content_type: Required. Must be set to application/atom+xml
'''
_validate_not_none('table_name', table_name)
_validate_not_none('partition_key', partition_key)
_validate_not_none('row_key', row_key)
_validate_not_none('entity', entity)
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
request.host = self._get_host()
request.path = '/' + \
_str(table_name) + '(PartitionKey=\'' + \
_str(partition_key) + '\',RowKey=\'' + _str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(_convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
return _parse_response_for_dict_filter(response, filter=['etag'])
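# Design note and sketch (names assumed): both upserts target the same
# entity URI, but insert_or_replace_entity (PUT) overwrites the stored
# entity with exactly the properties supplied, while insert_or_merge_entity
# (MERGE) keeps any existing properties that are not supplied.
#
#   ts.insert_or_replace_entity(
#       'tasks', 'p1', '42',
#       {'PartitionKey': 'p1', 'RowKey': '42', 'text': 'replaced'})
#   ts.insert_or_merge_entity(
#       'tasks', 'p1', '42',
#       {'PartitionKey': 'p1', 'RowKey': '42', 'priority': 1})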
def _perform_request_worker(self, request):
auth = _sign_storage_table_request(request,
self.account_name,
self.account_key)
request.headers.append(('Authorization', auth))
return self._httpclient.perform_request(request)

Binary file not shown.

Binary file not shown.