diff --git a/awx/lib/site-packages/libcloud/__init__.py b/awx/lib/site-packages/libcloud/__init__.py
new file mode 100644
index 0000000000..3ce79f0b20
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/__init__.py
@@ -0,0 +1,65 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+libcloud provides a unified interface to the cloud computing resources.
+
+:var __version__: Current version of libcloud
+"""
+
+__all__ = ['__version__', 'enable_debug']
+__version__ = '0.15.1'
+
+import os
+
+try:
+ import paramiko
+ have_paramiko = True
+except ImportError:
+ have_paramiko = False
+
+
+def enable_debug(fo):
+ """
+ Enable library wide debugging to a file-like object.
+
+ :param fo: Where to append debugging information
+ :type fo: File like object, only write operations are used.
+ """
+ from libcloud.common.base import (Connection,
+ LoggingHTTPConnection,
+ LoggingHTTPSConnection)
+ LoggingHTTPSConnection.log = fo
+ LoggingHTTPConnection.log = fo
+ Connection.conn_classes = (LoggingHTTPConnection,
+ LoggingHTTPSConnection)
+
+
+def _init_once():
+    """
+    Utility function that is run once on library import.
+
+    This checks for the LIBCLOUD_DEBUG environment variable, which if it exists
+    is where we will log debug information about the provider transports.
+    """
+    path = os.getenv('LIBCLOUD_DEBUG')
+    if path:
+        fo = open(path, 'a')
+        enable_debug(fo)
+
+    if have_paramiko:
+        paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
+
+_init_once()
diff --git a/awx/lib/site-packages/libcloud/common/__init__.py b/awx/lib/site-packages/libcloud/common/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/libcloud/common/abiquo.py b/awx/lib/site-packages/libcloud/common/abiquo.py
new file mode 100644
index 0000000000..f621b2aa5c
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/abiquo.py
@@ -0,0 +1,260 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Abiquo Utilities Module for the Abiquo Driver.
+
+Common utilities needed by the :class:`AbiquoNodeDriver`.
+"""
+import base64
+
+from libcloud.common.base import ConnectionUserAndKey, PollingConnection
+from libcloud.common.base import XmlResponse
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlparse
+from libcloud.utils.py3 import b
+from libcloud.compute.base import NodeState
+
+
+def get_href(element, rel):
+    """
+    Search a RESTLink element in the :class:`AbiquoResponse`.
+
+    Abiquo, as a REST API, offers self-discovering functionality.
+    That means that you could walk through the whole API only
+    navigating from the links offered by the entities.
+
+    This is a basic method to find the 'relations' of an entity searching into
+    its links.
+
+    For instance, a Rack entity serialized as XML as the following::
+
+        <rack>
+            <link href="http://10.60.12.7:80/api/admin/datacenters/1"
+                rel="datacenter"/>
+            <link href="http://10.60.12.7:80/api/admin/racks/1" rel="edit"/>
+            <link href="http://10.60.12.7:80/api/admin/racks/1/machines"
+                rel="machines"/>
+            <haEnabled>false</haEnabled>
+            <id>1</id>
+            <longDescription></longDescription>
+            <name>racacaca</name>
+            <nrsq>10</nrsq>
+            <shortDescription></shortDescription>
+            <vlanIdMax>4094</vlanIdMax>
+            <vlanIdMin>2</vlanIdMin>
+            <vlanPerVdcReserved>1</vlanPerVdcReserved>
+            <vlansIdAvoided></vlansIdAvoided>
+        </rack>
+
+    offers link to datacenters (rel='datacenter'), to itself (rel='edit') and
+    to the machines defined in it (rel='machines')
+
+    A call to this method with the 'rack' element using 'datacenter' as 'rel'
+    will return:
+
+    'http://10.60.12.7:80/api/admin/datacenters/1'
+
+    :type element: :class:`xml.etree.ElementTree`
+    :param element: Xml Entity returned by Abiquo API (required)
+    :type rel: ``str``
+    :param rel: relation link name
+    :rtype: ``str``
+    :return: the 'href' value according to the 'rel' input parameter
+    """
+    links = element.findall('link')
+    for link in links:
+        if link.attrib['rel'] == rel:
+            href = link.attrib['href']
+            # href is something like:
+            #
+            # 'http://localhost:80/api/admin/enterprises'
+            #
+            # we are only interested in '/admin/enterprises/' part
+            needle = '/api/'
+            url_path = urlparse.urlparse(href).path
+            index = url_path.find(needle)
+            result = url_path[index + len(needle) - 1:]
+            return result
+
+
+class AbiquoResponse(XmlResponse):
+ """
+ Abiquo XML Response.
+
+ Wraps the response in XML bodies or extract the error data in
+ case of error.
+ """
+
+ # Map between abiquo state and Libcloud State
+ NODE_STATE_MAP = {
+ 'NOT_ALLOCATED': NodeState.TERMINATED,
+ 'ALLOCATED': NodeState.PENDING,
+ 'CONFIGURED': NodeState.PENDING,
+ 'ON': NodeState.RUNNING,
+ 'PAUSED': NodeState.PENDING,
+ 'OFF': NodeState.PENDING,
+ 'LOCKED': NodeState.PENDING,
+ 'UNKNOWN': NodeState.UNKNOWN
+ }
+
+ def parse_error(self):
+ """
+ Parse the error messages.
+
+ Response body can easily be handled by this class parent
+ :class:`XmlResponse`, but there are use cases which Abiquo API
+ does not respond an XML but an HTML. So we need to
+ handle these special cases.
+ """
+ if self.status == httplib.UNAUTHORIZED:
+ raise InvalidCredsError(driver=self.connection.driver)
+ elif self.status == httplib.FORBIDDEN:
+ raise ForbiddenError(self.connection.driver)
+ else:
+ errors = self.parse_body().findall('error')
+ # Most of the exceptions only have one error
+ raise LibcloudError(errors[0].findtext('message'))
+
+    def success(self):
+        """
+        Determine if the request was successful.
+
+        Any of the 2XX HTTP response codes are accepted as successful requests
+
+        :rtype: ``bool``
+        :return: successful request or not.
+        """
+        return self.status in [httplib.OK, httplib.CREATED, httplib.NO_CONTENT,
+                               httplib.ACCEPTED]
+
+    def async_success(self):
+        """
+        Determine if an async request was successful.
+
+        An async_request retrieves a task object that can be successfully
+        retrieved (self.status == OK), but the asynchronous task (the body of
+        the HTTP response) which we are asking for has finished with an error.
+        So this method checks if the status code is 'OK' and if the task
+        has finished successfully.
+
+        :rtype: ``bool``
+        :return: successful asynchronous request or not
+        """
+        if self.success():
+            # So we have a 'task' object in the body
+            task = self.parse_body()
+            return task.findtext('state') == 'FINISHED_SUCCESSFULLY'
+        else:
+            return False
+
+
+class AbiquoConnection(ConnectionUserAndKey, PollingConnection):
+ """
+ A Connection to Abiquo API.
+
+ Basic :class:`ConnectionUserAndKey` connection with
+ :class:`PollingConnection` features for asynchronous tasks.
+ """
+
+ responseCls = AbiquoResponse
+
+ def __init__(self, user_id, key, secure=True, host=None, port=None,
+ url=None, timeout=None):
+ super(AbiquoConnection, self).__init__(user_id=user_id, key=key,
+ secure=secure,
+ host=host, port=port,
+ url=url, timeout=timeout)
+
+ # This attribute stores data cached across multiple request
+ self.cache = {}
+
+    def add_default_headers(self, headers):
+        """
+        Add Basic Authentication header to all the requests.
+
+        It injects the 'Authorization: Basic Base64String===' header
+        in each request
+
+        :type headers: ``dict``
+        :param headers: Default input headers
+        :rtype: ``dict``
+        :return: Default input headers with the 'Authorization'
+            header
+        """
+        b64string = b('%s:%s' % (self.user_id, self.key))
+        encoded = base64.b64encode(b64string).decode('utf-8')
+
+        authorization = 'Basic ' + encoded
+
+        headers['Authorization'] = authorization
+        return headers
+
+ def get_poll_request_kwargs(self, response, context, request_kwargs):
+ """
+ Manage polling request arguments.
+
+ Return keyword arguments which are passed to the
+ :class:`NodeDriver.request` method when polling for the job status. The
+ Abiquo Asynchronous Response returns and 'acceptedrequest' XmlElement
+ as the following::
+
+
+
+ You can follow the progress in the link
+
+
+ We need to extract the href URI to poll.
+
+ :type response: :class:`xml.etree.ElementTree`
+ :keyword response: Object returned by poll request.
+ :type request_kwargs: ``dict``
+ :keyword request_kwargs: Default request arguments and headers
+ :rtype: ``dict``
+ :return: Modified keyword arguments
+ """
+ accepted_request_obj = response.object
+ link_poll = get_href(accepted_request_obj, 'status')
+
+ # Override just the 'action' and 'method' keys of the previous dict
+ request_kwargs['action'] = link_poll
+ request_kwargs['method'] = 'GET'
+ return request_kwargs
+
+ def has_completed(self, response):
+ """
+ Decide if the asynchronous job has ended.
+
+ :type response: :class:`xml.etree.ElementTree`
+ :param response: Response object returned by poll request
+ :rtype: ``bool``
+ :return: Whether the job has completed
+ """
+ task = response.object
+ task_state = task.findtext('state')
+ return task_state in ['FINISHED_SUCCESSFULLY', 'ABORTED',
+ 'FINISHED_UNSUCCESSFULLY']
+
+
+class ForbiddenError(LibcloudError):
+    """
+    Exception used when credentials are valid but the user has no permissions.
+    """
+
+    def __init__(self, driver):
+        message = 'User has not permission to perform this task.'
+        # Bug fix: super(LibcloudError, self) skipped LibcloudError.__init__,
+        # leaving self.value / self.driver unset on the raised exception.
+        super(ForbiddenError, self).__init__(message, driver)
diff --git a/awx/lib/site-packages/libcloud/common/aws.py b/awx/lib/site-packages/libcloud/common/aws.py
new file mode 100644
index 0000000000..1d3dfb45fb
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/aws.py
@@ -0,0 +1,193 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import hmac
+import time
+from hashlib import sha256
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.common.base import ConnectionUserAndKey, XmlResponse, BaseDriver
+from libcloud.common.types import InvalidCredsError, MalformedResponseError
+from libcloud.utils.py3 import b, httplib, urlquote
+from libcloud.utils.xml import findtext, findall
+
+
+class AWSBaseResponse(XmlResponse):
+ namespace = None
+
+ def _parse_error_details(self, element):
+ """
+ Parse code and message from the provided error element.
+
+ :return: ``tuple`` with two elements: (code, message)
+ :rtype: ``tuple``
+ """
+ code = findtext(element=element, xpath='Code',
+ namespace=self.namespace)
+ message = findtext(element=element, xpath='Message',
+ namespace=self.namespace)
+
+ return code, message
+
+
+class AWSGenericResponse(AWSBaseResponse):
+    # There are multiple error messages in AWS, but they all have an Error node
+    # with Code and Message child nodes. Xpath to select them
+    # None if the root node *is* the Error node
+    xpath = None
+
+    # This dict maps CodeName to a specific exception class that is raised
+    # immediately. If a custom exception class is not defined, errors are
+    # accumulated and returned from the parse_error method.
+    # Bug fix: was misspelled "expections"; parse_error reads self.exceptions.
+    exceptions = {}
+
+    def success(self):
+        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
+
+    def parse_error(self):
+        context = self.connection.context
+        status = int(self.status)
+
+        # FIXME: Probably ditch this as the forbidden message will have
+        # corresponding XML.
+        if status == httplib.FORBIDDEN:
+            if not self.body:
+                raise InvalidCredsError(str(self.status) + ': ' + self.error)
+            else:
+                raise InvalidCredsError(self.body)
+
+        try:
+            body = ET.XML(self.body)
+        except Exception:
+            raise MalformedResponseError('Failed to parse XML',
+                                         body=self.body,
+                                         driver=self.connection.driver)
+
+        if self.xpath:
+            errs = findall(element=body, xpath=self.xpath,
+                           namespace=self.namespace)
+        else:
+            errs = [body]
+
+        msgs = []
+        for err in errs:
+            code, message = self._parse_error_details(element=err)
+            exceptionCls = self.exceptions.get(code, None)
+
+            if exceptionCls is None:
+                msgs.append('%s: %s' % (code, message))
+                continue
+
+            # Custom exception class is defined, immediately throw an exception
+            params = {}
+            if hasattr(exceptionCls, 'kwargs'):
+                for key in exceptionCls.kwargs:
+                    if key in context:
+                        params[key] = context[key]
+
+            raise exceptionCls(value=message, driver=self.connection.driver,
+                               **params)
+
+        return "\n".join(msgs)
+
+
+class AWSTokenConnection(ConnectionUserAndKey):
+ def __init__(self, user_id, key, secure=True,
+ host=None, port=None, url=None, timeout=None, token=None):
+ self.token = token
+ super(AWSTokenConnection, self).__init__(user_id, key, secure=secure,
+ host=host, port=port, url=url,
+ timeout=timeout)
+
+ def add_default_params(self, params):
+ # Even though we are adding it to the headers, we need it here too
+ # so that the token is added to the signature.
+ if self.token:
+ params['x-amz-security-token'] = self.token
+ return super(AWSTokenConnection, self).add_default_params(params)
+
+ def add_default_headers(self, headers):
+ if self.token:
+ headers['x-amz-security-token'] = self.token
+ return super(AWSTokenConnection, self).add_default_headers(headers)
+
+
+class SignedAWSConnection(AWSTokenConnection):
+
+ def add_default_params(self, params):
+ params['SignatureVersion'] = '2'
+ params['SignatureMethod'] = 'HmacSHA256'
+ params['AWSAccessKeyId'] = self.user_id
+ params['Version'] = self.version
+ params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
+ time.gmtime())
+ params['Signature'] = self._get_aws_auth_param(params, self.key,
+ self.action)
+ return params
+
+ def _get_aws_auth_param(self, params, secret_key, path='/'):
+ """
+ Creates the signature required for AWS, per
+ http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:
+
+ StringToSign = HTTPVerb + "\n" +
+ ValueOfHostHeaderInLowercase + "\n" +
+ HTTPRequestURI + "\n" +
+ CanonicalizedQueryString
+ """
+ keys = list(params.keys())
+ keys.sort()
+ pairs = []
+ for key in keys:
+ value = str(params[key])
+ pairs.append(urlquote(key, safe='') + '=' +
+ urlquote(value, safe='-_~'))
+
+ qs = '&'.join(pairs)
+
+ hostname = self.host
+ if (self.secure and self.port != 443) or \
+ (not self.secure and self.port != 80):
+ hostname += ":" + str(self.port)
+
+ string_to_sign = '\n'.join(('GET', hostname, path, qs))
+
+ b64_hmac = base64.b64encode(
+ hmac.new(b(secret_key), b(string_to_sign),
+ digestmod=sha256).digest()
+ )
+
+ return b64_hmac.decode('utf-8')
+
+
+class AWSDriver(BaseDriver):
+ def __init__(self, key, secret=None, secure=True, host=None, port=None,
+ api_version=None, region=None, token=None, **kwargs):
+ self.token = token
+ super(AWSDriver, self).__init__(key, secret=secret, secure=secure,
+ host=host, port=port,
+ api_version=api_version, region=region,
+ token=token, **kwargs)
+
+ def _ex_connection_class_kwargs(self):
+ kwargs = super(AWSDriver, self)._ex_connection_class_kwargs()
+ kwargs['token'] = self.token
+ return kwargs
diff --git a/awx/lib/site-packages/libcloud/common/azure.py b/awx/lib/site-packages/libcloud/common/azure.py
new file mode 100644
index 0000000000..104cca817b
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/azure.py
@@ -0,0 +1,189 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import time
+import base64
+import hmac
+
+from hashlib import sha256
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+from libcloud.utils.xml import fixxpath
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.common.types import InvalidCredsError
+from libcloud.common.types import LibcloudError, MalformedResponseError
+from libcloud.common.base import ConnectionUserAndKey, RawResponse
+from libcloud.common.base import XmlResponse
+
+# Azure API version
+API_VERSION = '2012-02-12'
+
+# The time format for headers in Azure requests
+AZURE_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
+
+
+class AzureResponse(XmlResponse):
+
+ valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
+ httplib.BAD_REQUEST]
+
+ def success(self):
+ i = int(self.status)
+ return i >= 200 and i <= 299 or i in self.valid_response_codes
+
+ def parse_error(self, msg=None):
+ error_msg = 'Unknown error'
+
+ try:
+ # Azure does give some meaningful errors, but is inconsistent
+ # Some APIs respond with an XML error. Others just dump HTML
+ body = self.parse_body()
+
+ if type(body) == ET.Element:
+ code = body.findtext(fixxpath(xpath='Code'))
+ message = body.findtext(fixxpath(xpath='Message'))
+ message = message.split('\n')[0]
+ error_msg = '%s: %s' % (code, message)
+
+ except MalformedResponseError:
+ pass
+
+ if msg:
+ error_msg = '%s - %s' % (msg, error_msg)
+
+ if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
+ raise InvalidCredsError(error_msg)
+
+ raise LibcloudError('%s Status code: %d.' % (error_msg, self.status),
+ driver=self)
+
+
+class AzureRawResponse(RawResponse):
+ pass
+
+
+class AzureConnection(ConnectionUserAndKey):
+ """
+ Represents a single connection to Azure
+ """
+
+ responseCls = AzureResponse
+ rawResponseCls = AzureRawResponse
+
+ def add_default_params(self, params):
+ return params
+
+ def pre_connect_hook(self, params, headers):
+ headers = copy.deepcopy(headers)
+
+ # We have to add a date header in GMT
+ headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime())
+ headers['x-ms-version'] = API_VERSION
+
+ # Add the authorization header
+ headers['Authorization'] = self._get_azure_auth_signature(
+ method=self.method, headers=headers, params=params,
+ account=self.user_id, secret_key=self.key, path=self.action)
+
+ # Azure cribs about this in 'raw' connections
+ headers.pop('Host', None)
+
+ return params, headers
+
+ def _get_azure_auth_signature(self, method, headers, params,
+ account, secret_key, path='/'):
+ """
+ Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID,
+ UTF-8-Encoding-Of( StringToSign ) ) ) );
+
+ StringToSign = HTTP-VERB + "\n" +
+ Content-Encoding + "\n" +
+ Content-Language + "\n" +
+ Content-Length + "\n" +
+ Content-MD5 + "\n" +
+ Content-Type + "\n" +
+ Date + "\n" +
+ If-Modified-Since + "\n" +
+ If-Match + "\n" +
+ If-None-Match + "\n" +
+ If-Unmodified-Since + "\n" +
+ Range + "\n" +
+ CanonicalizedHeaders +
+ CanonicalizedResource;
+ """
+ special_header_values = []
+ xms_header_values = []
+ param_list = []
+ special_header_keys = ['content-encoding', 'content-language',
+ 'content-length', 'content-md5',
+ 'content-type', 'date', 'if-modified-since',
+ 'if-match', 'if-none-match',
+ 'if-unmodified-since', 'range']
+
+ # Split the x-ms headers and normal headers and make everything
+ # lower case
+ headers_copy = {}
+ for header, value in headers.items():
+ header = header.lower()
+ value = str(value).strip()
+ if header.startswith('x-ms-'):
+ xms_header_values.append((header, value))
+ else:
+ headers_copy[header] = value
+
+ # Get the values for the headers in the specific order
+ for header in special_header_keys:
+ header = header.lower() # Just for safety
+ if header in headers_copy:
+ special_header_values.append(headers_copy[header])
+ else:
+ special_header_values.append('')
+
+ # Prepare the first section of the string to be signed
+ values_to_sign = [method] + special_header_values
+ # string_to_sign = '\n'.join([method] + special_header_values)
+
+ # The x-ms-* headers have to be in lower case and sorted
+ xms_header_values.sort()
+
+ for header, value in xms_header_values:
+ values_to_sign.append('%s:%s' % (header, value))
+
+ # Add the canonicalized path
+ values_to_sign.append('/%s%s' % (account, path))
+
+ # URL query parameters (sorted and lower case)
+ for key, value in params.items():
+ param_list.append((key.lower(), str(value).strip()))
+
+ param_list.sort()
+
+ for key, value in param_list:
+ values_to_sign.append('%s:%s' % (key, value))
+
+ string_to_sign = b('\n'.join(values_to_sign))
+ secret_key = b(secret_key)
+ b64_hmac = base64.b64encode(
+ hmac.new(secret_key, string_to_sign, digestmod=sha256).digest()
+ )
+
+ return 'SharedKey %s:%s' % (self.user_id, b64_hmac.decode('utf-8'))
diff --git a/awx/lib/site-packages/libcloud/common/base.py b/awx/lib/site-packages/libcloud/common/base.py
new file mode 100644
index 0000000000..a42f228243
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/base.py
@@ -0,0 +1,968 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import ssl
+import copy
+import binascii
+import time
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from pipes import quote as pquote
+
+try:
+ import simplejson as json
+except:
+ import json
+
+import libcloud
+
+from libcloud.utils.py3 import PY3, PY25
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlparse
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import StringIO
+from libcloud.utils.py3 import u
+from libcloud.utils.py3 import b
+
+from libcloud.utils.misc import lowercase_keys
+from libcloud.utils.compression import decompress_data
+from libcloud.common.types import LibcloudError, MalformedResponseError
+
+from libcloud.httplib_ssl import LibcloudHTTPSConnection
+
+LibcloudHTTPConnection = httplib.HTTPConnection
+
+
+class HTTPResponse(httplib.HTTPResponse):
+ # On python 2.6 some calls can hang because HEAD isn't quite properly
+ # supported.
+ # In particular this happens on S3 when calls are made to get_object to
+ # objects that don't exist.
+ # This applies the behaviour from 2.7, fixing the hangs.
+ def read(self, amt=None):
+ if self.fp is None:
+ return ''
+
+ if self._method == 'HEAD':
+ self.close()
+ return ''
+
+ return httplib.HTTPResponse.read(self, amt)
+
+
+class Response(object):
+ """
+ A base Response class to derive from.
+ """
+
+ status = httplib.OK # Response status code
+ headers = {} # Response headers
+ body = None # Raw response body
+ object = None # Parsed response body
+
+ error = None # Reason returned by the server.
+ connection = None # Parent connection class
+ parse_zero_length_body = False
+
+ def __init__(self, response, connection):
+ """
+ :param response: HTTP response object. (optional)
+ :type response: :class:`httplib.HTTPResponse`
+
+ :param connection: Parent connection object.
+ :type connection: :class:`.Connection`
+ """
+ self.connection = connection
+
+ # http.client In Python 3 doesn't automatically lowercase the header
+ # names
+ self.headers = lowercase_keys(dict(response.getheaders()))
+ self.error = response.reason
+ self.status = response.status
+
+ # This attribute is set when using LoggingConnection.
+ original_data = getattr(response, '_original_data', None)
+
+ if original_data:
+ # LoggingConnection already decompresses data so it can log it
+ # which means we don't need to decompress it here.
+ self.body = response._original_data
+ else:
+ self.body = self._decompress_response(body=response.read(),
+ headers=self.headers)
+
+ if PY3:
+ self.body = b(self.body).decode('utf-8')
+
+ if not self.success():
+ raise Exception(self.parse_error())
+
+ self.object = self.parse_body()
+
+ def parse_body(self):
+ """
+ Parse response body.
+
+ Override in a provider's subclass.
+
+ :return: Parsed body.
+ :rtype: ``str``
+ """
+ return self.body
+
+ def parse_error(self):
+ """
+ Parse the error messages.
+
+ Override in a provider's subclass.
+
+ :return: Parsed error.
+ :rtype: ``str``
+ """
+ return self.body
+
+ def success(self):
+ """
+ Determine if our request was successful.
+
+ The meaning of this can be arbitrary; did we receive OK status? Did
+ the node get created? Were we authenticated?
+
+ :rtype: ``bool``
+ :return: ``True`` or ``False``
+ """
+ return self.status in [httplib.OK, httplib.CREATED]
+
+ def _decompress_response(self, body, headers):
+ """
+ Decompress a response body if it is using deflate or gzip encoding.
+
+ :param body: Response body.
+ :type body: ``str``
+
+ :param headers: Response headers.
+ :type headers: ``dict``
+
+ :return: Decompressed response
+ :rtype: ``str``
+ """
+ encoding = headers.get('content-encoding', None)
+
+ if encoding in ['zlib', 'deflate']:
+ body = decompress_data('zlib', body)
+ elif encoding in ['gzip', 'x-gzip']:
+ body = decompress_data('gzip', body)
+ else:
+ body = body.strip()
+
+ return body
+
+
+class JsonResponse(Response):
+ """
+ A Base JSON Response class to derive from.
+ """
+
+ def parse_body(self):
+ if len(self.body) == 0 and not self.parse_zero_length_body:
+ return self.body
+
+ try:
+ body = json.loads(self.body)
+ except:
+ raise MalformedResponseError(
+ 'Failed to parse JSON',
+ body=self.body,
+ driver=self.connection.driver)
+ return body
+
+ parse_error = parse_body
+
+
+class XmlResponse(Response):
+ """
+ A Base XML Response class to derive from.
+ """
+
+ def parse_body(self):
+ if len(self.body) == 0 and not self.parse_zero_length_body:
+ return self.body
+
+ try:
+ body = ET.XML(self.body)
+ except:
+ raise MalformedResponseError('Failed to parse XML',
+ body=self.body,
+ driver=self.connection.driver)
+ return body
+
+ parse_error = parse_body
+
+
+class RawResponse(Response):
+
+ def __init__(self, connection):
+ """
+ :param connection: Parent connection object.
+ :type connection: :class:`.Connection`
+ """
+ self._status = None
+ self._response = None
+ self._headers = {}
+ self._error = None
+ self._reason = None
+ self.connection = connection
+
+ @property
+ def response(self):
+ if not self._response:
+ response = self.connection.connection.getresponse()
+ self._response, self.body = response, response
+ if not self.success():
+ self.parse_error()
+ return self._response
+
+ @property
+ def status(self):
+ if not self._status:
+ self._status = self.response.status
+ return self._status
+
+ @property
+ def headers(self):
+ if not self._headers:
+ self._headers = lowercase_keys(dict(self.response.getheaders()))
+ return self._headers
+
+ @property
+ def reason(self):
+ if not self._reason:
+ self._reason = self.response.reason
+ return self._reason
+
+
+# TODO: Move this to a better location/package
+class LoggingConnection():
+ """
+ Debug class to log all HTTP(s) requests as they could be made
+ with the curl command.
+
+ :cvar log: file-like object that logs entries are written to.
+ """
+ log = None
+
+ def _log_response(self, r):
+ rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r))
+ ht = ""
+ v = r.version
+ if r.version == 10:
+ v = "HTTP/1.0"
+ if r.version == 11:
+ v = "HTTP/1.1"
+ ht += "%s %s %s\r\n" % (v, r.status, r.reason)
+ body = r.read()
+ for h in r.getheaders():
+ ht += "%s: %s\r\n" % (h[0].title(), h[1])
+ ht += "\r\n"
+
+ # this is evil. laugh with me. ha arharhrhahahaha
+ class fakesock:
+ def __init__(self, s):
+ self.s = s
+
+ def makefile(self, *args, **kwargs):
+ if PY3:
+ from io import BytesIO
+ cls = BytesIO
+ else:
+ cls = StringIO
+
+ return cls(b(self.s))
+ rr = r
+ headers = lowercase_keys(dict(r.getheaders()))
+
+ encoding = headers.get('content-encoding', None)
+
+ if encoding in ['zlib', 'deflate']:
+ body = decompress_data('zlib', body)
+ elif encoding in ['gzip', 'x-gzip']:
+ body = decompress_data('gzip', body)
+
+ if r.chunked:
+ ht += "%x\r\n" % (len(body))
+ ht += u(body)
+ ht += "\r\n0\r\n"
+ else:
+ ht += u(body)
+
+ if sys.version_info >= (2, 6) and sys.version_info < (2, 7):
+ cls = HTTPResponse
+ else:
+ cls = httplib.HTTPResponse
+
+ rr = cls(sock=fakesock(ht), method=r._method,
+ debuglevel=r.debuglevel)
+ rr.begin()
+ rv += ht
+ rv += ("\n# -------- end %d:%d response ----------\n"
+ % (id(self), id(r)))
+
+ rr._original_data = body
+ return (rr, rv)
+
+ def _log_curl(self, method, url, body, headers):
+ cmd = ["curl", "-i"]
+
+ if method.lower() == 'head':
+ # HEAD method need special handling
+ cmd.extend(["--head"])
+ else:
+ cmd.extend(["-X", pquote(method)])
+
+ for h in headers:
+ cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))])
+
+ # TODO: in python 2.6, body can be a file-like object.
+ if body is not None and len(body) > 0:
+ cmd.extend(["--data-binary", pquote(body)])
+
+ cmd.extend(["--compress"])
+ cmd.extend([pquote("%s://%s:%d%s" % (self.protocol, self.host,
+ self.port, url))])
+ return " ".join(cmd)
+
+
+class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection):
+ """
+ Utility Class for logging HTTPS connections
+ """
+
+ protocol = 'https'
+
+ def getresponse(self):
+ r = LibcloudHTTPSConnection.getresponse(self)
+ if self.log is not None:
+ r, rv = self._log_response(r)
+ self.log.write(rv + "\n")
+ self.log.flush()
+ return r
+
+    def request(self, method, url, body=None, headers=None):
+        # Bug fix: headers defaults to None, so headers.update() would raise
+        # AttributeError for callers that omit it (httplib allows that).
+        headers = {} if headers is None else headers
+        headers.update({'X-LC-Request-ID': str(id(self))})
+        if self.log is not None:
+            pre = "# -------- begin %d request ----------\n" % id(self)
+            self.log.write(pre + self._log_curl(method, url, body, headers) + "\n")
+            self.log.flush()
+        return LibcloudHTTPSConnection.request(self, method, url, body, headers)
+
+
class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection):
    """
    Utility class for logging plain HTTP connections.

    Mirrors every request to ``self.log`` (a file-like object installed by
    ``libcloud.enable_debug``) as an equivalent curl command line, and every
    response as a raw HTTP transcript.
    """

    protocol = 'http'

    def getresponse(self):
        """Return the HTTP response, writing a transcript of it to the log."""
        r = LibcloudHTTPConnection.getresponse(self)
        if self.log is not None:
            r, rv = self._log_response(r)
            self.log.write(rv + "\n")
            self.log.flush()
        return r

    def request(self, method, url, body=None, headers=None):
        """Send the request, logging it as a curl invocation first."""
        # Bug fix: headers defaults to None but was previously passed to
        # headers.update() unconditionally, raising AttributeError whenever
        # the caller supplied no headers.
        headers = headers or {}
        headers.update({'X-LC-Request-ID': str(id(self))})
        if self.log is not None:
            pre = '# -------- begin %d request ----------\n' % id(self)
            self.log.write(pre +
                           self._log_curl(method, url, body, headers) + "\n")
            self.log.flush()
        return LibcloudHTTPConnection.request(self, method, url,
                                              body, headers)
+
+
+class Connection(object):
+ """
+ A Base Connection class to derive from.
+ """
+ # conn_classes = (LoggingHTTPSConnection)
+ conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection)
+
+ responseCls = Response
+ rawResponseCls = RawResponse
+ connection = None
+ host = '127.0.0.1'
+ port = 443
+ timeout = None
+ secure = 1
+ driver = None
+ action = None
+ cache_busting = False
+
+ allow_insecure = True
+
+ def __init__(self, secure=True, host=None, port=None, url=None,
+ timeout=None):
+ self.secure = secure and 1 or 0
+ self.ua = []
+ self.context = {}
+
+ if not self.allow_insecure and not secure:
+ # TODO: We should eventually switch to whitelist instead of
+ # blacklist approach
+ raise ValueError('Non https connections are not allowed (use '
+ 'secure=True)')
+
+ self.request_path = ''
+
+ if host:
+ self.host = host
+
+ if port is not None:
+ self.port = port
+ else:
+ if self.secure == 1:
+ self.port = 443
+ else:
+ self.port = 80
+
+ if url:
+ (self.host, self.port, self.secure,
+ self.request_path) = self._tuple_from_url(url)
+
+ if timeout:
+ self.timeout = timeout
+
+ def set_context(self, context):
+ if not isinstance(context, dict):
+ raise TypeError('context needs to be a dictionary')
+
+ self.context = context
+
+ def reset_context(self):
+ self.context = {}
+
+ def _tuple_from_url(self, url):
+ secure = 1
+ port = None
+ (scheme, netloc, request_path, param,
+ query, fragment) = urlparse.urlparse(url)
+
+ if scheme not in ['http', 'https']:
+ raise LibcloudError('Invalid scheme: %s in url %s' % (scheme, url))
+
+ if scheme == "http":
+ secure = 0
+
+ if ":" in netloc:
+ netloc, port = netloc.rsplit(":")
+ port = port
+
+ if not port:
+ if scheme == "http":
+ port = 80
+ else:
+ port = 443
+
+ host = netloc
+
+ return (host, port, secure, request_path)
+
+ def connect(self, host=None, port=None, base_url=None):
+ """
+ Establish a connection with the API server.
+
+ :type host: ``str``
+ :param host: Optional host to override our default
+
+ :type port: ``int``
+ :param port: Optional port to override our default
+
+ :returns: A connection
+ """
+ # prefer the attribute base_url if its set or sent
+ connection = None
+ secure = self.secure
+
+ if getattr(self, 'base_url', None) and base_url is None:
+ (host, port,
+ secure, request_path) = self._tuple_from_url(self.base_url)
+ elif base_url is not None:
+ (host, port,
+ secure, request_path) = self._tuple_from_url(base_url)
+ else:
+ host = host or self.host
+ port = port or self.port
+
+ kwargs = {'host': host, 'port': int(port)}
+
+ # Timeout is only supported in Python 2.6 and later
+ # http://docs.python.org/library/httplib.html#httplib.HTTPConnection
+ if self.timeout and not PY25:
+ kwargs.update({'timeout': self.timeout})
+
+ connection = self.conn_classes[secure](**kwargs)
+        # You can uncomment this line, if you set up a reverse proxy server
+ # which proxies to your endpoint, and lets you easily capture
+ # connections in cleartext when you setup the proxy to do SSL
+ # for you
+ # connection = self.conn_classes[False]("127.0.0.1", 8080)
+
+ self.connection = connection
+
+ def _user_agent(self):
+ user_agent_suffix = ' '.join(['(%s)' % x for x in self.ua])
+
+ if self.driver:
+ user_agent = 'libcloud/%s (%s) %s' % (
+ libcloud.__version__,
+ self.driver.name, user_agent_suffix)
+ else:
+ user_agent = 'libcloud/%s %s' % (
+ libcloud.__version__, user_agent_suffix)
+
+ return user_agent
+
+ def user_agent_append(self, token):
+ """
+ Append a token to a user agent string.
+
+ Users of the library should call this to uniquely identify their
+ requests to a provider.
+
+ :type token: ``str``
+ :param token: Token to add to the user agent.
+ """
+ self.ua.append(token)
+
+ def request(self, action, params=None, data=None, headers=None,
+ method='GET', raw=False):
+ """
+ Request a given `action`.
+
+ Basically a wrapper around the connection
+ object's `request` that does some helpful pre-processing.
+
+ :type action: ``str``
+ :param action: A path. This can include arguments. If included,
+ any extra parameters are appended to the existing ones.
+
+ :type params: ``dict``
+ :param params: Optional mapping of additional parameters to send. If
+ None, leave as an empty ``dict``.
+
+ :type data: ``unicode``
+ :param data: A body of data to send with the request.
+
+ :type headers: ``dict``
+ :param headers: Extra headers to add to the request
+ None, leave as an empty ``dict``.
+
+ :type method: ``str``
+ :param method: An HTTP method such as "GET" or "POST".
+
+ :type raw: ``bool``
+ :param raw: True to perform a "raw" request aka only send the headers
+ and use the rawResponseCls class. This is used with
+ storage API when uploading a file.
+
+ :return: An :class:`Response` instance.
+ :rtype: :class:`Response` instance
+
+ """
+ if params is None:
+ params = {}
+ else:
+ params = copy.copy(params)
+
+ if headers is None:
+ headers = {}
+ else:
+ headers = copy.copy(headers)
+
+ action = self.morph_action_hook(action)
+ self.action = action
+ self.method = method
+
+ # Extend default parameters
+ params = self.add_default_params(params)
+
+ # Add cache busting parameters (if enabled)
+ if self.cache_busting and method == 'GET':
+ params = self._add_cache_busting_to_params(params=params)
+
+ # Extend default headers
+ headers = self.add_default_headers(headers)
+
+ # We always send a user-agent header
+ headers.update({'User-Agent': self._user_agent()})
+
+ # Indicate that we support gzip and deflate compression
+ headers.update({'Accept-Encoding': 'gzip,deflate'})
+
+ port = int(self.port)
+
+ if port not in (80, 443):
+ headers.update({'Host': "%s:%d" % (self.host, port)})
+ else:
+ headers.update({'Host': self.host})
+
+ if data:
+ data = self.encode_data(data)
+ headers['Content-Length'] = str(len(data))
+ elif method.upper() in ['POST', 'PUT'] and not raw:
+ # Only send Content-Length 0 with POST and PUT request.
+ #
+            # Note: Content-Length is not added when using "raw" mode, which
+ # means that headers are upfront and the body is sent at some point
+ # later on. With raw mode user can specify Content-Length with
+ # "data" not being set.
+ headers['Content-Length'] = '0'
+
+ params, headers = self.pre_connect_hook(params, headers)
+
+ if params:
+ if '?' in action:
+ url = '&'.join((action, urlencode(params, doseq=True)))
+ else:
+ url = '?'.join((action, urlencode(params, doseq=True)))
+ else:
+ url = action
+
+ # Removed terrible hack...this a less-bad hack that doesn't execute a
+ # request twice, but it's still a hack.
+ self.connect()
+ try:
+ # @TODO: Should we just pass File object as body to request method
+ # instead of dealing with splitting and sending the file ourselves?
+ if raw:
+ self.connection.putrequest(method, url)
+
+ for key, value in list(headers.items()):
+ self.connection.putheader(key, str(value))
+
+ self.connection.endheaders()
+ else:
+ self.connection.request(method=method, url=url, body=data,
+ headers=headers)
+ except ssl.SSLError:
+ e = sys.exc_info()[1]
+ self.reset_context()
+ raise ssl.SSLError(str(e))
+
+ if raw:
+ responseCls = self.rawResponseCls
+ kwargs = {'connection': self}
+ else:
+ responseCls = self.responseCls
+ kwargs = {'connection': self,
+ 'response': self.connection.getresponse()}
+
+ try:
+ response = responseCls(**kwargs)
+ finally:
+ # Always reset the context after the request has completed
+ self.reset_context()
+
+ return response
+
+ def morph_action_hook(self, action):
+ return self.request_path + action
+
+ def add_default_params(self, params):
+ """
+ Adds default parameters (such as API key, version, etc.)
+ to the passed `params`
+
+ Should return a dictionary.
+ """
+ return params
+
+ def add_default_headers(self, headers):
+ """
+ Adds default headers (such as Authorization, X-Foo-Bar)
+ to the passed `headers`
+
+ Should return a dictionary.
+ """
+ return headers
+
+ def pre_connect_hook(self, params, headers):
+ """
+ A hook which is called before connecting to the remote server.
+ This hook can perform a final manipulation on the params, headers and
+ url parameters.
+
+ :type params: ``dict``
+ :param params: Request parameters.
+
+ :type headers: ``dict``
+ :param headers: Request headers.
+ """
+ return params, headers
+
+ def encode_data(self, data):
+ """
+ Encode body data.
+
+ Override in a provider's subclass.
+ """
+ return data
+
+ def _add_cache_busting_to_params(self, params):
+ """
+ Add cache busting parameter to the query parameters of a GET request.
+
+ Parameters are only added if "cache_busting" class attribute is set to
+ True.
+
+ Note: This should only be used with *naughty* providers which use
+ excessive caching of responses.
+ """
+ cache_busting_value = binascii.hexlify(os.urandom(8)).decode('ascii')
+
+ if isinstance(params, dict):
+ params['cache-busting'] = cache_busting_value
+ else:
+ params.append(('cache-busting', cache_busting_value))
+
+ return params
+
+
+class PollingConnection(Connection):
+ """
+ Connection class which can also work with the async APIs.
+
+ After initial requests, this class periodically polls for jobs status and
+ waits until the job has finished.
+    If the job doesn't finish in timeout seconds, an Exception is thrown.
+ """
+ poll_interval = 0.5
+ timeout = 200
+ request_method = 'request'
+
+ def async_request(self, action, params=None, data=None, headers=None,
+ method='GET', context=None):
+ """
+ Perform an 'async' request to the specified path. Keep in mind that
+ this function is *blocking* and 'async' in this case means that the
+        hit URL only returns a job ID which is then periodically polled until
+ the job has completed.
+
+ This function works like this:
+
+ - Perform a request to the specified path. Response should contain a
+ 'job_id'.
+
+ - Returned 'job_id' is then used to construct a URL which is used for
+ retrieving job status. Constructed URL is then periodically polled
+ until the response indicates that the job has completed or the
+ timeout of 'self.timeout' seconds has been reached.
+
+ :type action: ``str``
+ :param action: A path
+
+ :type params: ``dict``
+ :param params: Optional mapping of additional parameters to send. If
+ None, leave as an empty ``dict``.
+
+ :type data: ``unicode``
+ :param data: A body of data to send with the request.
+
+ :type headers: ``dict``
+ :param headers: Extra headers to add to the request
+ None, leave as an empty ``dict``.
+
+ :type method: ``str``
+ :param method: An HTTP method such as "GET" or "POST".
+
+ :type context: ``dict``
+ :param context: Context dictionary which is passed to the functions
+ which construct initial and poll URL.
+
+ :return: An :class:`Response` instance.
+ :rtype: :class:`Response` instance
+ """
+
+ request = getattr(self, self.request_method)
+ kwargs = self.get_request_kwargs(action=action, params=params,
+ data=data, headers=headers,
+ method=method,
+ context=context)
+ response = request(**kwargs)
+ kwargs = self.get_poll_request_kwargs(response=response,
+ context=context,
+ request_kwargs=kwargs)
+
+ end = time.time() + self.timeout
+ completed = False
+ while time.time() < end and not completed:
+ response = request(**kwargs)
+ completed = self.has_completed(response=response)
+ if not completed:
+ time.sleep(self.poll_interval)
+
+ if not completed:
+ raise LibcloudError('Job did not complete in %s seconds' %
+ (self.timeout))
+
+ return response
+
+ def get_request_kwargs(self, action, params=None, data=None, headers=None,
+ method='GET', context=None):
+ """
+ Arguments which are passed to the initial request() call inside
+ async_request.
+ """
+ kwargs = {'action': action, 'params': params, 'data': data,
+ 'headers': headers, 'method': method}
+ return kwargs
+
+ def get_poll_request_kwargs(self, response, context, request_kwargs):
+ """
+ Return keyword arguments which are passed to the request() method when
+ polling for the job status.
+
+ :param response: Response object returned by poll request.
+ :type response: :class:`HTTPResponse`
+
+ :param request_kwargs: Kwargs previously used to initiate the
+ poll request.
+ :type response: ``dict``
+
+ :return ``dict`` Keyword arguments
+ """
+ raise NotImplementedError('get_poll_request_kwargs not implemented')
+
+ def has_completed(self, response):
+ """
+ Return job completion status.
+
+ :param response: Response object returned by poll request.
+ :type response: :class:`HTTPResponse`
+
+ :return ``bool`` True if the job has completed, False otherwise.
+ """
+ raise NotImplementedError('has_completed not implemented')
+
+
+class ConnectionKey(Connection):
+ """
+ Base connection class which accepts a single ``key`` argument.
+ """
+ def __init__(self, key, secure=True, host=None, port=None, url=None,
+ timeout=None):
+ """
+ Initialize `user_id` and `key`; set `secure` to an ``int`` based on
+ passed value.
+ """
+ super(ConnectionKey, self).__init__(secure=secure, host=host,
+ port=port, url=url,
+ timeout=timeout)
+ self.key = key
+
+
+class ConnectionUserAndKey(ConnectionKey):
+ """
+ Base connection class which accepts a ``user_id`` and ``key`` argument.
+ """
+
+ user_id = None
+
+ def __init__(self, user_id, key, secure=True,
+ host=None, port=None, url=None, timeout=None):
+ super(ConnectionUserAndKey, self).__init__(key, secure=secure,
+ host=host, port=port,
+ url=url, timeout=timeout)
+ self.user_id = user_id
+
+
+class BaseDriver(object):
+ """
+ Base driver class from which other classes can inherit from.
+ """
+
+ connectionCls = ConnectionKey
+
+ def __init__(self, key, secret=None, secure=True, host=None, port=None,
+ api_version=None, region=None, **kwargs):
+ """
+ :param key: API key or username to be used (required)
+ :type key: ``str``
+
+ :param secret: Secret password to be used (required)
+ :type secret: ``str``
+
+        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
+ only support HTTPS, and it is on by default.
+ :type secure: ``bool``
+
+ :param host: Override hostname used for connections.
+ :type host: ``str``
+
+ :param port: Override port used for connections.
+ :type port: ``int``
+
+ :param api_version: Optional API version. Only used by drivers
+ which support multiple API versions.
+ :type api_version: ``str``
+
+ :param region: Optional driver region. Only used by drivers which
+ support multiple regions.
+ :type region: ``str``
+
+ :rtype: ``None``
+ """
+
+ self.key = key
+ self.secret = secret
+ self.secure = secure
+ args = [self.key]
+
+ if self.secret is not None:
+ args.append(self.secret)
+
+ args.append(secure)
+
+ if host is not None:
+ args.append(host)
+
+ if port is not None:
+ args.append(port)
+
+ self.api_version = api_version
+ self.region = region
+
+ conn_kwargs = self._ex_connection_class_kwargs()
+ self.connection = self.connectionCls(*args, **conn_kwargs)
+
+ self.connection.driver = self
+ self.connection.connect()
+
+ def _ex_connection_class_kwargs(self):
+ """
+ Return extra connection keyword arguments which are passed to the
+ Connection class constructor.
+ """
+ return {}
diff --git a/awx/lib/site-packages/libcloud/common/brightbox.py b/awx/lib/site-packages/libcloud/common/brightbox.py
new file mode 100644
index 0000000000..1943dda0d2
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/brightbox.py
@@ -0,0 +1,101 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+
+from libcloud.common.base import ConnectionUserAndKey, JsonResponse
+from libcloud.compute.types import InvalidCredsError
+
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import httplib
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+
class BrightboxResponse(JsonResponse):
    """
    Response class for the Brightbox driver.

    Treats any status below 400 as success and only JSON-decodes the body
    when the server declares a JSON content type.
    """

    def success(self):
        """Return True for any 2xx/3xx status code."""
        return httplib.OK <= self.status < httplib.BAD_REQUEST

    def parse_body(self):
        """Parse the body as JSON when the Content-Type says it is JSON."""
        # Robustness fix: the Content-Type header may be missing entirely;
        # fall back to the raw body instead of raising KeyError.
        content_type = self.headers.get('content-type', '')
        if content_type.split(';')[0] == 'application/json':
            return super(BrightboxResponse, self).parse_body()
        return self.body

    def parse_error(self):
        """
        Extract a human-readable message from an OAuth-style error body.

        :raises InvalidCredsError: For invalid_client / unauthorized_client.
        """
        response = super(BrightboxResponse, self).parse_body()

        if 'error' in response:
            if response['error'] in ['invalid_client', 'unauthorized_client']:
                raise InvalidCredsError(response['error'])

            return response['error']
        elif 'error_name' in response:
            return '%s: %s' % (response['error_name'], response['errors'][0])

        return self.body
+
+
+class BrightboxConnection(ConnectionUserAndKey):
+ """
+ Connection class for the Brightbox driver
+ """
+
+ host = 'api.gb1.brightbox.com'
+ responseCls = BrightboxResponse
+
+ def _fetch_oauth_token(self):
+ body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'})
+
+ authorization = 'Basic ' + str(base64.encodestring(b('%s:%s' %
+ (self.user_id, self.key)))).rstrip()
+
+ self.connect()
+
+ headers = {
+ 'Host': self.host,
+ 'User-Agent': self._user_agent(),
+ 'Authorization': authorization,
+ 'Content-Type': 'application/json',
+ 'Content-Length': str(len(body))
+ }
+
+ response = self.connection.request(method='POST', url='/token',
+ body=body, headers=headers)
+
+ response = self.connection.getresponse()
+
+ if response.status == httplib.OK:
+ return json.loads(response.read())['access_token']
+ else:
+ responseCls = BrightboxResponse(response=response, connection=self)
+ message = responseCls.parse_error()
+ raise InvalidCredsError(message)
+
+ def add_default_headers(self, headers):
+ try:
+ headers['Authorization'] = 'OAuth ' + self.token
+ except AttributeError:
+ self.token = self._fetch_oauth_token()
+
+ headers['Authorization'] = 'OAuth ' + self.token
+
+ return headers
+
+ def encode_data(self, data):
+ return json.dumps(data)
diff --git a/awx/lib/site-packages/libcloud/common/cloudsigma.py b/awx/lib/site-packages/libcloud/common/cloudsigma.py
new file mode 100644
index 0000000000..c65535cfde
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/cloudsigma.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'API_ENDPOINTS_1_0',
+ 'API_ENDPOINTS_2_0',
+ 'API_VERSIONS',
+ 'INSTANCE_TYPES'
+]
+
+# API end-points
+API_ENDPOINTS_1_0 = {
+ 'zrh': {
+ 'name': 'Zurich',
+ 'country': 'Switzerland',
+ 'host': 'api.zrh.cloudsigma.com'
+ },
+ 'lvs': {
+ 'name': 'Las Vegas',
+ 'country': 'United States',
+ 'host': 'api.lvs.cloudsigma.com'
+ }
+}
+
+API_ENDPOINTS_2_0 = {
+ 'zrh': {
+ 'name': 'Zurich',
+ 'country': 'Switzerland',
+ 'host': 'zrh.cloudsigma.com'
+ },
+ 'lvs': {
+ 'name': 'Las Vegas',
+ 'country': 'United States',
+ 'host': 'lvs.cloudsigma.com'
+ },
+ 'wdc': {
+ 'name': 'Washington DC',
+ 'country': 'United States',
+ 'host': 'wdc.cloudsigma.com'
+ }
+
+}
+
+DEFAULT_REGION = 'zrh'
+
# Supported API versions.
# Bug fix: the original list was missing a comma after '1.0', so implicit
# string concatenation produced a single bogus entry '1.02.0'.
API_VERSIONS = [
    '1.0',  # old and deprecated
    '2.0'
]
+
+DEFAULT_API_VERSION = '2.0'
+
+# CloudSigma doesn't specify special instance types.
+# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work,
+# 500 MB to 32000 MB for ram
+# and 1 GB to 1024 GB for hard drive size.
+# Plans in this file are based on examples listed on http://www.cloudsigma
+# .com/en/pricing/price-schedules
+INSTANCE_TYPES = [
+ {
+ 'id': 'micro-regular',
+ 'name': 'Micro/Regular instance',
+ 'cpu': 1100,
+ 'memory': 640,
+ 'disk': 10 + 3,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'micro-high-cpu',
+ 'name': 'Micro/High CPU instance',
+ 'cpu': 2200,
+ 'memory': 640,
+ 'disk': 80,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'standard-small',
+ 'name': 'Standard/Small instance',
+ 'cpu': 1100,
+ 'memory': 1741,
+ 'disk': 50,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'standard-large',
+ 'name': 'Standard/Large instance',
+ 'cpu': 4400,
+ 'memory': 7680,
+ 'disk': 250,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'standard-extra-large',
+ 'name': 'Standard/Extra Large instance',
+ 'cpu': 8800,
+ 'memory': 15360,
+ 'disk': 500,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'high-memory-extra-large',
+ 'name': 'High Memory/Extra Large instance',
+ 'cpu': 7150,
+ 'memory': 17510,
+ 'disk': 250,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'high-memory-double-extra-large',
+ 'name': 'High Memory/Double Extra Large instance',
+ 'cpu': 14300,
+ 'memory': 32768,
+ 'disk': 500,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'high-cpu-medium',
+ 'name': 'High CPU/Medium instance',
+ 'cpu': 5500,
+ 'memory': 1741,
+ 'disk': 150,
+ 'bandwidth': None,
+ },
+ {
+ 'id': 'high-cpu-extra-large',
+ 'name': 'High CPU/Extra Large instance',
+ 'cpu': 20000,
+ 'memory': 7168,
+ 'disk': 500,
+ 'bandwidth': None,
+ }
+]
diff --git a/awx/lib/site-packages/libcloud/common/cloudstack.py b/awx/lib/site-packages/libcloud/common/cloudstack.py
new file mode 100644
index 0000000000..b23d4539ee
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/cloudstack.py
@@ -0,0 +1,195 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import hashlib
+import copy
+import hmac
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import urlquote
+from libcloud.utils.py3 import b
+
+from libcloud.common.types import ProviderError
+from libcloud.common.base import ConnectionUserAndKey, PollingConnection
+from libcloud.common.base import JsonResponse
+from libcloud.common.types import MalformedResponseError
+from libcloud.compute.types import InvalidCredsError
+
+
+class CloudStackResponse(JsonResponse):
+ def parse_error(self):
+ if self.status == httplib.UNAUTHORIZED:
+ raise InvalidCredsError('Invalid provider credentials')
+
+ body = self.parse_body()
+ values = list(body.values())[0]
+
+ if 'errortext' in values:
+ value = values['errortext']
+ else:
+ value = self.body
+
+ error = ProviderError(value=value, http_code=self.status,
+ driver=self.connection.driver)
+ raise error
+
+
+class CloudStackConnection(ConnectionUserAndKey, PollingConnection):
+ responseCls = CloudStackResponse
+ poll_interval = 1
+ request_method = '_sync_request'
+ timeout = 600
+
+ ASYNC_PENDING = 0
+ ASYNC_SUCCESS = 1
+ ASYNC_FAILURE = 2
+
+ def encode_data(self, data):
+ """
+        Most of the data is sent as part of query params (eeww),
+ but in newer versions, userdata argument can be sent as a
+ urlencoded data in the request body.
+ """
+ if data:
+ data = urlencode(data)
+
+ return data
+
+ def _make_signature(self, params):
+ signature = [(k.lower(), v) for k, v in list(params.items())]
+ signature.sort(key=lambda x: x[0])
+
+ pairs = []
+ for pair in signature:
+ key = urlquote(str(pair[0]), safe='[]')
+ value = urlquote(str(pair[1]), safe='[]')
+ item = '%s=%s' % (key, value)
+ pairs .append(item)
+
+ signature = '&'.join(pairs)
+
+ signature = signature.lower().replace('+', '%20')
+ signature = hmac.new(b(self.key), msg=b(signature),
+ digestmod=hashlib.sha1)
+ return base64.b64encode(b(signature.digest()))
+
+ def add_default_params(self, params):
+ params['apiKey'] = self.user_id
+ params['response'] = 'json'
+
+ return params
+
+ def pre_connect_hook(self, params, headers):
+ params['signature'] = self._make_signature(params)
+
+ return params, headers
+
+ def _async_request(self, command, action=None, params=None, data=None,
+ headers=None, method='GET', context=None):
+ if params:
+ context = copy.deepcopy(params)
+ else:
+ context = {}
+
+ # Command is specified as part of GET call
+ context['command'] = command
+ result = super(CloudStackConnection, self).async_request(
+ action=action, params=params, data=data, headers=headers,
+ method=method, context=context)
+ return result['jobresult']
+
+ def get_request_kwargs(self, action, params=None, data='', headers=None,
+ method='GET', context=None):
+ command = context['command']
+ request_kwargs = {'command': command, 'action': action,
+ 'params': params, 'data': data,
+ 'headers': headers, 'method': method}
+ return request_kwargs
+
+ def get_poll_request_kwargs(self, response, context, request_kwargs):
+ job_id = response['jobid']
+ params = {'jobid': job_id}
+ kwargs = {'command': 'queryAsyncJobResult', 'params': params}
+ return kwargs
+
+ def has_completed(self, response):
+ status = response.get('jobstatus', self.ASYNC_PENDING)
+
+ if status == self.ASYNC_FAILURE:
+ msg = response.get('jobresult', {}).get('errortext', status)
+ raise Exception(msg)
+
+ return status == self.ASYNC_SUCCESS
+
+ def _sync_request(self, command, action=None, params=None, data=None,
+ headers=None, method='GET'):
+ """
+ This method handles synchronous calls which are generally fast
+ information retrieval requests and thus return 'quickly'.
+ """
+ # command is always sent as part of "command" query parameter
+ if params:
+ params = copy.deepcopy(params)
+ else:
+ params = {}
+
+ params['command'] = command
+ result = self.request(action=self.driver.path, params=params,
+ data=data, headers=headers, method=method)
+
+ command = command.lower()
+
+        # Work around for older versions which don't return "response" suffix
+ # in delete ingress rule response command name
+ if (command == 'revokesecuritygroupingress' and
+ 'revokesecuritygroupingressresponse' not in result.object):
+ command = command
+ else:
+ command = command + 'response'
+
+ if command not in result.object:
+ raise MalformedResponseError(
+ "Unknown response format",
+ body=result.body,
+ driver=self.driver)
+ result = result.object[command]
+ return result
+
+
+class CloudStackDriverMixIn(object):
+ host = None
+ path = None
+
+ connectionCls = CloudStackConnection
+
+ def __init__(self, key, secret=None, secure=True, host=None, port=None):
+ host = host or self.host
+ super(CloudStackDriverMixIn, self).__init__(key, secret, secure, host,
+ port)
+
+ def _sync_request(self, command, action=None, params=None, data=None,
+ headers=None, method='GET'):
+ return self.connection._sync_request(command=command, action=action,
+ params=params, data=data,
+ headers=headers, method=method)
+
+ def _async_request(self, command, action=None, params=None, data=None,
+ headers=None, method='GET', context=None):
+ return self.connection._async_request(command=command, action=action,
+ params=params, data=data,
+ headers=headers, method=method,
+ context=context)
diff --git a/awx/lib/site-packages/libcloud/common/gandi.py b/awx/lib/site-packages/libcloud/common/gandi.py
new file mode 100644
index 0000000000..11e96221b8
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/gandi.py
@@ -0,0 +1,189 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Gandi driver base classes
+"""
+
+import time
+import hashlib
+import sys
+
+from libcloud.utils.py3 import b
+
+from libcloud.common.base import ConnectionKey
+from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
+
+# Global constants
+
+DEFAULT_TIMEOUT = 600 # operation pooling max seconds
+DEFAULT_INTERVAL = 20 # seconds between 2 operation.info
+
+
class GandiException(Exception):
    """
    Exception class for the Gandi driver.

    Carries ``(code, message)`` in ``args``, mirroring Gandi XML-RPC faults.
    """
    def __str__(self):
        return '(%u) %s' % (self.args[0], self.args[1])

    def __repr__(self):
        # Restored format string: the mangled version returned '' % (...)
        # which raises TypeError ("not all arguments converted") the moment
        # the exception is repr()'d.
        return '<GandiException code %u "%s">' % (self.args[0], self.args[1])
+
+
+class GandiResponse(XMLRPCResponse):
+ """
+ A Base Gandi Response class to derive from.
+ """
+
+
+class GandiConnection(XMLRPCConnection, ConnectionKey):
+ """
+ Connection class for the Gandi driver
+ """
+
+ responseCls = GandiResponse
+ host = 'rpc.gandi.net'
+ endpoint = '/xmlrpc/'
+
+ def __init__(self, key, secure=True):
+ # Note: Method resolution order in this case is
+ # XMLRPCConnection -> Connection and Connection doesn't take key as the
+ # first argument so we specify a keyword argument instead.
+ # Previously it was GandiConnection -> ConnectionKey so it worked fine.
+ super(GandiConnection, self).__init__(key=key, secure=secure)
+ self.driver = BaseGandiDriver
+
+ def request(self, method, *args):
+ args = (self.key, ) + args
+ return super(GandiConnection, self).request(method, *args)
+
+
+class BaseGandiDriver(object):
+ """
+ Gandi base driver
+
+ """
+ connectionCls = GandiConnection
+ name = 'Gandi'
+
+ # Specific methods for gandi
+ def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT,
+ check_interval=DEFAULT_INTERVAL):
+ """ Wait for an operation to succeed"""
+
+ for i in range(0, timeout, check_interval):
+ try:
+ op = self.connection.request('operation.info', int(id)).object
+
+ if op['step'] == 'DONE':
+ return True
+ if op['step'] in ['ERROR', 'CANCEL']:
+ return False
+ except (KeyError, IndexError):
+ pass
+ except Exception:
+ e = sys.exc_info()[1]
+ raise GandiException(1002, e)
+
+ time.sleep(check_interval)
+ return False
+
+
+class BaseObject(object):
+ """Base class for objects not conventional"""
+
+ uuid_prefix = ''
+
+ def __init__(self, id, state, driver):
+ self.id = str(id) if id else None
+ self.state = state
+ self.driver = driver
+ self.uuid = self.get_uuid()
+
+ def get_uuid(self):
+ """Unique hash for this object
+
+ :return: ``str``
+
+ The hash is a function of an SHA1 hash of prefix, the object's ID and
+ its driver which means that it should be unique between all
+ interfaces.
+ TODO : to review
+ >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+ >>> driver = DummyNodeDriver(0)
+ >>> vif = driver.create_interface()
+ >>> vif.get_uuid()
+ 'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'
+
+ Note, for example, that this example will always produce the
+ same UUID!
+ """
+ hashstring = '%s:%s:%s' % \
+ (self.uuid_prefix, self.id, self.driver.type)
+ return hashlib.sha1(b(hashstring)).hexdigest()
+
+
class IPAddress(BaseObject):
    """
    Provide a common interface for IP addresses.
    """

    uuid_prefix = 'inet:'

    def __init__(self, id, state, inet, driver, version=4, extra=None):
        super(IPAddress, self).__init__(id, state, driver)
        self.inet = inet
        self.version = version
        self.extra = extra or {}

    def __repr__(self):
        # Restored format string: the mangled version evaluated
        # ('') % (...), which raises TypeError at repr time.
        return ('<IPAddress: id=%s, address=%s, state=%s, driver=%s>'
                % (self.id, self.inet, self.state, self.driver.name))
+
+
class NetworkInterface(BaseObject):
    """
    Provide a common interface for network interfaces.
    """

    uuid_prefix = 'if:'

    def __init__(self, id, state, mac_address, driver,
                 ips=None, node_id=None, extra=None):
        super(NetworkInterface, self).__init__(id, state, driver)
        self.mac = mac_address
        self.ips = ips or {}
        self.node_id = node_id
        self.extra = extra or {}

    def __repr__(self):
        # Restored format string: the mangled version evaluated
        # ('') % (...), which raises TypeError at repr time.
        return ('<NetworkInterface: id=%s, mac=%s, state=%s, driver=%s>'
                % (self.id, self.mac, self.state, self.driver.name))
+
+
class Disk(BaseObject):
    """
    Gandi disk component.
    """
    def __init__(self, id, state, name, driver, size, extra=None):
        super(Disk, self).__init__(id, state, driver)
        self.name = name
        self.size = size
        self.extra = extra or {}

    def __repr__(self):
        # Restored format string: the mangled version evaluated
        # ('') % (...), which raises TypeError at repr time.
        return ('<Disk: id=%s, name=%s, state=%s, size=%s, driver=%s>'
                % (self.id, self.name, self.state, self.size,
                   self.driver.name))
diff --git a/awx/lib/site-packages/libcloud/common/gogrid.py b/awx/lib/site-packages/libcloud/common/gogrid.py
new file mode 100644
index 0000000000..e2448debdd
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/gogrid.py
@@ -0,0 +1,183 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+import time
+
+from libcloud.utils.py3 import b
+
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.common.types import MalformedResponseError
+from libcloud.common.base import ConnectionUserAndKey, JsonResponse
+from libcloud.compute.base import NodeLocation
+
+HOST = 'api.gogrid.com'
+PORTS_BY_SECURITY = {True: 443, False: 80}
+API_VERSION = '1.8'
+
+__all__ = [
+ "GoGridResponse",
+ "GoGridConnection",
+ "GoGridIpAddress",
+ "BaseGoGridDriver",
+]
+
+
class GoGridResponse(JsonResponse):
    """
    JSON response wrapper that understands GoGrid's status envelope and its
    error-reporting conventions.
    """

    def __init__(self, *args, **kwargs):
        # Recorded so the exceptions raised below can identify the driver.
        self.driver = BaseGoGridDriver
        super(GoGridResponse, self).__init__(*args, **kwargs)

    def success(self):
        """Return True when the API reported success, None for an empty body.

        :raises InvalidCredsError: on HTTP 403 or 401.
        :raises MalformedResponseError: when the body is not valid JSON.
        """
        if self.status == 403:
            raise InvalidCredsError('Invalid credentials', self.driver)
        if self.status == 401:
            raise InvalidCredsError('API Key has insufficient rights',
                                    self.driver)
        if not self.body:
            return None
        try:
            parsed = self.parse_body()
        except ValueError:
            raise MalformedResponseError('Malformed reply',
                                         body=self.body,
                                         driver=self.driver)
        return parsed['status'] == 'success'

    def parse_error(self):
        """Extract the first error message from the body, or None."""
        try:
            body = self.parse_body()
        except ValueError:
            return None
        try:
            return body["list"][0]["message"]
        except KeyError:
            return None
+
+
class GoGridConnection(ConnectionUserAndKey):
    """
    Connection class for the GoGrid driver.
    """

    host = HOST
    responseCls = GoGridResponse

    def add_default_params(self, params):
        """Attach the credential and format parameters every call requires."""
        params.update({
            "api_key": self.user_id,
            "v": API_VERSION,
            "format": 'json',
            "sig": self.get_signature(self.user_id, self.key),
        })
        return params

    def get_signature(self, key, secret):
        """Compute the request signature: md5 of key + secret + unix time."""
        plaintext = key + secret + str(int(time.time()))
        return hashlib.md5(b(plaintext)).hexdigest()

    def request(self, action, params=None, data='', headers=None,
                method='GET', raw=False):
        # Thin pass-through; kept so the full parameter list is documented
        # on this class.
        return super(GoGridConnection, self).request(action, params, data,
                                                     headers, method, raw)
+
+
class GoGridIpAddress(object):
    """
    An IP address record as returned by the GoGrid API.
    """

    def __init__(self, id, ip, public, state, subnet):
        # Plain attribute capture; no validation is performed here.
        self.id, self.ip = id, ip
        self.public, self.state, self.subnet = public, state, subnet
+
+
class BaseGoGridDriver(object):
    """GoGrid has a common object model for the services it provides, such
    as locations and IP addresses, so handling of those objects is kept in
    this single shared base class."""

    name = "GoGrid"

    def _get_ip(self, element):
        # The API nests the address: {'ip': {'ip': 'x.x.x.x', ...}, ...}.
        return element.get('ip').get('ip')

    def _to_ip(self, element):
        # Build the address object, then attach its datacenter as a
        # NodeLocation for convenience.
        address = GoGridIpAddress(id=element['id'],
                                  ip=element['ip'],
                                  public=element['public'],
                                  subnet=element['subnet'],
                                  state=element["state"]["name"])
        address.location = self._to_location(element['datacenter'])
        return address

    def _to_ips(self, object):
        return [self._to_ip(element) for element in object['list']]

    def _to_location(self, element):
        return NodeLocation(id=element['id'],
                            name=element['name'],
                            country="US",
                            driver=self.connection.driver)

    def _to_locations(self, object):
        return [self._to_location(element) for element in object['list']]

    def ex_list_ips(self, **kwargs):
        """Return the list of IP addresses assigned to the account.

        :keyword public: set to True to list only public IPs or False to
            list only private IPs. Set to None (or omit) to not filter
            by type.
        :type public: ``bool``

        :keyword assigned: set to True to list only addresses assigned to
            servers, False to list unassigned addresses. Set to None (or
            omit) to not filter by state.
        :type assigned: ``bool``

        :keyword location: filter IP addresses by location
        :type location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`GoGridIpAddress`
        """
        params = {}

        public = kwargs.get("public")
        if public is not None:
            params["ip.type"] = {True: "Public", False: "Private"}[public]

        assigned = kwargs.get("assigned")
        if assigned is not None:
            params["ip.state"] = {True: "Assigned",
                                  False: "Unassigned"}[assigned]

        location = kwargs.get('location')
        if location is not None:
            params['datacenter'] = location.id

        response = self.connection.request('/api/grid/ip/list', params=params)
        return self._to_ips(response.object)

    def _get_first_ip(self, location=None):
        # Grab an unassigned public address; error out when the pool is dry.
        candidates = self.ex_list_ips(public=True, assigned=False,
                                      location=location)
        if not candidates:
            raise LibcloudError('No public unassigned IPs left',
                                self.driver)
        return candidates[0].ip
diff --git a/awx/lib/site-packages/libcloud/common/google.py b/awx/lib/site-packages/libcloud/common/google.py
new file mode 100644
index 0000000000..725c3832aa
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/google.py
@@ -0,0 +1,671 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Module for Google Connection and Authentication classes.
+
+Information about setting up your Google OAUTH2 credentials:
+
+For libcloud, there are two basic methods for authenticating to Google using
+OAUTH2: Service Accounts and Client IDs for Installed Applications.
+
+Both are initially set up from the Cloud Console_
+_Console: https://cloud.google.com/console
+
+Setting up Service Account authentication (note that you need the PyCrypto
+package installed to use this):
+ - Go to the Console
+ - Go to your project and then to "APIs & auth" on the left
+ - Click on "Credentials"
+ - Click on "Create New Client ID..."
+ - Select "Service account" and click on "Create Client ID"
+ - Download the Private Key (should happen automatically).
+ - The key that you download is a PKCS12 key. It needs to be converted to
+ the PEM format.
+ - Convert the key using OpenSSL (the default password is 'notasecret'):
+ ``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts
+ -passin pass:notasecret | openssl rsa -out PRIV.pem``
+ - Move the .pem file to a safe location.
+ - To Authenticate, you will need to pass the Service Account's "Email
+ address" in as the user_id and the path to the .pem file as the key.
+
+Setting up Installed Application authentication:
+ - Go to the Console
+ - Go to your project and then to "APIs & auth" on the left
+ - Click on "Credentials"
+ - Select "Installed application" and "Other" then click on
+ "Create Client ID"
+ - To Authenticate, pass in the "Client ID" as the user_id and the "Client
+ secret" as the key
+ - The first time that you do this, the libcloud will give you a URL to
+ visit. Copy and paste the URL into a browser.
+ - When you go to the URL it will ask you to log in (if you aren't already)
+ and ask you if you want to allow the project access to your account.
+ - Click on Accept and you will be given a code.
+ - Paste that code at the prompt given to you by the Google libcloud
+ connection.
+ - At that point, a token & refresh token will be stored in your home
+ directory and will be used for authentication.
+
+Please remember to secure your keys and access tokens.
+"""
+from __future__ import with_statement
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+import base64
+import errno
+import time
+import datetime
+import os
+import socket
+import sys
+
+from libcloud.utils.py3 import httplib, urlencode, urlparse, PY3
+from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
+ PollingConnection)
+from libcloud.common.types import (ProviderError,
+ LibcloudError)
+
+try:
+ from Crypto.Hash import SHA256
+ from Crypto.PublicKey import RSA
+ from Crypto.Signature import PKCS1_v1_5
+ import Crypto.Random
+ Crypto.Random.atfork()
+except ImportError:
+ # The pycrypto library is unavailable
+ SHA256 = None
+ RSA = None
+ PKCS1_v1_5 = None
+
+TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
+
+
class GoogleAuthError(LibcloudError):
    """Generic error class covering the various authentication failures."""

    def __init__(self, value):
        # Stored verbatim; repr() below simply echoes it back.
        self.value = value

    def __repr__(self):
        return repr(self.value)
+
+
class GoogleBaseError(ProviderError):
    """Base class for errors reported by the Google APIs.

    ``code`` holds the Google-specific error code (a string constant or a
    number), or ``None`` when the response carried no code.
    """
    def __init__(self, value, http_code, code, driver=None):
        # Record the Google error code, then delegate the common
        # value/http_code handling to ProviderError.
        self.code = code
        super(GoogleBaseError, self).__init__(value, http_code, driver)
+
+
class InvalidRequestError(GoogleBaseError):
    """Raised by GoogleResponse.parse_body for HTTP 400 (Bad Request)."""
    pass
+
+
class JsonParseError(GoogleBaseError):
    """Raised when a successful (2xx) response body is not valid JSON."""
    pass
+
+
class ResourceNotFoundError(GoogleBaseError):
    """Raised by GoogleResponse.parse_body for HTTP 404 (Not Found)."""
    pass
+
+
class QuotaExceededError(GoogleBaseError):
    """Raised for responses carrying the QUOTA_EXCEEDED error code."""
    pass
+
+
class ResourceExistsError(GoogleBaseError):
    """Raised for responses carrying the RESOURCE_ALREADY_EXISTS code."""
    pass
+
+
class ResourceInUseError(GoogleBaseError):
    """Raised for responses whose code starts with RESOURCE_IN_USE."""
    pass
+
+
class GoogleResponse(JsonResponse):
    """
    Google Base Response class.

    All responses are tagged as successful; errors are detected in
    parse_body and raised as typed exceptions.
    """
    def success(self):
        """
        Determine if the request was successful.

        For the Google response class, tag all responses as successful and
        raise appropriate Exceptions from parse_body.

        :return: C{True}
        """
        return True

    def _get_error(self, body):
        """
        Get the error code and message from a JSON response.

        Return just the first error if there are multiple errors.

        :param body: The body of the JSON response dictionary
        :type body: ``dict``

        :return: Tuple containing error code and message
        :rtype: ``tuple`` of ``str`` or ``int``
        """
        if 'errors' in body['error']:
            err = body['error']['errors'][0]
        else:
            err = body['error']

        if 'code' in err:
            code = err.get('code')
            message = err.get('message')
        else:
            # OAuth-style errors carry no code; fall back to the
            # human-readable description (or the raw error object).
            code = None
            message = body.get('error_description', err)

        return (code, message)

    def parse_body(self):
        """
        Parse the JSON response body, or raise exceptions as appropriate.

        :return: JSON dictionary
        :rtype: ``dict``

        :raises JsonParseError: 2xx response with an unparseable body.
        :raises QuotaExceededError: error code QUOTA_EXCEEDED.
        :raises ResourceExistsError: error code RESOURCE_ALREADY_EXISTS.
        :raises ResourceInUseError: error codes starting RESOURCE_IN_USE.
        :raises ResourceNotFoundError: HTTP 404.
        :raises InvalidRequestError: HTTP 400.
        :raises GoogleBaseError: any other reported error.
        """
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        json_error = False
        try:
            body = json.loads(self.body)
        except ValueError:
            # Bug fix: only catch the JSON decode failure (json.loads and
            # simplejson both raise ValueError); the previous bare "except:"
            # also swallowed KeyboardInterrupt and programming errors.
            # If there is both a JSON parsing error and an unsuccessful http
            # response (like a 404), we want to raise the http error and not
            # the JSON one, so don't raise JsonParseError here.
            body = self.body
            json_error = True

        if self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]:
            if json_error:
                raise JsonParseError(body, self.status, None)
            elif 'error' in body:
                (code, message) = self._get_error(body)
                if code == 'QUOTA_EXCEEDED':
                    raise QuotaExceededError(message, self.status, code)
                elif code == 'RESOURCE_ALREADY_EXISTS':
                    raise ResourceExistsError(message, self.status, code)
                elif (hasattr(code, 'startswith') and
                        code.startswith('RESOURCE_IN_USE')):
                    # Bug fix: code may be None or numeric (see _get_error),
                    # in which case calling .startswith() unconditionally
                    # raised AttributeError instead of the intended
                    # GoogleBaseError below.
                    raise ResourceInUseError(message, self.status, code)
                else:
                    raise GoogleBaseError(message, self.status, code)
            else:
                return body

        elif self.status == httplib.NOT_FOUND:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise ResourceNotFoundError(message, self.status, code)

        elif self.status == httplib.BAD_REQUEST:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise InvalidRequestError(message, self.status, code)

        else:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise GoogleBaseError(message, self.status, code)
+
+
class GoogleBaseDriver(object):
    """Minimal driver stand-in used by the Google connection classes to
    identify themselves in errors and logs."""

    name = "Google API"
+
+
class GoogleBaseAuthConnection(ConnectionUserAndKey):
    """
    Base class for Google Authentication. Should be subclassed for specific
    types of authentication.
    """
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    name = 'Google Auth'
    host = 'accounts.google.com'
    auth_path = '/o/oauth2/auth'

    def __init__(self, user_id, key, scopes=None,
                 redirect_uri='urn:ietf:wg:oauth:2.0:oob',
                 login_hint=None, **kwargs):
        """
        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :param scopes: A list of urls defining the scope of authentication
                       to grant.
        :type scopes: ``list``

        :keyword redirect_uri: The Redirect URI for the authentication
                               request. See Google OAUTH2 documentation for
                               more info.
        :type redirect_uri: ``str``

        :keyword login_hint: Login hint for authentication request. Useful
                             for Installed Application authentication.
        :type login_hint: ``str``
        """
        scopes = scopes or []

        # The OAuth2 endpoints expect the scopes as one space-separated
        # string rather than a list.
        self.scopes = " ".join(scopes)
        self.redirect_uri = redirect_uri
        self.login_hint = login_hint

        super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)

    def _now(self):
        # Current UTC time, isolated in a method so the clock can be
        # overridden (e.g. in tests).
        return datetime.datetime.utcnow()

    def add_default_headers(self, headers):
        # Token requests are form-encoded, not JSON.
        headers['Content-Type'] = "application/x-www-form-urlencoded"
        headers['Host'] = self.host
        return headers

    def _token_request(self, request_body):
        """
        Return an updated token from a token request body.

        :param request_body: A dictionary of values to send in the body of the
                             token request.
        :type request_body: ``dict``

        :return: A dictionary with updated token information
        :rtype: ``dict``
        """
        data = urlencode(request_body)
        # Capture the time before issuing the request so the computed
        # expiration errs on the early (safe) side.
        now = self._now()
        response = self.request('/o/oauth2/token', method='POST', data=data)
        token_info = response.object
        if 'expires_in' in token_info:
            expire_time = now + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT)
        return token_info
+
+
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
    """Authentication connection for "Installed Application" authentication."""
    def get_code(self):
        """
        Give the user a URL that they can visit to authenticate and obtain a
        code. This method will ask for that code that the user can paste in.

        Note: this is interactive; it prints to stdout and blocks reading
        from stdin.

        :return: Code supplied by the user after authenticating
        :rtype: ``str``
        """
        auth_params = {'response_type': 'code',
                       'client_id': self.user_id,
                       'redirect_uri': self.redirect_uri,
                       'scope': self.scopes,
                       'state': 'Libcloud Request'}
        if self.login_hint:
            auth_params['login_hint'] = self.login_hint

        data = urlencode(auth_params)

        url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
        print('Please Go to the following URL and sign in:')
        print(url)
        # raw_input() was renamed to input() in Python 3; pick the right
        # builtin for the running interpreter.
        if PY3:
            code = input('Enter Code:')
        else:
            code = raw_input('Enter Code:')
        return code

    def get_new_token(self):
        """
        Get a new token. Generally used when no previous token exists or there
        is no refresh token

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # Ask the user for a code
        code = self.get_code()

        token_request = {'code': code,
                         'client_id': self.user_id,
                         'client_secret': self.key,
                         'redirect_uri': self.redirect_uri,
                         'grant_type': 'authorization_code'}

        return self._token_request(token_request)

    def refresh_token(self, token_info):
        """
        Use the refresh token supplied in the token info to get a new token.

        :param token_info: Dictionary containing current token information
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        # Without a refresh token we cannot refresh; fall back to the full
        # (interactive) authorization flow.
        if 'refresh_token' not in token_info:
            return self.get_new_token()
        refresh_request = {'refresh_token': token_info['refresh_token'],
                           'client_id': self.user_id,
                           'client_secret': self.key,
                           'grant_type': 'refresh_token'}

        new_token = self._token_request(refresh_request)
        # The refresh response may omit the refresh token; carry the old one
        # forward so future refreshes keep working.
        if 'refresh_token' not in new_token:
            new_token['refresh_token'] = token_info['refresh_token']
        return new_token
+
+
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Authentication class for "Service Account" authentication."""
    def __init__(self, user_id, key, *args, **kwargs):
        """
        Check to see if PyCrypto is available, and convert key file path into a
        key string if the key is in a file.

        :param user_id: Email address to be used for Service Account
                        authentication.
        :type user_id: ``str``

        :param key: The RSA Key or path to file containing the key.
        :type key: ``str``

        :raises GoogleAuthError: when PyCrypto is not installed.
        """
        if SHA256 is None:
            raise GoogleAuthError('PyCrypto library required for '
                                  'Service Account Authentication.')
        # Check to see if 'key' is a file and read the file if it is.
        keypath = os.path.expanduser(key)
        is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
        if is_file_path:
            with open(keypath, 'r') as f:
                key = f.read()
        super(GoogleServiceAcctAuthConnection, self).__init__(
            user_id, key, *args, **kwargs)

    def get_new_token(self):
        """
        Get a new token using the email address and RSA Key.

        Builds a signed JWT assertion (RS256) and exchanges it for an
        access token.

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # The header is always the same
        header = {'alg': 'RS256', 'typ': 'JWT'}
        # Bug fix: base64 operates on bytes. Under Python 3, passing the str
        # returned by json.dumps() raised TypeError, and the resulting bytes
        # would corrupt the '%s'-assembled JWT. Encode to UTF-8 before
        # base64-encoding and decode the result back to text; both steps are
        # no-ops for ASCII data on Python 2.
        header_enc = base64.urlsafe_b64encode(
            json.dumps(header).encode('utf-8')).decode('utf-8')

        # Construct a claim set
        claim_set = {'iss': self.user_id,
                     'scope': self.scopes,
                     'aud': 'https://accounts.google.com/o/oauth2/token',
                     'exp': int(time.time()) + 3600,
                     'iat': int(time.time())}
        claim_set_enc = base64.urlsafe_b64encode(
            json.dumps(claim_set).encode('utf-8')).decode('utf-8')

        # The message contains both the header and claim set
        message = '%s.%s' % (header_enc, claim_set_enc)
        # Then the message is signed using the key supplied
        key = RSA.importKey(self.key)
        hash_func = SHA256.new(message.encode('utf-8'))
        signer = PKCS1_v1_5.new(key)
        signature = base64.urlsafe_b64encode(
            signer.sign(hash_func)).decode('utf-8')

        # Finally the message and signature are sent to get a token
        jwt = '%s.%s' % (message, signature)
        request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
                   'assertion': jwt}

        return self._token_request(request)

    def refresh_token(self, token_info):
        """
        Refresh the current token.

        Service Account authentication doesn't supply a "refresh token" so
        this simply gets a new token using the email address/key.

        :param token_info: Dictionary containing token information.
                           (Not used, but here for compatibility)
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        return self.get_new_token()
+
+
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
    """Base connection class for interacting with Google APIs."""
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    host = 'www.googleapis.com'
    # PollingConnection tuning: poll every 2 seconds, give up after 180.
    poll_interval = 2.0
    timeout = 180

    def __init__(self, user_id, key, auth_type=None,
                 credential_file=None, scopes=None, **kwargs):
        """
        Determine authentication type, set up appropriate authentication
        connection and get initial authentication information.

        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :keyword auth_type: Accepted values are "SA" or "IA"
                            ("Service Account" or "Installed Application").
                            If not supplied, auth_type will be guessed based
                            on value of user_id.
        :type auth_type: ``str``

        :keyword credential_file: Path to file for caching authentication
                                  information.
        :type credential_file: ``str``

        :keyword scopes: List of OAuth2 scope URLs. The empty default sets
                         read/write access to Compute, Storage, and DNS.
        :type scopes: ``list``

        :raises GoogleAuthError: if auth_type is not 'SA' or 'IA'.
        """
        self.credential_file = credential_file or '~/.gce_libcloud_auth'

        if auth_type is None:
            # Try to guess. Service accounts use an email address
            # as the user id.
            if '@' in user_id:
                auth_type = 'SA'
            else:
                auth_type = 'IA'

        # Default scopes to read/write for compute, storage, and dns. Can
        # override this when calling get_driver() or setting in secrets.py
        self.scopes = scopes
        if not self.scopes:
            self.scopes = [
                'https://www.googleapis.com/auth/compute',
                'https://www.googleapis.com/auth/devstorage.full_control',
                'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
            ]
        # Reuse a token cached by a previous run, when available.
        self.token_info = self._get_token_info_from_file()

        if auth_type == 'SA':
            self.auth_conn = GoogleServiceAcctAuthConnection(
                user_id, key, self.scopes, **kwargs)
        elif auth_type == 'IA':
            self.auth_conn = GoogleInstalledAppAuthConnection(
                user_id, key, self.scopes, **kwargs)
        else:
            raise GoogleAuthError('auth_type should be \'SA\' or \'IA\'')

        if self.token_info is None:
            # No cached token: run the full auth flow and cache the result
            # for subsequent runs.
            self.token_info = self.auth_conn.get_new_token()
            self._write_token_info_to_file()

        self.token_expire_time = datetime.datetime.strptime(
            self.token_info['expire_time'], TIMESTAMP_FORMAT)

        super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)

        # Advertise the interpreter version and platform in the User-Agent.
        python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
                                   sys.version_info[2])
        ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
        self.user_agent_append(ver_platform)

    def _now(self):
        # Current UTC time, isolated so it can be overridden (e.g. in tests).
        return datetime.datetime.utcnow()

    def add_default_headers(self, headers):
        """
        @inherits: :class:`Connection.add_default_headers`
        """
        headers['Content-Type'] = "application/json"
        headers['Host'] = self.host
        return headers

    def pre_connect_hook(self, params, headers):
        """
        Check to make sure that token hasn't expired. If it has, get an
        updated token. Also, add the token to the headers.

        @inherits: :class:`Connection.pre_connect_hook`
        """
        now = self._now()
        if self.token_expire_time < now:
            self.token_info = self.auth_conn.refresh_token(self.token_info)
            self.token_expire_time = datetime.datetime.strptime(
                self.token_info['expire_time'], TIMESTAMP_FORMAT)
            # Persist the refreshed token so later runs can reuse it.
            self._write_token_info_to_file()
        headers['Authorization'] = 'Bearer %s' % (
            self.token_info['access_token'])

        return params, headers

    def encode_data(self, data):
        """Encode data to JSON"""
        return json.dumps(data)

    def request(self, *args, **kwargs):
        """
        @inherits: :class:`Connection.request`
        """
        # Adds some retry logic for the occasional
        # "Connection Reset by peer" error.
        retries = 4
        tries = 0
        while tries < (retries - 1):
            try:
                return super(GoogleBaseConnection, self).request(
                    *args, **kwargs)
            except socket.error:
                e = sys.exc_info()[1]
                if e.errno == errno.ECONNRESET:
                    tries = tries + 1
                else:
                    raise e
        # One more time, then give up.
        return super(GoogleBaseConnection, self).request(*args, **kwargs)

    def _get_token_info_from_file(self):
        """
        Read credential file and return token information.

        :return: Token information dictionary, or None
        :rtype: ``dict`` or ``None``
        """
        token_info = None
        filename = os.path.realpath(os.path.expanduser(self.credential_file))

        try:
            with open(filename, 'r') as f:
                data = f.read()
            token_info = json.loads(data)
        except IOError:
            # A missing or unreadable cache file just means "no cached
            # token"; callers handle the None return.
            pass
        return token_info

    def _write_token_info_to_file(self):
        """
        Write token_info to credential file.

        NOTE(review): the file is created with default permissions even
        though it holds access/refresh tokens; 0600 would be safer - confirm.
        """
        filename = os.path.realpath(os.path.expanduser(self.credential_file))
        data = json.dumps(self.token_info)
        with open(filename, 'w') as f:
            f.write(data)

    def has_completed(self, response):
        """
        Determine if operation has completed based on response.

        :param response: JSON response
        :type response: I{responseCls}

        :return: True if complete, False otherwise
        :rtype: ``bool``
        """
        if response.object['status'] == 'DONE':
            return True
        else:
            return False

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        @inherits: :class:`PollingConnection.get_poll_request_kwargs`
        """
        return {'action': response.object['selfLink']}

    def morph_action_hook(self, action):
        """
        Update action to correct request path.

        In many places, the Google API returns a full URL to a resource.
        This will strip the scheme and host off of the path and just return
        the request. Otherwise, it will append the base request_path to
        the action.

        :param action: The action to be called in the http request
        :type action: ``str``

        :return: The modified request based on the action
        :rtype: ``str``
        """
        if action.startswith('https://'):
            u = urlparse.urlsplit(action)
            request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
        else:
            request = self.request_path + action
        return request
diff --git a/awx/lib/site-packages/libcloud/common/hostvirtual.py b/awx/lib/site-packages/libcloud/common/hostvirtual.py
new file mode 100644
index 0000000000..e7ce14daba
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/hostvirtual.py
@@ -0,0 +1,77 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.py3 import httplib
+from libcloud.common.base import ConnectionKey, JsonResponse
+from libcloud.compute.types import InvalidCredsError
+from libcloud.common.types import LibcloudError
+
+API_HOST = 'vapi.vr.org'
+
+
class HostVirtualException(LibcloudError):
    """
    Exception for errors returned by the Host Virtual API.

    ``code`` is the numeric error code and ``message`` the human-readable
    description returned by the API.
    """
    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Bug fix: the format string was empty, which made repr()/str()
        # raise TypeError instead of describing the error.
        return '<HostVirtualException in %d: %s>' % (self.code, self.message)
+
+
class HostVirtualConnection(ConnectionKey):
    """Key-authenticated connection to the Host Virtual API."""

    host = API_HOST

    # Do not fall back to non-TLS connections (honored by the base
    # Connection class).
    allow_insecure = False

    def add_default_params(self, params):
        """Inject the API key, which every request must carry."""
        params['key'] = self.key
        return params
+
+
class HostVirtualResponse(JsonResponse):
    """JSON response wrapper translating Host Virtual API errors into
    libcloud exceptions."""

    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_body(self):
        """Decode the JSON body; an empty body parses to None."""
        if not self.body:
            return None
        return json.loads(self.body)

    def parse_error(self):
        """Raise the exception matching the error status, if any."""
        data = self.parse_body()

        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('%(code)s:%(message)s' % (data['error']))
        if self.status in (httplib.PRECONDITION_FAILED, httplib.NOT_FOUND):
            raise HostVirtualException(
                data['error']['code'], data['error']['message'])

        return self.body

    def success(self):
        """A response is successful when its status is in the 2xx list."""
        return self.status in self.valid_response_codes
diff --git a/awx/lib/site-packages/libcloud/common/linode.py b/awx/lib/site-packages/libcloud/common/linode.py
new file mode 100644
index 0000000000..f7ee22bdd2
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/linode.py
@@ -0,0 +1,176 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.common.base import ConnectionKey, JsonResponse
+from libcloud.common.types import InvalidCredsError
+
+from libcloud.utils.py3 import PY3
+from libcloud.utils.py3 import b
+
+__all__ = [
+ 'API_HOST',
+ 'API_ROOT',
+ 'LinodeException',
+ 'LinodeResponse',
+ 'LinodeConnection'
+]
+
+# Endpoint for the Linode API
+API_HOST = 'api.linode.com'
+API_ROOT = '/'
+
+# Constants that map a RAM figure to a PlanID (updated 4/25/14)
+LINODE_PLAN_IDS = {2048: '1',
+ 4096: '3',
+ 8192: '5',
+ 16384: '6',
+ 32768: '7',
+ 49152: '8',
+ 65536: '9',
+ 98304: '11'}
+
+
class LinodeException(Exception):
    """Error originating from the Linode API

    This class wraps a Linode API error, a list of which is available in the
    API documentation. All Linode API errors are a numeric code and a
    human-readable description.
    """
    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "(%u) %s" % (self.code, self.message)

    def __repr__(self):
        # Bug fix: the format string was empty, so repr() raised
        # "TypeError: not all arguments converted during string formatting".
        return "<LinodeException code=%u message='%s'>" % (self.code,
                                                           self.message)
+
+
class LinodeResponse(JsonResponse):
    """Linode API response

    Wraps the HTTP response returned by the Linode API, which should be JSON in
    this structure:

    {
        "ERRORARRAY": [ ... ],
        "DATA": [ ... ],
        "ACTION": " ... "
    }

    libcloud does not take advantage of batching, so a response will always
    reflect the above format. A few weird quirks are caught here as well."""
    def __init__(self, response, connection):
        """Instantiate a LinodeResponse from the HTTP response

        :keyword response: The raw response returned by urllib
        :return: parsed :class:`LinodeResponse`

        :raises LinodeException: first API error, when the request failed
        """

        self.connection = connection

        self.headers = dict(response.getheaders())
        self.error = response.reason
        self.status = response.status

        self.body = self._decompress_response(body=response.read(),
                                              headers=self.headers)

        if PY3:
            # On Python 3 the raw body is bytes; normalize to text.
            self.body = b(self.body).decode('utf-8')

        # Shared placeholder error used whenever the body cannot be parsed.
        self.invalid = LinodeException(0xFF,
                                       "Invalid JSON received from server")

        # Move parse_body() to here; we can't be sure of failure until we've
        # parsed the body into JSON.
        self.objects, self.errors = self.parse_body()

        if not self.success():
            # Raise the first error, as there will usually only be one
            raise self.errors[0]

    def parse_body(self):
        """Parse the body of the response into JSON objects

        If the response chokes the parser, action and data will be returned as
        None and errorarray will indicate an invalid JSON exception.

        :return: ``list`` of objects and ``list`` of errors"""
        js = super(LinodeResponse, self).parse_body()

        try:
            if isinstance(js, dict):
                # solitary response - promote to list
                js = [js]
            ret = []
            errs = []
            for obj in js:
                if ("DATA" not in obj or "ERRORARRAY" not in obj
                        or "ACTION" not in obj):
                    # Envelope is missing a required key: record it as
                    # invalid but keep scanning the remaining entries.
                    ret.append(None)
                    errs.append(self.invalid)
                    continue
                ret.append(obj["DATA"])
                errs.extend(self._make_excp(e) for e in obj["ERRORARRAY"])
            return (ret, errs)
        except:
            # Deliberately broad: any failure while walking the parsed
            # structure (non-iterable js, malformed entries) degrades to the
            # generic "invalid JSON" error instead of crashing.
            return (None, [self.invalid])

    def success(self):
        """Check the response for success

        The way we determine success is by the presence of an error in
        ERRORARRAY. If one is there, we assume the whole request failed.

        :return: ``bool`` indicating a successful request"""
        return len(self.errors) == 0

    def _make_excp(self, error):
        """Convert an API error to a LinodeException instance

        :keyword error: JSON object containing ``ERRORCODE`` and
                        ``ERRORMESSAGE``
        :type error: dict"""
        if "ERRORCODE" not in error or "ERRORMESSAGE" not in error:
            return None
        if error["ERRORCODE"] == 4:
            # Error code 4 is mapped to invalid credentials.
            return InvalidCredsError(error["ERRORMESSAGE"])
        return LinodeException(error["ERRORCODE"], error["ERRORMESSAGE"])
+
+
class LinodeConnection(ConnectionKey):
    """
    A connection to the Linode API

    Wraps SSL connections to the Linode API, automagically injecting the
    parameters that the API needs for each request.
    """
    host = API_HOST
    responseCls = LinodeResponse

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request

        This method adds ``api_key`` and ``api_responseFormat`` to
        the request.
        """
        params.update({
            "api_key": self.key,
            # Be explicit about this in case the default changes.
            "api_responseFormat": "json",
        })
        return params
diff --git a/awx/lib/site-packages/libcloud/common/openstack.py b/awx/lib/site-packages/libcloud/common/openstack.py
new file mode 100644
index 0000000000..26bf6920b5
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/openstack.py
@@ -0,0 +1,652 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common utilities for OpenStack
+"""
+import sys
+import datetime
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.iso8601 import parse_date
+
+from libcloud.common.base import ConnectionUserAndKey, Response
+from libcloud.compute.types import (LibcloudError, InvalidCredsError,
+ MalformedResponseError)
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+AUTH_API_VERSION = '1.1'
+
+# Auth versions which contain token expiration information.
+AUTH_VERSIONS_WITH_EXPIRES = [
+ '1.1',
+ '2.0',
+ '2.0_apikey',
+ '2.0_password'
+]
+
+# How many seconds to subtract from the auth token expiration time before
+# testing if the token is still valid.
+# The time is subtracted to account for the HTTP request latency and prevent
+# user from getting "InvalidCredsError" if token is about to expire.
+AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5
+
+__all__ = [
+ 'OpenStackBaseConnection',
+ 'OpenStackAuthConnection',
+ 'OpenStackServiceCatalog',
+ 'OpenStackDriverMixin',
+
+ 'AUTH_TOKEN_EXPIRES_GRACE_SECONDS'
+]
+
+
+# @TODO: Refactor for re-use by other openstack drivers
class OpenStackAuthResponse(Response):
    """
    Response class for the identity (auth) endpoint.

    ``success`` always returns ``True`` so that non-2xx auth responses are
    still parsed; callers inspect ``self.status`` themselves.
    """

    def success(self):
        # Auth error responses still carry a useful body, so never treat
        # the response as a failure at this level.
        return True

    def parse_body(self):
        """
        Parse the response body based on its content type.

        :return: ``None`` for an empty body, the decoded JSON structure for
                 ``application/json`` responses, otherwise the raw body.
        :raises: :class:`MalformedResponseError` when a JSON body cannot be
                 decoded, :class:`LibcloudError` when the content-type
                 header is missing entirely.
        """
        if not self.body:
            return None

        # Header names may come back in either case depending on the
        # underlying HTTP library.
        if 'content-type' in self.headers:
            key = 'content-type'
        elif 'Content-Type' in self.headers:
            key = 'Content-Type'
        else:
            raise LibcloudError('Missing content-type header',
                                driver=OpenStackAuthConnection)

        # Strip any parameters, e.g. "application/json; charset=UTF-8".
        content_type = self.headers[key]
        if content_type.find(';') != -1:
            content_type = content_type.split(';')[0]

        if content_type == 'application/json':
            try:
                data = json.loads(self.body)
            except Exception:
                # Was a bare "except:"; narrowed so system-exiting
                # exceptions (KeyboardInterrupt, SystemExit) propagate.
                # simplejson and the stdlib json raise different decode
                # error types, hence the broad Exception.
                raise MalformedResponseError('Failed to parse JSON',
                                             body=self.body,
                                             driver=OpenStackAuthConnection)
        else:
            # text/plain and any other content type: return the raw body.
            data = self.body

        return data
+
+
class OpenStackAuthConnection(ConnectionUserAndKey):
    """
    Connection used for talking to an OpenStack identity (auth) endpoint.

    Supports several auth dialects (1.0, 1.1, 2.0 api-key, 2.0 password)
    and caches the resulting token, its expiry time, the service catalog
    URLs and the user info on the instance.
    """

    responseCls = OpenStackAuthResponse
    name = 'OpenStack Auth'
    timeout = None

    def __init__(self, parent_conn, auth_url, auth_version, user_id, key,
                 tenant_name=None, timeout=None):
        """
        :param parent_conn: The OpenStackBaseConnection this auth connection
                            authenticates on behalf of.
        :param auth_url: URL of the identity endpoint.
        :param auth_version: Auth dialect to speak (e.g. '2.0_password').
        :param user_id: User name.
        :param key: Password or API key, depending on the auth version.
        :param tenant_name: Optional tenant to scope the token to.
        :param timeout: Optional connection timeout.
        """
        self.parent_conn = parent_conn
        # enable tests to use the same mock connection classes.
        self.conn_classes = parent_conn.conn_classes

        super(OpenStackAuthConnection, self).__init__(
            user_id, key, url=auth_url, timeout=timeout)

        self.auth_version = auth_version
        self.auth_url = auth_url
        self.driver = self.parent_conn.driver
        self.tenant_name = tenant_name
        self.timeout = timeout

        # Populated by the authenticate_* methods below.
        self.urls = {}
        self.auth_token = None
        self.auth_token_expires = None
        self.auth_user_info = None

    def morph_action_hook(self, action):
        # A path embedded in auth_url takes precedence over the action
        # passed to request().
        (_, _, _, request_path) = self._tuple_from_url(self.auth_url)

        if request_path == '':
            # No path is provided in the auth_url, use action passed to this
            # method.
            return action

        return request_path

    def add_default_headers(self, headers):
        # The identity API speaks JSON on both request and response.
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json; charset=UTF-8'
        return headers

    def authenticate(self, force=False):
        """
        Authenticate against the keystone api.

        :param force: Forcefully update the token even if it's already cached
        and still valid.
        :type force: ``bool``
        """
        if not force and self.auth_version in AUTH_VERSIONS_WITH_EXPIRES \
           and self.is_token_valid():
            # If token is still valid, there is no need to re-authenticate
            return self

        # Dispatch to the implementation for the configured auth dialect.
        if self.auth_version == "1.0":
            return self.authenticate_1_0()
        elif self.auth_version == "1.1":
            return self.authenticate_1_1()
        elif self.auth_version == "2.0" or self.auth_version == "2.0_apikey":
            return self.authenticate_2_0_with_apikey()
        elif self.auth_version == "2.0_password":
            return self.authenticate_2_0_with_password()
        else:
            raise LibcloudError('Unsupported Auth Version requested')

    def authenticate_1_0(self):
        """
        Authenticate against the legacy v1.0 API, which passes credentials
        and returns results in HTTP headers rather than in a JSON body.
        """
        headers = {
            'X-Auth-User': self.user_id,
            'X-Auth-Key': self.key,
        }

        resp = self.request('/v1.0', headers=headers, method='GET')

        if resp.status == httplib.UNAUTHORIZED:
            # HTTP UNAUTHORIZED (401): auth failed
            raise InvalidCredsError()
        elif resp.status not in [httplib.NO_CONTENT, httplib.OK]:
            body = 'code: %s body:%s headers:%s' % (resp.status,
                                                    resp.body,
                                                    resp.headers)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            headers = resp.headers
            # emulate the auth 1.1 URL list
            self.urls = {}
            self.urls['cloudServers'] = \
                [{'publicURL': headers.get('x-server-management-url', None)}]
            self.urls['cloudFilesCDN'] = \
                [{'publicURL': headers.get('x-cdn-management-url', None)}]
            self.urls['cloudFiles'] = \
                [{'publicURL': headers.get('x-storage-url', None)}]
            self.auth_token = headers.get('x-auth-token', None)
            self.auth_user_info = None

            if not self.auth_token:
                raise MalformedResponseError('Missing X-Auth-Token in \
                                              response headers')

        return self

    def authenticate_1_1(self):
        """
        Authenticate against the v1.1 API, which returns the token and the
        service catalog in a JSON body.
        """
        reqbody = json.dumps({'credentials': {'username': self.user_id,
                                              'key': self.key}})
        resp = self.request('/v1.1/auth', data=reqbody, headers={},
                            method='POST')

        if resp.status == httplib.UNAUTHORIZED:
            # HTTP UNAUTHORIZED (401): auth failed
            raise InvalidCredsError()
        elif resp.status != httplib.OK:
            body = 'code: %s body:%s' % (resp.status, resp.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            try:
                body = json.loads(resp.body)
            except Exception:
                # sys.exc_info() matches the py2/py3-compatible exception
                # handling style used throughout this file.
                e = sys.exc_info()[1]
                raise MalformedResponseError('Failed to parse JSON', e)

            try:
                expires = body['auth']['token']['expires']

                self.auth_token = body['auth']['token']['id']
                self.auth_token_expires = parse_date(expires)
                self.urls = body['auth']['serviceCatalog']
                self.auth_user_info = None
            except KeyError:
                e = sys.exc_info()[1]
                raise MalformedResponseError('Auth JSON response is \
                                             missing required elements', e)

        return self

    def authenticate_2_0_with_apikey(self):
        # API Key based authentication uses the RAX-KSKEY extension.
        # http://s.apache.org/oAi
        data = {'auth':
                {'RAX-KSKEY:apiKeyCredentials':
                 {'username': self.user_id, 'apiKey': self.key}}}
        if self.tenant_name:
            data['auth']['tenantName'] = self.tenant_name
        reqbody = json.dumps(data)
        return self.authenticate_2_0_with_body(reqbody)

    def authenticate_2_0_with_password(self):
        # Password based authentication is the only 'core' authentication
        # method in Keystone at this time.
        # 'keystone' - http://s.apache.org/e8h
        data = {'auth':
                {'passwordCredentials':
                 {'username': self.user_id, 'password': self.key}}}
        if self.tenant_name:
            data['auth']['tenantName'] = self.tenant_name
        reqbody = json.dumps(data)
        return self.authenticate_2_0_with_body(reqbody)

    def authenticate_2_0_with_body(self, reqbody):
        """
        POST the prepared credentials body to the v2.0 tokens endpoint and
        store the token, service catalog and user info from the response.
        """
        resp = self.request('/v2.0/tokens', data=reqbody,
                            headers={'Content-Type': 'application/json'},
                            method='POST')
        if resp.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        elif resp.status not in [httplib.OK,
                                 httplib.NON_AUTHORITATIVE_INFORMATION]:
            body = 'code: %s body: %s' % (resp.status, resp.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            try:
                body = json.loads(resp.body)
            except Exception:
                e = sys.exc_info()[1]
                raise MalformedResponseError('Failed to parse JSON', e)

            try:
                access = body['access']
                expires = access['token']['expires']

                self.auth_token = access['token']['id']
                self.auth_token_expires = parse_date(expires)
                self.urls = access['serviceCatalog']
                self.auth_user_info = access.get('user', {})
            except KeyError:
                e = sys.exc_info()[1]
                raise MalformedResponseError('Auth JSON response is \
                                             missing required elements', e)

        return self

    def is_token_valid(self):
        """
        Return True if the current auth token is already cached and hasn't
        expired yet.

        :return: ``True`` if the token is still valid, ``False`` otherwise.
        :rtype: ``bool``
        """
        if not self.auth_token:
            return False

        if not self.auth_token_expires:
            return False

        # Subtract a small grace period so a token that is about to expire
        # is treated as already expired (accounts for request latency).
        expires = self.auth_token_expires - \
            datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS)

        time_tuple_expires = expires.utctimetuple()
        time_tuple_now = datetime.datetime.utcnow().utctimetuple()

        if time_tuple_now < time_tuple_expires:
            return True

        return False
+
+
class OpenStackServiceCatalog(object):
    """
    http://docs.openstack.org/api/openstack-identity-service/2.0/content/

    This class should be instantiated with the contents of the
    'serviceCatalog' in the auth response. This will do the work of figuring
    out which services actually exist in the catalog as well as split them up
    by type, name, and region if available
    """

    _auth_version = None
    _service_catalog = None

    def __init__(self, service_catalog, ex_force_auth_version=None):
        """
        :param service_catalog: 'serviceCatalog' portion of the auth
                                response (a ``list`` for 2.0, a ``dict``
                                for 1.0 / 1.1).
        :param ex_force_auth_version: Auth version the catalog came from;
                                      defaults to AUTH_API_VERSION.
        """
        self._auth_version = ex_force_auth_version or AUTH_API_VERSION
        self._service_catalog = {}

        # Check this way because there are a couple of different 2.0_*
        # auth types.
        if '2.0' in self._auth_version:
            self._parse_auth_v2(service_catalog)
        elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
            self._parse_auth_v1(service_catalog)
        else:
            raise LibcloudError('auth version "%s" not supported'
                                % (self._auth_version))

    def get_catalog(self):
        """Return the parsed service catalog dictionary."""
        return self._service_catalog

    def get_public_urls(self, service_type=None, name=None):
        """
        Return the 'publicURL' value of every matching endpoint.

        :rtype: ``list`` of ``str``
        """
        endpoints = self.get_endpoints(service_type=service_type,
                                       name=name)
        return [endpoint['publicURL'] for endpoint in endpoints
                if 'publicURL' in endpoint]

    def get_endpoints(self, service_type=None, name=None):
        """
        Return the first endpoint of each region for the given service.

        :rtype: ``list`` of ``dict``
        """
        # Default so an unrecognized auth version yields an empty list
        # instead of a NameError (previously "endpoints" could be
        # referenced without ever being assigned).
        endpoints = {}

        if '2.0' in self._auth_version:
            endpoints = self._service_catalog.get(service_type, {}) \
                            .get(name, {})
        elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
            endpoints = self._service_catalog.get(name, {})

        eps = []
        for region_name, values in endpoints.items():
            # Only the first endpoint per region is exposed.
            eps.append(values[0])

        return eps

    def get_endpoint(self, service_type=None, name=None, region=None):
        """
        Return the single endpoint for a service in the given region, or an
        empty dict when it is missing or ambiguous.

        :rtype: ``dict``
        """
        # Default for the same reason as in get_endpoints() above.
        endpoint = []

        if '2.0' in self._auth_version:
            endpoint = self._service_catalog.get(service_type, {}) \
                           .get(name, {}).get(region, [])
        elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
            endpoint = self._service_catalog.get(name, {}).get(region, [])

        # ideally an endpoint either isn't found or only one match is found.
        if len(endpoint) == 1:
            return endpoint[0]
        else:
            return {}

    def _parse_auth_v1(self, service_catalog):
        # v1 catalogs map service name -> endpoint list; group the
        # endpoints by region.
        for service, endpoints in service_catalog.items():

            self._service_catalog[service] = {}

            for endpoint in endpoints:
                region = endpoint.get('region')

                if region not in self._service_catalog[service]:
                    self._service_catalog[service][region] = []

                self._service_catalog[service][region].append(endpoint)

    def _parse_auth_v2(self, service_catalog):
        # v2 catalogs are lists of services; group endpoints by type, name
        # and region.
        for service in service_catalog:
            service_type = service['type']
            service_name = service.get('name', None)

            if service_type not in self._service_catalog:
                self._service_catalog[service_type] = {}

            if service_name not in self._service_catalog[service_type]:
                self._service_catalog[service_type][service_name] = {}

            for endpoint in service.get('endpoints', []):
                region = endpoint.get('region', None)

                catalog = self._service_catalog[service_type][service_name]
                if region not in catalog:
                    catalog[region] = []

                catalog[region].append(endpoint)
+
+
class OpenStackBaseConnection(ConnectionUserAndKey):

    """
    Base class for OpenStack connections.

    :param user_id: User name to use when authenticating
    :type user_id: ``str``

    :param key: Secret to use when authenticating.
    :type key: ``str``

    :param secure: Use HTTPS? (True by default.)
    :type secure: ``bool``

    :param ex_force_base_url: Base URL for connection requests. If
        not specified, this will be determined by authenticating.
    :type ex_force_base_url: ``str``

    :param ex_force_auth_url: Base URL for authentication requests.
    :type ex_force_auth_url: ``str``

    :param ex_force_auth_version: Authentication version to use. If
        not specified, defaults to AUTH_API_VERSION.
    :type ex_force_auth_version: ``str``

    :param ex_force_auth_token: Authentication token to use for
        connection requests. If specified, the connection will not attempt
        to authenticate, and the value of ex_force_base_url will be used to
        determine the base request URL. If ex_force_auth_token is passed in,
        ex_force_base_url must also be provided.
    :type ex_force_auth_token: ``str``

    :param ex_tenant_name: When authenticating, provide this tenant
        name to the identity service. A scoped token will be returned.
        Some cloud providers require the tenant name to be provided at
        authentication time. Others will use a default tenant if none
        is provided.
    :type ex_tenant_name: ``str``

    :param ex_force_service_type: Service type to use when selecting a
        service. If not specified, a provider specific default will be used.
    :type ex_force_service_type: ``str``

    :param ex_force_service_name: Service name to use when selecting a
        service. If not specified, a provider specific default will be used.
    :type ex_force_service_name: ``str``

    :param ex_force_service_region: Region to use when selecting a
        service. If not specified, a provider specific default will be used.
    :type ex_force_service_region: ``str``
    """

    # Identity endpoint; usually set by provider specific subclasses.
    auth_url = None
    # Token state cached after authentication.
    auth_token = None
    auth_token_expires = None
    auth_user_info = None
    # Parsed OpenStackServiceCatalog, set after authentication.
    service_catalog = None
    # Defaults used when selecting an endpoint from the catalog; the
    # ex_force_service_* constructor arguments override them.
    service_type = None
    service_name = None
    service_region = None
    _auth_version = None

    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, timeout=None,
                 ex_force_base_url=None,
                 ex_force_auth_url=None,
                 ex_force_auth_version=None,
                 ex_force_auth_token=None,
                 ex_tenant_name=None,
                 ex_force_service_type=None,
                 ex_force_service_name=None,
                 ex_force_service_region=None):
        super(OpenStackBaseConnection, self).__init__(
            user_id, key, secure=secure, timeout=timeout)

        if ex_force_auth_version:
            self._auth_version = ex_force_auth_version

        self._ex_force_base_url = ex_force_base_url
        self._ex_force_auth_url = ex_force_auth_url
        self._ex_force_auth_token = ex_force_auth_token
        self._ex_tenant_name = ex_tenant_name
        self._ex_force_service_type = ex_force_service_type
        self._ex_force_service_name = ex_force_service_name
        self._ex_force_service_region = ex_force_service_region

        # A forced token bypasses authentication entirely, so the base URL
        # cannot be discovered from the service catalog and must be given.
        if ex_force_auth_token and not ex_force_base_url:
            raise LibcloudError(
                'Must also provide ex_force_base_url when specifying '
                'ex_force_auth_token.')

        if ex_force_auth_token:
            self.auth_token = ex_force_auth_token

        if not self._auth_version:
            self._auth_version = AUTH_API_VERSION

        auth_url = self._get_auth_url()

        if not auth_url:
            raise LibcloudError('OpenStack instance must ' +
                                'have auth_url set')

        # The auth connection is created eagerly, but authentication itself
        # is deferred until the first request
        # (see _populate_hosts_and_request_paths).
        osa = OpenStackAuthConnection(self, auth_url, self._auth_version,
                                      self.user_id, self.key,
                                      tenant_name=self._ex_tenant_name,
                                      timeout=self.timeout)
        self._osa = osa

    def _get_auth_url(self):
        """
        Retrieve auth url for this instance using either "ex_force_auth_url"
        constructor kwarg or the "auth_url" class variable.
        """
        auth_url = self.auth_url

        if self._ex_force_auth_url is not None:
            auth_url = self._ex_force_auth_url

        return auth_url

    def get_service_catalog(self):
        """
        Return the service catalog, authenticating first if it has not been
        fetched yet.
        """
        if self.service_catalog is None:
            self._populate_hosts_and_request_paths()

        return self.service_catalog

    def get_endpoint(self):
        """
        Selects the endpoint to use based on provider specific values,
        or overrides passed in by the user when setting up the driver.

        :returns: url of the relevant endpoint for the driver
        """
        service_type = self.service_type
        service_name = self.service_name
        service_region = self.service_region
        # User supplied overrides win over the class-level defaults.
        if self._ex_force_service_type:
            service_type = self._ex_force_service_type
        if self._ex_force_service_name:
            service_name = self._ex_force_service_name
        if self._ex_force_service_region:
            service_region = self._ex_force_service_region

        ep = self.service_catalog.get_endpoint(service_type=service_type,
                                               name=service_name,
                                               region=service_region)
        if 'publicURL' in ep:
            return ep['publicURL']

        raise LibcloudError('Could not find specified endpoint')

    def add_default_headers(self, headers):
        # Every API request carries the cached auth token.
        headers['X-Auth-Token'] = self.auth_token
        headers['Accept'] = self.accept_format
        return headers

    def morph_action_hook(self, action):
        # Authenticate lazily (if needed) before the first real request.
        self._populate_hosts_and_request_paths()
        return super(OpenStackBaseConnection, self).morph_action_hook(action)

    def request(self, **kwargs):
        # Plain pass-through to the base implementation.
        return super(OpenStackBaseConnection, self).request(**kwargs)

    def _set_up_connection_info(self, url):
        """Point this connection at the host/port/path of the given url."""
        result = self._tuple_from_url(url)
        (self.host, self.port, self.secure, self.request_path) = result

    def _populate_hosts_and_request_paths(self):
        """
        OpenStack uses a separate host for API calls which is only provided
        after an initial authentication request.
        """
        osa = self._osa

        if self._ex_force_auth_token:
            # If ex_force_auth_token is provided we always hit the api directly
            # and never try to authenticate.
            #
            # Note: When ex_force_auth_token is provided, ex_force_base_url
            # must be provided as well.
            self._set_up_connection_info(url=self._ex_force_base_url)
            return

        if not osa.is_token_valid():
            # Token is not available or it has expired. Need to retrieve a
            # new one.
            osa.authenticate()  # may throw InvalidCreds

            self.auth_token = osa.auth_token
            self.auth_token_expires = osa.auth_token_expires
            self.auth_user_info = osa.auth_user_info

            # Pull out and parse the service catalog
            osc = OpenStackServiceCatalog(
                osa.urls, ex_force_auth_version=self._auth_version)
            self.service_catalog = osc

        url = self._ex_force_base_url or self.get_endpoint()
        self._set_up_connection_info(url=url)
+
+
class OpenStackDriverMixin(object):
    """
    Mixin that captures the OpenStack specific ``ex_*`` driver keyword
    arguments and replays the ones that were set as connection kwargs.
    """

    # All supported OpenStack connection overrides; each is stored on the
    # instance under a leading-underscore name.
    _connection_kwarg_names = (
        'ex_force_base_url',
        'ex_force_auth_url',
        'ex_force_auth_version',
        'ex_force_auth_token',
        'ex_tenant_name',
        'ex_force_service_type',
        'ex_force_service_name',
        'ex_force_service_region',
    )

    def __init__(self, *args, **kwargs):
        for name in self._connection_kwarg_names:
            setattr(self, '_%s' % (name), kwargs.get(name, None))

    def openstack_connection_kwargs(self):
        """
        Return the subset of overrides that were actually supplied.

        :rtype: ``dict``
        """
        result = {}
        for name in self._connection_kwarg_names:
            value = getattr(self, '_%s' % (name))
            if value:
                result[name] = value
        return result
diff --git a/awx/lib/site-packages/libcloud/common/rackspace.py b/awx/lib/site-packages/libcloud/common/rackspace.py
new file mode 100644
index 0000000000..3826ba6f62
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/rackspace.py
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common settings for Rackspace Cloud Servers and Cloud Files
+"""
+
+__all__ = [
+ 'AUTH_URL'
+]
+
+AUTH_URL = 'https://auth.api.rackspacecloud.com'
diff --git a/awx/lib/site-packages/libcloud/common/types.py b/awx/lib/site-packages/libcloud/common/types.py
new file mode 100644
index 0000000000..98250c1ba2
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/types.py
@@ -0,0 +1,144 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.utils.py3 import httplib
+
+__all__ = [
+ "LibcloudError",
+ "MalformedResponseError",
+ "ProviderError",
+ "InvalidCredsError",
+ "InvalidCredsException",
+ "LazyList"
+]
+
+
class LibcloudError(Exception):
    """The base class for other libcloud exceptions"""

    def __init__(self, value, driver=None):
        # Pass the value through to Exception so e.args and pickling
        # behave as expected.
        super(LibcloudError, self).__init__(value)
        self.value = value
        self.driver = driver

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Restore the informative repr; the original returned the empty
        # string because the "<...>" text was stripped.
        return ('<LibcloudError in '
                + repr(self.driver)
                + ' '
                + repr(self.value) + '>')
+
+
class MalformedResponseError(LibcloudError):
    """
    Exception for the cases when a provider returns a malformed
    response, e.g. you request JSON and the provider returns
    '<h3>something</h3>' due to some error on their side.
    """

    def __init__(self, value, body=None, driver=None):
        self.value = value
        self.driver = driver
        # Raw response body, kept for diagnostics.
        self.body = body

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Restore the informative repr (the "<...>" text had been
        # stripped, leaving only '": " + repr(self.body)') and include the
        # offending body so failures can be diagnosed from the message.
        return ('<MalformedResponseError in '
                + repr(self.driver)
                + ' '
                + repr(self.value)
                + '>: '
                + repr(self.body))
+
+
class ProviderError(LibcloudError):
    """
    Raised when a provider returns an error response (HTTP 4xx, 5xx)
    for a request.

    More specific subclasses are derived for common errors, e.g.
    HTTP 401: InvalidCredsError,
    HTTP 404: NodeNotFoundError, ContainerDoesNotExistError.
    """

    def __init__(self, value, http_code, driver=None):
        self.value = value
        self.driver = driver
        self.http_code = http_code

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return repr(self.value)
+
+
class InvalidCredsError(ProviderError):
    """Raised when a provider rejects the supplied credentials."""

    def __init__(self, value='Invalid credentials with the provider',
                 driver=None):
        # Invalid credentials always map to HTTP 401 (Unauthorized).
        super(InvalidCredsError, self).__init__(
            value, http_code=httplib.UNAUTHORIZED, driver=driver)


# Deprecated alias of :class:`InvalidCredsError`
InvalidCredsException = InvalidCredsError
+
+
class LazyList(object):
    """
    A list-like sequence whose contents are fetched page by page through a
    ``get_more`` callable and materialized on first use.

    ``get_more(last_key=..., value_dict=...)`` must return a
    ``(new_items, last_key, exhausted)`` tuple; it is called repeatedly
    until ``exhausted`` is true.
    """

    def __init__(self, get_more, value_dict=None):
        self._data = []
        self._last_key = None
        self._exhausted = False
        self._all_loaded = False
        self._get_more = get_more
        self._value_dict = value_dict or {}

    def __iter__(self):
        if not self._all_loaded:
            self._load_all()

        data = self._data
        for i in data:
            yield i

    def __getitem__(self, index):
        # Negative indices and indices past the already-fetched portion
        # both require the full result set. (Previously a negative index
        # raised IndexError when nothing had been loaded yet.)
        if not self._all_loaded and \
                (index < 0 or index >= len(self._data)):
            self._load_all()

        return self._data[index]

    def __len__(self):
        self._load_all()
        return len(self._data)

    def __repr__(self):
        self._load_all()
        repr_string = ', '.join([repr(item) for item in self._data])
        repr_string = '[%s]' % (repr_string)
        return repr_string

    def _load_all(self):
        # Keep paging until the supplier reports exhaustion.
        while not self._exhausted:
            newdata, self._last_key, self._exhausted = \
                self._get_more(last_key=self._last_key,
                               value_dict=self._value_dict)
            self._data.extend(newdata)
        self._all_loaded = True
diff --git a/awx/lib/site-packages/libcloud/common/xmlrpc.py b/awx/lib/site-packages/libcloud/common/xmlrpc.py
new file mode 100644
index 0000000000..2502ea6093
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/common/xmlrpc.py
@@ -0,0 +1,108 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Base classes for working with xmlrpc APIs
+"""
+
+import sys
+
+from libcloud.utils.py3 import xmlrpclib
+from libcloud.utils.py3 import httplib
+from libcloud.common.base import Response, Connection
+
+
class ProtocolError(Exception):
    """
    Raised when the server's response cannot be handled as XML-RPC
    (see :meth:`XMLRPCResponse.parse_error`).
    """
    pass
+
+
class ErrorCodeMixin(object):
    """
    Helper for APIs that have a well defined collection of error codes
    which are easily parsed out of error messages.

    Acts as a factory: it looks up the exception class registered for an
    error code, collects any parameters that class needs from the
    connection's context and raises it.
    """

    # Mapping of provider error code -> exception class; subclasses
    # override this.
    exceptions = {}

    def raise_exception_for_error(self, error_code, message):
        exception_cls = self.exceptions.get(error_code, None)
        if exception_cls is None:
            # No specific exception registered for this code; let the
            # caller decide what to do.
            return

        driver = self.connection.driver
        context = self.connection.context

        extra_kwargs = {}
        for name in getattr(exception_cls, 'kwargs', ()):
            if name in context:
                extra_kwargs[name] = context[name]

        raise exception_cls(value=message, driver=driver, **extra_kwargs)
+
+
class XMLRPCResponse(ErrorCodeMixin, Response):
    """
    Response for XML-RPC API calls.

    Successful responses are demarshalled with xmlrpclib; faults are
    turned into provider specific exceptions via ErrorCodeMixin when a
    mapping exists.
    """

    # Raised for faults that ErrorCodeMixin has no specific mapping for.
    defaultExceptionCls = Exception

    def success(self):
        # XML-RPC signals application errors as faults inside a 200
        # response, so only the HTTP status is checked here.
        return self.status == httplib.OK

    def parse_body(self):
        """
        Demarshal the XML-RPC response body.

        :return: the single result value, or the tuple of values when the
                 response carries more than one parameter.
        :raises: a mapped provider exception or ``defaultExceptionCls``
                 when the response is an XML-RPC fault.
        """
        try:
            params, methodname = xmlrpclib.loads(self.body)
            if len(params) == 1:
                params = params[0]
            return params
        except xmlrpclib.Fault:
            # sys.exc_info() matches the py2/py3-compatible exception
            # handling style used throughout these modules.
            e = sys.exc_info()[1]
            # Raises a mapped exception when one is registered for this
            # fault code; otherwise falls through to the generic error.
            self.raise_exception_for_error(e.faultCode, e.faultString)
            error_string = '%s: %s' % (e.faultCode, e.faultString)
            raise self.defaultExceptionCls(error_string)

    def parse_error(self):
        # Called for non-OK HTTP statuses; XML-RPC defines no standard
        # error body, so only the status code is reported.
        msg = 'Server returned an invalid xmlrpc response (%d)' % (self.status)
        raise ProtocolError(msg)
+
+
class XMLRPCConnection(Connection):
    """
    Connection class which can call XMLRPC based API's.

    This class uses the xmlrpclib marshalling and demarshalling code but uses
    the http transports provided by libcloud giving it better certificate
    validation and debugging helpers than the core client library.
    """

    responseCls = XMLRPCResponse

    def add_default_headers(self, headers):
        # Every XML-RPC request carries an XML payload.
        headers['Content-Type'] = 'text/xml'
        return headers

    def request(self, method_name, *args, **kwargs):
        """
        Call a given `method_name`.

        :type method_name: ``str``
        :param method_name: A method exposed by the xmlrpc endpoint that you
            are connecting to.

        :type args: ``tuple``
        :param args: Arguments to invoke with method with.

        :keyword endpoint: Optional override for the connection's default
            request path.
        """
        endpoint = kwargs.get('endpoint', self.endpoint)
        # allow_none=True maps Python None to the XML-RPC <nil/> extension.
        data = xmlrpclib.dumps(args, methodname=method_name, allow_none=True)
        return super(XMLRPCConnection, self).request(endpoint,
                                                     data=data,
                                                     method='POST')
diff --git a/awx/lib/site-packages/libcloud/compute/__init__.py b/awx/lib/site-packages/libcloud/compute/__init__.py
new file mode 100644
index 0000000000..6d0970a00c
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/__init__.py
@@ -0,0 +1,3 @@
+"""
+Module for working with Cloud Servers
+"""
diff --git a/awx/lib/site-packages/libcloud/compute/base.py b/awx/lib/site-packages/libcloud/compute/base.py
new file mode 100644
index 0000000000..f9a9e31d60
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/base.py
@@ -0,0 +1,1477 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Provides base classes for working with drivers
+"""
+
+from __future__ import with_statement
+
+import sys
+import time
+import hashlib
+import os
+import socket
+import binascii
+
+from libcloud.utils.py3 import b
+
+import libcloud.compute.ssh
+from libcloud.pricing import get_size_price
+from libcloud.compute.types import NodeState, DeploymentError
+from libcloud.compute.ssh import SSHClient
+from libcloud.common.base import ConnectionKey
+from libcloud.common.base import BaseDriver
+from libcloud.common.types import LibcloudError
+from libcloud.compute.ssh import have_paramiko
+
+from libcloud.utils.networking import is_private_subnet
+from libcloud.utils.networking import is_valid_ip_address
+
if have_paramiko:
    from paramiko.ssh_exception import SSHException
    # Exceptions that deploy_node treats as "SSH server not reachable yet"
    # and therefore retries instead of failing immediately.
    SSH_TIMEOUT_EXCEPTION_CLASSES = (SSHException, IOError, socket.gaierror,
                                     socket.error)
else:
    # Without paramiko only the socket-level failures can occur.
    SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, socket.error)

# How long to wait for the node to come online after creating it
NODE_ONLINE_WAIT_TIMEOUT = 10 * 60

# How long to try connecting to a remote SSH server when running a deployment
# script.
SSH_CONNECT_TIMEOUT = 5 * 60


# Public API of this module.
__all__ = [
    'Node',
    'NodeState',
    'NodeSize',
    'NodeImage',
    'NodeLocation',
    'NodeAuthSSHKey',
    'NodeAuthPassword',
    'NodeDriver',

    'StorageVolume',
    'VolumeSnapshot',

    # Deprecated, moved to libcloud.utils.networking
    'is_private_subnet',
    'is_valid_ip_address'
]
+
+
class UuidMixin(object):
    """
    Mixin class for get_uuid function.
    """

    def __init__(self):
        # Lazily-computed cache for :meth:`get_uuid`.
        self._uuid = None

    def get_uuid(self):
        """
        Unique hash for a node, node image, or node size

        The hash is a function of an SHA1 hash of the node, node image,
        or node size's ID and its driver which means that it should be
        unique between all objects of its type.
        In some subclasses (e.g. GoGridNode) there is no ID
        available so the public IP address is used. This means that,
        unlike a properly done system UUID, the same UUID may mean a
        different system install at a different time

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> node.get_uuid()
        'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'

        Note, for example, that this example will always produce the
        same UUID!

        :rtype: ``str``
        """
        if not self._uuid:
            # Derive the identity from "<id>:<driver type>" so objects of
            # the same type from different providers never collide.
            identity = '%s:%s' % (self.id, self.driver.type)
            self._uuid = hashlib.sha1(b(identity)).hexdigest()

        return self._uuid

    @property
    def uuid(self):
        """Cached UUID, computed on first access."""
        return self.get_uuid()
+
+
class Node(UuidMixin):
    """
    Provide a common interface for handling nodes of all types.

    The Node object provides the interface in libcloud through which
    we can manipulate nodes in different cloud providers in the same
    way. Node objects don't actually do much directly themselves,
    instead the node driver handles the connection to the node.

    You don't normally create a node object yourself; instead you use
    a driver and then have that create the node for you.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> node = driver.create_node()
    >>> node.public_ips[0]
    '127.0.0.3'
    >>> node.name
    'dummy-3'

    You can also get nodes from the driver's list_node function.

    >>> node = driver.list_nodes()[0]
    >>> node.name
    'dummy-1'

    The node keeps a reference to its own driver which means that we
    can work on nodes from different providers without having to know
    which is which.

    >>> driver = DummyNodeDriver(72)
    >>> node2 = driver.create_node()
    >>> node.driver.creds
    0
    >>> node2.driver.creds
    72

    Although Node objects can be subclassed, this isn't normally
    done. Instead, any driver specific information is stored in the
    "extra" attribute of the node.

    >>> node.extra
    {'foo': 'bar'}
    """

    def __init__(self, id, name, state, public_ips, private_ips,
                 driver, size=None, image=None, extra=None):
        """
        :param id: Node ID.
        :type id: ``str``

        :param name: Node name.
        :type name: ``str``

        :param state: Node state.
        :type state: :class:`libcloud.compute.types.NodeState`

        :param public_ips: Public IP addresses associated with this node.
        :type public_ips: ``list``

        :param private_ips: Private IP addresses associated with this node.
        :type private_ips: ``list``

        :param driver: Driver this node belongs to.
        :type driver: :class:`.NodeDriver`

        :param size: Size of this node. (optional)
        :type size: :class:`.NodeSize`

        :param image: Image of this node. (optional)
        :type image: :class:`.NodeImage`

        :param extra: Optional provider specific attributes associated with
                      this node.
        :type extra: ``dict``

        """
        # NOTE(review): any falsy id (0, '') is normalized to None here, not
        # just a missing one — confirm callers never pass a legitimate 0 id.
        self.id = str(id) if id else None
        self.name = name
        self.state = state
        self.public_ips = public_ips if public_ips else []
        self.private_ips = private_ips if private_ips else []
        self.driver = driver
        self.size = size
        self.image = image
        self.extra = extra or {}
        UuidMixin.__init__(self)

    def reboot(self):
        """
        Reboot this node

        :return: ``bool``

        This calls the node's driver and reboots the node

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> node.state == NodeState.RUNNING
        True
        >>> node.state == NodeState.REBOOTING
        False
        >>> node.reboot()
        True
        >>> node.state == NodeState.REBOOTING
        True
        """
        return self.driver.reboot_node(self)

    def destroy(self):
        """
        Destroy this node

        :return: ``bool``

        This calls the node's driver and destroys the node

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> from libcloud.compute.types import NodeState
        >>> node = driver.create_node()
        >>> node.state == NodeState.RUNNING
        True
        >>> node.destroy()
        True
        >>> node.state == NodeState.RUNNING
        False

        """
        return self.driver.destroy_node(self)

    def __repr__(self):
        # Bug fix: the format string was empty ('') which made
        # "'' % (6 args)" raise TypeError. Restored the upstream format.
        return (('<Node: uuid=%s, name=%s, state=%s, public_ips=%s, '
                 'private_ips=%s, provider=%s ...>')
                % (self.uuid, self.name, self.state, self.public_ips,
                   self.private_ips, self.driver.name))
+
+
class NodeSize(UuidMixin):
    """
    A Base NodeSize class to derive from.

    NodeSizes are objects which are typically returned a driver's
    list_sizes function. They contain a number of different
    parameters which define how big an image is.

    The exact parameters available depends on the provider.

    N.B. Where a parameter is "unlimited" (for example bandwidth in
    Amazon) this will be given as 0.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> size = driver.list_sizes()[0]
    >>> size.ram
    128
    >>> size.bandwidth
    500
    >>> size.price
    4
    """

    def __init__(self, id, name, ram, disk, bandwidth, price,
                 driver, extra=None):
        """
        :param id: Size ID.
        :type  id: ``str``

        :param name: Size name.
        :type  name: ``str``

        :param ram: Amount of memory (in MB) provided by this size.
        :type  ram: ``int``

        :param disk: Amount of disk storage (in GB) provided by this image.
        :type  disk: ``int``

        :param bandwidth: Amount of bandwidth included with this size.
        :type  bandwidth: ``int``

        :param price: Price (in US dollars) of running this node for an hour.
        :type  price: ``float``

        :param driver: Driver this size belongs to.
        :type  driver: :class:`.NodeDriver`

        :param extra: Optional provider specific attributes associated with
                      this size.
        :type  extra: ``dict``
        """
        self.id = str(id)
        self.name = name
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver
        self.extra = extra or {}
        UuidMixin.__init__(self)

    def __repr__(self):
        # Bug fix: the format string was empty ('') which made
        # "'' % (7 args)" raise TypeError. Restored the upstream format.
        return (('<NodeSize: id=%s, name=%s, ram=%s disk=%s bandwidth=%s '
                 'price=%s driver=%s ...>')
                % (self.id, self.name, self.ram, self.disk, self.bandwidth,
                   self.price, self.driver.name))
+
+
class NodeImage(UuidMixin):
    """
    An operating system image.

    NodeImage objects are typically returned by the driver for the
    cloud provider in response to the list_images function

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> image = driver.list_images()[0]
    >>> image.name
    'Ubuntu 9.10'

    Apart from name and id, there is no further standard information;
    other parameters are stored in a driver specific "extra" variable

    When creating a node, a node image should be given as an argument
    to the create_node function to decide which OS image to use.

    >>> node = driver.create_node(image=image)
    """

    def __init__(self, id, name, driver, extra=None):
        """
        :param id: Image ID.
        :type  id: ``str``

        :param name: Image name.
        :type  name: ``str``

        :param driver: Driver this image belongs to.
        :type  driver: :class:`.NodeDriver`

        :param extra: Optional provider specific attributes associated with
                      this image.
        :type  extra: ``dict``
        """
        self.id = str(id)
        self.name = name
        self.driver = driver
        self.extra = extra or {}
        UuidMixin.__init__(self)

    def __repr__(self):
        # Bug fix: the format string was empty ('') which made
        # "'' % (3 args)" raise TypeError. Restored the upstream format.
        return (('<NodeImage: id=%s, name=%s, driver=%s  ...>')
                % (self.id, self.name, self.driver.name))
+
+
class NodeLocation(object):
    """
    A physical location where nodes can be.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> location = driver.list_locations()[0]
    >>> location.country
    'US'
    """

    def __init__(self, id, name, country, driver):
        """
        :param id: Location ID.
        :type  id: ``str``

        :param name: Location name.
        :type  name: ``str``

        :param country: Location country.
        :type  country: ``str``

        :param driver: Driver this location belongs to.
        :type  driver: :class:`.NodeDriver`
        """
        self.id = str(id)
        self.name = name
        self.country = country
        self.driver = driver

    def __repr__(self):
        # Bug fix: the format string was empty ('') which made
        # "'' % (4 args)" raise TypeError. Restored the upstream format.
        return (('<NodeLocation: id=%s, name=%s, country=%s, driver=%s>')
                % (self.id, self.name, self.country, self.driver.name))
+
+
class NodeAuthSSHKey(object):
    """
    An SSH key to be installed for authentication to a node.

    This is the actual contents of the users ssh public key which will
    normally be installed as root's public key on the node.

    >>> pubkey = '...' # read from file
    >>> from libcloud.compute.base import NodeAuthSSHKey
    >>> k = NodeAuthSSHKey(pubkey)
    >>> k
    <NodeAuthSSHKey>
    """

    def __init__(self, pubkey):
        """
        :param pubkey: Public key material.
        :type pubkey: ``str``
        """
        self.pubkey = pubkey

    def __repr__(self):
        # Bug fix: was an empty string; restored the informative repr
        # (also shown in the class doctest above).
        return '<NodeAuthSSHKey>'
+
+
class NodeAuthPassword(object):
    """
    A password to be used for authentication to a node.
    """
    def __init__(self, password, generated=False):
        """
        :param password: Password.
        :type password: ``str``

        :param generated: ``True`` if this password was automatically
                          generated, ``False`` otherwise.
        :type generated: ``bool``
        """
        self.password = password
        self.generated = generated

    def __repr__(self):
        # Bug fix: was an empty string; restored the informative repr.
        return '<NodeAuthPassword>'
+
+
class StorageVolume(UuidMixin):
    """
    A base StorageVolume class to derive from.
    """

    def __init__(self, id, name, size, driver, extra=None):
        """
        :param id: Storage volume ID.
        :type id: ``str``

        :param name: Storage volume name.
        :type name: ``str``

        :param size: Size of this volume (in GB).
        :type size: ``int``

        :param driver: Driver this image belongs to.
        :type driver: :class:`.NodeDriver`

        :param extra: Optional provider specific attributes.
        :type extra: ``dict``
        """
        self.id = id
        self.name = name
        self.size = size
        self.driver = driver
        # NOTE(review): unlike sibling classes, extra is stored as-is and may
        # remain None — callers appear to depend on that; left unchanged.
        self.extra = extra
        UuidMixin.__init__(self)

    def list_snapshots(self):
        """
        :rtype: ``list`` of ``VolumeSnapshot``
        """
        return self.driver.list_volume_snapshots(volume=self)

    def attach(self, node, device=None):
        """
        Attach this volume to a node.

        :param node: Node to attach volume to
        :type node: :class:`.Node`

        :param device: Where the device is exposed,
                       e.g. '/dev/sdb (optional)
        :type device: ``str``

        :return: ``True`` if attach was successful, ``False`` otherwise.
        :rtype: ``bool``
        """

        return self.driver.attach_volume(node=node, volume=self, device=device)

    def detach(self):
        """
        Detach this volume from its node

        :return: ``True`` if detach was successful, ``False`` otherwise.
        :rtype: ``bool``
        """

        return self.driver.detach_volume(volume=self)

    def snapshot(self, name):
        """
        Creates a snapshot of this volume.

        :return: Created snapshot.
        :rtype: ``VolumeSnapshot``
        """
        return self.driver.create_volume_snapshot(volume=self, name=name)

    def destroy(self):
        """
        Destroy this storage volume.

        :return: ``True`` if destroy was successful, ``False`` otherwise.
        :rtype: ``bool``
        """

        return self.driver.destroy_volume(volume=self)

    def __repr__(self):
        # Bug fix: the format string was empty ('') which made
        # "'' % (3 args)" raise TypeError. Restored the upstream format.
        return '<StorageVolume id=%s size=%s driver=%s>' % (
            self.id, self.size, self.driver.name)
+
+
class VolumeSnapshot(object):
    """
    A base VolumeSnapshot class to derive from.
    """
    def __init__(self, id, driver, size=None, extra=None):
        """
        VolumeSnapshot constructor.

        :param id: Snapshot ID.
        :type id: ``str``

        :param driver: The driver that manages this snapshot.
        :type driver: :class:`.NodeDriver`

        :param size: A snapshot size in GB.
        :type size: ``int``

        :param extra: Provider depends parameters for snapshot.
        :type extra: ``dict``
        """
        self.id = id
        self.driver = driver
        self.size = size
        self.extra = extra or {}

    def destroy(self):
        """
        Destroys this snapshot.

        :rtype: ``bool``
        """
        return self.driver.destroy_volume_snapshot(snapshot=self)

    def __repr__(self):
        # Bug fix: the format string was empty ('') which made
        # "'' % (3 args)" raise TypeError. Restored the upstream format.
        return ('<VolumeSnapshot id=%s size=%s driver=%s>' %
                (self.id, self.size, self.driver.name))
+
+
class KeyPair(object):
    """
    Represents a SSH key pair.
    """

    def __init__(self, name, public_key, fingerprint, driver, private_key=None,
                 extra=None):
        """
        Constructor.

        :keyword name: Name of the key pair object.
        :type name: ``str``

        :keyword public_key: Public key in OpenSSH format.
        :type public_key: ``str``

        :keyword fingerprint: Key fingerprint.
        :type fingerprint: ``str``

        :keyword driver: Driver this key pair belongs to.
        :type driver: :class:`.NodeDriver`

        :keyword private_key: Private key in PEM format.
        :type private_key: ``str``

        :keyword extra: Provider specific attributes associated with this
                        key pair. (optional)
        :type extra: ``dict``
        """
        self.name = name
        self.fingerprint = fingerprint
        self.public_key = public_key
        self.private_key = private_key
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        # Bug fix: the format string was empty ('') which made
        # "'' % (3 args)" raise TypeError. Restored the upstream format.
        return ('<KeyPair name=%s fingerprint=%s driver=%s>' %
                (self.name, self.fingerprint, self.driver.name))
+
+
+class NodeDriver(BaseDriver):
+ """
+ A base NodeDriver class to derive from
+
+ This class is always subclassed by a specific driver. For
+ examples of base behavior of most functions (except deploy node)
+ see the dummy driver.
+
+ """
+
    # Connection class used to talk to the provider API.
    connectionCls = ConnectionKey
    # Human-readable provider name; set by subclasses.
    name = None
    # Provider type constant (see libcloud.compute.types.Provider).
    type = None
    # Default API port override; None means use the connection default.
    port = None
    # Per-driver capability flags; see the note below.
    features = {'create_node': []}

    """
    List of available features for a driver.
        - :meth:`libcloud.compute.base.NodeDriver.create_node`
            - ssh_key: Supports :class:`.NodeAuthSSHKey` as an authentication
              method for nodes.
            - password: Supports :class:`.NodeAuthPassword` as an
              authentication
              method for nodes.
            - generates_password: Returns a password attribute on the Node
              object returned from creation.
    """

    # Maps provider-specific state strings to NodeState constants;
    # populated by subclasses.
    NODE_STATE_MAP = {}
+
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, **kwargs):
        """
        Initialize the driver; all arguments are forwarded unchanged to
        :class:`BaseDriver`, which sets up the API connection.
        """
        super(NodeDriver, self).__init__(key=key, secret=secret, secure=secure,
                                         host=host, port=port,
                                         api_version=api_version, **kwargs)
+
    def list_nodes(self):
        """
        List all nodes.

        :return: list of node objects
        :rtype: ``list`` of :class:`.Node`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'list_nodes not implemented for this driver')
+
    def list_sizes(self, location=None):
        """
        List sizes on a provider

        :param location: The location at which to list sizes
        :type location: :class:`.NodeLocation`

        :return: list of node size objects
        :rtype: ``list`` of :class:`.NodeSize`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'list_sizes not implemented for this driver')
+
    def list_locations(self):
        """
        List data centers for a provider

        :return: list of node location objects
        :rtype: ``list`` of :class:`.NodeLocation`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'list_locations not implemented for this driver')
+
    def create_node(self, **kwargs):
        """
        Create a new node instance. This instance will be started
        automatically.

        Not all hosting API's are created equal and to allow libcloud to
        support as many as possible there are some standard supported
        variations of ``create_node``. These are declared using a
        ``features`` API.
        You can inspect ``driver.features['create_node']`` to see what
        variation of the API you are dealing with:

        ``ssh_key``
            You can inject a public key into a new node allows key based SSH
            authentication.
        ``password``
            You can inject a password into a new node for SSH authentication.
            If no password is provided libcloud will generate a password.
            The password will be available as
            ``return_value.extra['password']``.
        ``generates_password``
            The hosting provider will generate a password. It will be returned
            to you via ``return_value.extra['password']``.

        Some drivers allow you to set how you will authenticate with the
        instance that is created. You can inject this initial authentication
        information via the ``auth`` parameter.

        If a driver supports the ``ssh_key`` feature flag for ``create_node``
        you can upload a public key into the new instance::

            >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
            >>> driver = DummyNodeDriver(0)
            >>> auth = NodeAuthSSHKey('pubkey data here')
            >>> node = driver.create_node("test_node", auth=auth)

        If a driver supports the ``password`` feature flag for ``create_node``
        you can set a password::

            >>> driver = DummyNodeDriver(0)
            >>> auth = NodeAuthPassword('mysecretpassword')
            >>> node = driver.create_node("test_node", auth=auth)

        If a driver supports the ``password`` feature and you don't provide the
        ``auth`` argument libcloud will assign a password::

            >>> driver = DummyNodeDriver(0)
            >>> node = driver.create_node("test_node")
            >>> password = node.extra['password']

        A password will also be returned in this way for drivers that declare
        the ``generates_password`` feature, though in that case the password is
        actually provided to the driver API by the hosting provider rather than
        generated by libcloud.

        You can only pass a :class:`.NodeAuthPassword` or
        :class:`.NodeAuthSSHKey` to ``create_node`` via the auth parameter if
        it has the corresponding feature flag.

        :param name: String with a name for this new node (required)
        :type name: ``str``

        :param size: The size of resources allocated to this node.
                     (required)
        :type size: :class:`.NodeSize`

        :param image: OS Image to boot on node. (required)
        :type image: :class:`.NodeImage`

        :param location: Which data center to create a node in. If empty,
                         undefined behavior will be selected. (optional)
        :type location: :class:`.NodeLocation`

        :param auth: Initial authentication information for the node
                     (optional)
        :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :return: The newly created node.
        :rtype: :class:`.Node`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'create_node not implemented for this driver')
+
+ def deploy_node(self, **kwargs):
+ """
+ Create a new node, and start deployment.
+
+ In order to be able to SSH into a created node access credentials are
+ required.
+
+ A user can pass either a :class:`.NodeAuthPassword` or
+ :class:`.NodeAuthSSHKey` to the ``auth`` argument. If the
+ ``create_node`` implementation supports that kind if credential (as
+ declared in ``self.features['create_node']``) then it is passed on to
+ ``create_node``. Otherwise it is not passed on to ``create_node`` and
+ it is only used for authentication.
+
+ If the ``auth`` parameter is not supplied but the driver declares it
+ supports ``generates_password`` then the password returned by
+ ``create_node`` will be used to SSH into the server.
+
+ Finally, if the ``ssh_key_file`` is supplied that key will be used to
+ SSH into the server.
+
+ This function may raise a :class:`DeploymentException`, if a
+ create_node call was successful, but there is a later error (like SSH
+ failing or timing out). This exception includes a Node object which
+ you may want to destroy if incomplete deployments are not desirable.
+
+ >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+ >>> from libcloud.compute.deployment import ScriptDeployment
+ >>> from libcloud.compute.deployment import MultiStepDeployment
+ >>> from libcloud.compute.base import NodeAuthSSHKey
+ >>> driver = DummyNodeDriver(0)
+ >>> key = NodeAuthSSHKey('...') # read from file
+ >>> script = ScriptDeployment("yum -y install emacs strace tcpdump")
+ >>> msd = MultiStepDeployment([key, script])
+ >>> def d():
+ ... try:
+ ... driver.deploy_node(deploy=msd)
+ ... except NotImplementedError:
+ ... print ("not implemented for dummy driver")
+ >>> d()
+ not implemented for dummy driver
+
+ Deploy node is typically not overridden in subclasses. The
+ existing implementation should be able to handle most such.
+
+ :param deploy: Deployment to run once machine is online and
+ available to SSH.
+ :type deploy: :class:`Deployment`
+
+ :param ssh_username: Optional name of the account which is used
+ when connecting to
+ SSH server (default is root)
+ :type ssh_username: ``str``
+
+ :param ssh_alternate_usernames: Optional list of ssh usernames to
+ try to connect with if using the
+ default one fails
+ :type ssh_alternate_usernames: ``list``
+
+ :param ssh_port: Optional SSH server port (default is 22)
+ :type ssh_port: ``int``
+
+ :param ssh_timeout: Optional SSH connection timeout in seconds
+ (default is 10)
+ :type ssh_timeout: ``float``
+
+ :param auth: Initial authentication information for the node
+ (optional)
+ :type auth: :class:`.NodeAuthSSHKey` or :class:`NodeAuthPassword`
+
+ :param ssh_key: A path (or paths) to an SSH private key with which
+ to attempt to authenticate. (optional)
+ :type ssh_key: ``str`` or ``list`` of ``str``
+
+ :param timeout: How many seconds to wait before timing out.
+ (default is 600)
+ :type timeout: ``int``
+
+ :param max_tries: How many times to retry if a deployment fails
+ before giving up (default is 3)
+ :type max_tries: ``int``
+
+ :param ssh_interface: The interface to wait for. Default is
+ 'public_ips', other option is 'private_ips'.
+ :type ssh_interface: ``str``
+ """
+ if not libcloud.compute.ssh.have_paramiko:
+ raise RuntimeError('paramiko is not installed. You can install ' +
+ 'it using pip: pip install paramiko')
+
+ if 'auth' in kwargs:
+ auth = kwargs['auth']
+ if not isinstance(auth, (NodeAuthSSHKey, NodeAuthPassword)):
+ raise NotImplementedError(
+ 'If providing auth, only NodeAuthSSHKey or'
+ 'NodeAuthPassword is supported')
+ elif 'ssh_key' in kwargs:
+ # If an ssh_key is provided we can try deploy_node
+ pass
+ elif 'create_node' in self.features:
+ f = self.features['create_node']
+ if 'generates_password' not in f and "password" not in f:
+ raise NotImplementedError(
+ 'deploy_node not implemented for this driver')
+ else:
+ raise NotImplementedError(
+ 'deploy_node not implemented for this driver')
+
+ node = self.create_node(**kwargs)
+ max_tries = kwargs.get('max_tries', 3)
+
+ password = None
+ if 'auth' in kwargs:
+ if isinstance(kwargs['auth'], NodeAuthPassword):
+ password = kwargs['auth'].password
+ elif 'password' in node.extra:
+ password = node.extra['password']
+
+ ssh_interface = kwargs.get('ssh_interface', 'public_ips')
+
+ # Wait until node is up and running and has IP assigned
+ try:
+ node, ip_addresses = self.wait_until_running(
+ nodes=[node],
+ wait_period=3,
+ timeout=kwargs.get('timeout', NODE_ONLINE_WAIT_TIMEOUT),
+ ssh_interface=ssh_interface)[0]
+ except Exception:
+ e = sys.exc_info()[1]
+ raise DeploymentError(node=node, original_exception=e, driver=self)
+
+ ssh_username = kwargs.get('ssh_username', 'root')
+ ssh_alternate_usernames = kwargs.get('ssh_alternate_usernames', [])
+ ssh_port = kwargs.get('ssh_port', 22)
+ ssh_timeout = kwargs.get('ssh_timeout', 10)
+ ssh_key_file = kwargs.get('ssh_key', None)
+ timeout = kwargs.get('timeout', SSH_CONNECT_TIMEOUT)
+
+ deploy_error = None
+
+ for username in ([ssh_username] + ssh_alternate_usernames):
+ try:
+ self._connect_and_run_deployment_script(
+ task=kwargs['deploy'], node=node,
+ ssh_hostname=ip_addresses[0], ssh_port=ssh_port,
+ ssh_username=username, ssh_password=password,
+ ssh_key_file=ssh_key_file, ssh_timeout=ssh_timeout,
+ timeout=timeout, max_tries=max_tries)
+ except Exception:
+ # Try alternate username
+ # Todo: Need to fix paramiko so we can catch a more specific
+ # exception
+ e = sys.exc_info()[1]
+ deploy_error = e
+ else:
+ # Script successfully executed, don't try alternate username
+ deploy_error = None
+ break
+
+ if deploy_error is not None:
+ raise DeploymentError(node=node, original_exception=deploy_error,
+ driver=self)
+
+ return node
+
    def reboot_node(self, node):
        """
        Reboot a node.

        :param node: The node to be rebooted
        :type node: :class:`.Node`

        :return: True if the reboot was successful, otherwise False
        :rtype: ``bool``
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'reboot_node not implemented for this driver')
+
    def destroy_node(self, node):
        """
        Destroy a node.

        Depending upon the provider, this may destroy all data associated with
        the node, including backups.

        :param node: The node to be destroyed
        :type node: :class:`.Node`

        :return: True if the destroy was successful, False otherwise.
        :rtype: ``bool``
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'destroy_node not implemented for this driver')
+
+ ##
+ # Volume and snapshot management methods
+ ##
+
    def list_volumes(self):
        """
        List storage volumes.

        :rtype: ``list`` of :class:`.StorageVolume`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'list_volumes not implemented for this driver')
+
    def list_volume_snapshots(self, volume):
        """
        List snapshots for a storage volume.

        :param volume: Volume to list snapshots for.
        :type volume: :class:`.StorageVolume`

        :rtype: ``list`` of :class:`VolumeSnapshot`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'list_volume_snapshots not implemented for this driver')
+
    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a new volume.

        :param size: Size of volume in gigabytes (required)
        :type size: ``int``

        :param name: Name of the volume to be created
        :type name: ``str``

        :param location: Which data center to create a volume in. If
                               empty, undefined behavior will be selected.
                               (optional)
        :type location: :class:`.NodeLocation`

        :param snapshot:  Name of snapshot from which to create the new
                               volume.  (optional)
        :type snapshot:  ``str``

        :return: The newly created volume.
        :rtype: :class:`StorageVolume`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'create_volume not implemented for this driver')
+
    def create_volume_snapshot(self, volume, name):
        """
        Creates a snapshot of the storage volume.

        :param volume: Volume to snapshot.
        :type volume: :class:`.StorageVolume`

        :param name: Name of the created snapshot.
        :type name: ``str``

        :rtype: :class:`VolumeSnapshot`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'create_volume_snapshot not implemented for this driver')
+
    def attach_volume(self, node, volume, device=None):
        """
        Attaches volume to node.

        :param node: Node to attach volume to.
        :type node: :class:`.Node`

        :param volume: Volume to attach.
        :type volume: :class:`.StorageVolume`

        :param device: Where the device is exposed, e.g. '/dev/sdb'
        :type device: ``str``

        :rtype: ``bool``
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError('attach not implemented for this driver')
+
    def detach_volume(self, volume):
        """
        Detaches a volume from a node.

        :param volume: Volume to be detached
        :type volume: :class:`.StorageVolume`

        :rtype: ``bool``
        """

        # Abstract: concrete drivers must override.
        raise NotImplementedError('detach not implemented for this driver')
+
    def destroy_volume(self, volume):
        """
        Destroys a storage volume.

        :param volume: Volume to be destroyed
        :type volume: :class:`StorageVolume`

        :rtype: ``bool``
        """

        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'destroy_volume not implemented for this driver')
+
    def destroy_volume_snapshot(self, snapshot):
        """
        Destroys a snapshot.

        :param snapshot: Snapshot to be destroyed.
        :type snapshot: :class:`VolumeSnapshot`

        :rtype: ``bool``
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'destroy_volume_snapshot not implemented for this driver')
+
+ ##
+ # Image management methods
+ ##
+
    def list_images(self, location=None):
        """
        List images on a provider.

        :param location: The location at which to list images.
        :type location: :class:`.NodeLocation`

        :return: list of node image objects.
        :rtype: ``list`` of :class:`.NodeImage`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'list_images not implemented for this driver')
+
    def create_image(self, node, name, description=None):
        """
        Creates an image from a node object.

        :param node: Node to run the task on.
        :type node: :class:`.Node`

        :param name: name for new image.
        :type name: ``str``

        :param description: description for new image.
        :type description: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.

        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'create_image not implemented for this driver')
+
    def delete_image(self, node_image):
        """
        Deletes a node image from a provider.

        :param node_image: Node image object.
        :type node_image: :class:`.NodeImage`

        :return: ``True`` if delete_image was successful, ``False`` otherwise.
        :rtype: ``bool``
        """

        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'delete_image not implemented for this driver')
+
    def get_image(self, image_id):
        """
        Returns a single node image from a provider.

        :param image_id: ID of the image to retrieve.
        :type image_id: ``str``

        :rtype: :class:`.NodeImage`
        :return: NodeImage instance on success.
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'get_image not implemented for this driver')
+
    def copy_image(self, source_region, node_image, name, description=None):
        """
        Copies an image from a source region to the current region.

        :param source_region: Region to copy the node from.
        :type source_region: ``str``

        :param node_image: NodeImage to copy.
        :type node_image: :class:`.NodeImage`

        :param name: name for new image.
        :type name: ``str``

        :param description: description for new image.
        :type description: ``str``

        :rtype: :class:`.NodeImage`:
        :return: NodeImage instance on success.
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'copy_image not implemented for this driver')
+
+ ##
+ # SSH key pair management methods
+ ##
+
    def list_key_pairs(self):
        """
        List all the available key pair objects.

        :rtype: ``list`` of :class:`.KeyPair` objects
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'list_key_pairs not implemented for this driver')
+
    def get_key_pair(self, name):
        """
        Retrieve a single key pair.

        :param name: Name of the key pair to retrieve.
        :type name: ``str``

        :rtype: :class:`.KeyPair`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'get_key_pair not implemented for this driver')
+
    def create_key_pair(self, name):
        """
        Create a new key pair object.

        :param name: Key pair name.
        :type name: ``str``

        :rtype: :class:`.KeyPair`
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'create_key_pair not implemented for this driver')
+
    def import_key_pair_from_string(self, name, key_material):
        """
        Import a new public key from string.

        :param name: Key pair name.
        :type name: ``str``

        :param key_material: Public key material.
        :type key_material: ``str``

        :rtype: :class:`.KeyPair` object
        """
        # Abstract: concrete drivers must override.
        raise NotImplementedError(
            'import_key_pair_from_string not implemented for this driver')
+
    def import_key_pair_from_file(self, name, key_file_path):
        """
        Import a new public key from a file.

        The file is read in its entirety and delegated to
        :meth:`import_key_pair_from_string`, so drivers only need to
        implement the string based variant.

        :param name: Key pair name.
        :type name: ``str``

        :param key_file_path: Path to the public key file. A leading ``~``
                              is expanded to the user's home directory.
        :type key_file_path: ``str``

        :rtype: :class:`.KeyPair` object
        """
        key_file_path = os.path.expanduser(key_file_path)

        with open(key_file_path, 'r') as fp:
            key_material = fp.read()

        return self.import_key_pair_from_string(name=name,
                                                key_material=key_material)
+
    def delete_key_pair(self, key_pair):
        """
        Delete an existing key pair.

        :param key_pair: Key pair object.
        :type key_pair: :class:`.KeyPair`

        :raises NotImplementedError: Always, unless overridden by a driver.
        """
        raise NotImplementedError(
            'delete_key_pair not implemented for this driver')
+
+ def wait_until_running(self, nodes, wait_period=3, timeout=600,
+ ssh_interface='public_ips', force_ipv4=True):
+ """
+ Block until the provided nodes are considered running.
+
+ Node is considered running when it's state is "running" and when it has
+ at least one IP address assigned.
+
+ :param nodes: List of nodes to wait for.
+ :type nodes: ``list`` of :class:`.Node`
+
+ :param wait_period: How many seconds to wait between each loop
+ iteration. (default is 3)
+ :type wait_period: ``int``
+
+ :param timeout: How many seconds to wait before giving up.
+ (default is 600)
+ :type timeout: ``int``
+
+ :param ssh_interface: Which attribute on the node to use to obtain
+ an IP address. Valid options: public_ips,
+ private_ips. Default is public_ips.
+ :type ssh_interface: ``str``
+
+ :param force_ipv4: Ignore IPv6 addresses (default is True).
+ :type force_ipv4: ``bool``
+
+ :return: ``[(Node, ip_addresses)]`` list of tuple of Node instance and
+ list of ip_address on success.
+ :rtype: ``list`` of ``tuple``
+ """
+ def is_supported(address):
+ """
+ Return True for supported address.
+ """
+ if force_ipv4 and not is_valid_ip_address(address=address,
+ family=socket.AF_INET):
+ return False
+ return True
+
+ def filter_addresses(addresses):
+ """
+ Return list of supported addresses.
+ """
+ return [address for address in addresses if is_supported(address)]
+
+ if ssh_interface not in ['public_ips', 'private_ips']:
+ raise ValueError('ssh_interface argument must either be' +
+ 'public_ips or private_ips')
+
+ start = time.time()
+ end = start + timeout
+
+ uuids = set([node.uuid for node in nodes])
+
+ while time.time() < end:
+ all_nodes = self.list_nodes()
+ matching_nodes = list([node for node in all_nodes
+ if node.uuid in uuids])
+
+ if len(matching_nodes) > len(uuids):
+ found_uuids = [node.uuid for node in matching_nodes]
+ msg = ('Unable to match specified uuids ' +
+ '(%s) with existing nodes. Found ' % (uuids) +
+ 'multiple nodes with same uuid: (%s)' % (found_uuids))
+ raise LibcloudError(value=msg, driver=self)
+
+ running_nodes = [node for node in matching_nodes
+ if node.state == NodeState.RUNNING]
+ addresses = [filter_addresses(getattr(node, ssh_interface))
+ for node in running_nodes]
+
+ if len(running_nodes) == len(uuids) == len(addresses):
+ return list(zip(running_nodes, addresses))
+ else:
+ time.sleep(wait_period)
+ continue
+
+ raise LibcloudError(value='Timed out after %s seconds' % (timeout),
+ driver=self)
+
+ def _get_and_check_auth(self, auth):
+ """
+ Helper function for providers supporting :class:`.NodeAuthPassword` or
+ :class:`.NodeAuthSSHKey`
+
+ Validates that only a supported object type is passed to the auth
+ parameter and raises an exception if it is not.
+
+ If no :class:`.NodeAuthPassword` object is provided but one is expected
+ then a password is automatically generated.
+ """
+
+ if isinstance(auth, NodeAuthPassword):
+ if 'password' in self.features['create_node']:
+ return auth
+ raise LibcloudError(
+ 'Password provided as authentication information, but password'
+ 'not supported', driver=self)
+
+ if isinstance(auth, NodeAuthSSHKey):
+ if 'ssh_key' in self.features['create_node']:
+ return auth
+ raise LibcloudError(
+ 'SSH Key provided as authentication information, but SSH Key'
+ 'not supported', driver=self)
+
+ if 'password' in self.features['create_node']:
+ value = os.urandom(16)
+ value = binascii.hexlify(value).decode('ascii')
+ return NodeAuthPassword(value, generated=True)
+
+ if auth:
+ raise LibcloudError(
+ '"auth" argument provided, but it was not a NodeAuthPassword'
+ 'or NodeAuthSSHKey object', driver=self)
+
    def _wait_until_running(self, node, wait_period=3, timeout=600,
                            ssh_interface='public_ips', force_ipv4=True):
        # Deprecated single-node alias kept for backward compatibility; it
        # will be removed in the next major release. Note it takes one node
        # while wait_until_running() expects a list.
        return self.wait_until_running(nodes=[node], wait_period=wait_period,
                                       timeout=timeout,
                                       ssh_interface=ssh_interface,
                                       force_ipv4=force_ipv4)
+
    def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300):
        """
        Try to connect to the remote SSH server. If a connection times out or
        is refused it is retried up to timeout number of seconds.

        :param ssh_client: A configured SSHClient instance
        :type ssh_client: ``SSHClient``

        :param wait_period: How many seconds to wait between each loop
                            iteration. (default is 1.5)
        :type wait_period: ``int``

        :param timeout: How many seconds to wait before giving up.
                        (default is 300)
        :type timeout: ``int``

        :return: ``SSHClient`` on success

        :raises LibcloudError: If no connection could be established within
                               ``timeout`` seconds.
        """
        start = time.time()
        end = start + timeout

        while time.time() < end:
            try:
                ssh_client.connect()
            except SSH_TIMEOUT_EXCEPTION_CLASSES:
                # sys.exc_info() instead of "except ... as e" keeps this
                # compatible with both Python 2 and 3 syntax.
                e = sys.exc_info()[1]
                message = str(e).lower()
                expected_msg = 'no such file or directory'

                if isinstance(e, IOError) and expected_msg in message:
                    # Propagate (key) file doesn't exist errors
                    # NOTE(review): matching on the error string is fragile -
                    # it assumes an English-locale message. Confirm before
                    # relying on it.
                    raise e

                # Retry if a connection is refused, timeout occurred,
                # or the connection fails due to failed authentication.
                ssh_client.close()
                time.sleep(wait_period)
                continue
            else:
                return ssh_client

        raise LibcloudError(value='Could not connect to the remote SSH ' +
                            'server. Giving up.', driver=self)
+
    def _connect_and_run_deployment_script(self, task, node, ssh_hostname,
                                           ssh_port, ssh_username,
                                           ssh_password, ssh_key_file,
                                           ssh_timeout, timeout, max_tries):
        """
        Establish an SSH connection to the node and run the provided deployment
        task.

        :param task: Deployment task to run on the node.
        :param node: Node to run the task on.
        :param ssh_hostname: Host name or IP address to connect to.
        :param ssh_port: TCP port of the remote SSH server.
        :param ssh_username: Username to authenticate as.
        :param ssh_password: Password to authenticate with (may be ``None``).
        :param ssh_key_file: Private key file(s) to authenticate with.
        :param ssh_timeout: Timeout passed to the SSH client itself.
        :param timeout: How long to keep retrying the initial connection.
        :param max_tries: How many times to retry the deployment task.

        :rtype: :class:`.Node`:
        :return: Node instance on success.
        """
        ssh_client = SSHClient(hostname=ssh_hostname,
                               port=ssh_port, username=ssh_username,
                               password=ssh_password,
                               key_files=ssh_key_file,
                               timeout=ssh_timeout)

        # Retries the connect until it succeeds or "timeout" elapses.
        ssh_client = self._ssh_client_connect(ssh_client=ssh_client,
                                              timeout=timeout)

        # Execute the deployment task
        node = self._run_deployment_script(task=task, node=node,
                                           ssh_client=ssh_client,
                                           max_tries=max_tries)
        return node
+
    def _run_deployment_script(self, task, node, ssh_client, max_tries=3):
        """
        Run the deployment script on the provided node. At this point it is
        assumed that SSH connection has already been established.

        :param task: Deployment task to run.
        :type task: :class:`Deployment`

        :param node: Node to run the task on.
        :type node: ``Node``

        :param ssh_client: A configured and connected SSHClient instance.
        :type ssh_client: :class:`SSHClient`

        :param max_tries: How many times to retry if a deployment fails
                          before giving up. (default is 3)
        :type max_tries: ``int``

        :rtype: :class:`.Node`
        :return: ``Node`` Node instance on success.

        :raises LibcloudError: If the task still fails after ``max_tries``
                               attempts.
        """
        tries = 0

        while tries < max_tries:
            try:
                node = task.run(node, ssh_client)
            except Exception:
                tries += 1

                if tries >= max_tries:
                    # sys.exc_info() keeps this Python 2/3 compatible.
                    # NOTE(review): ssh_client is left open on this failure
                    # path; only the success path closes it.
                    e = sys.exc_info()[1]
                    raise LibcloudError(value='Failed after %d tries: %s'
                                        % (max_tries, str(e)), driver=self)
            else:
                # Deployment succeeded
                ssh_client.close()
                return node
+
    def _get_size_price(self, size_id):
        """
        Return pricing information for the provided size id.

        :param size_id: ID of the size to look up.

        :return: Whatever the pricing table holds for this driver/size
                 combination. NOTE(review): presumably a number or ``None``
                 when no pricing data exists - confirm against
                 ``libcloud.pricing.get_size_price``.
        """
        return get_size_price(driver_type='compute',
                              driver_name=self.api_name,
                              size_id=size_id)
+
+
if __name__ == '__main__':
    # Running this module directly executes any doctests defined in it.
    import doctest
    doctest.testmod()
diff --git a/awx/lib/site-packages/libcloud/compute/deployment.py b/awx/lib/site-packages/libcloud/compute/deployment.py
new file mode 100644
index 0000000000..018c5f86dc
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/deployment.py
@@ -0,0 +1,263 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Provides generic deployment steps for machines post boot.
+"""
+
+from __future__ import with_statement
+
+import os
+import binascii
+
+from libcloud.utils.py3 import basestring, PY3
+
+
class Deployment(object):
    """
    Base class for deployment tasks.
    """

    def run(self, node, client):
        """
        Runs this deployment task on node using the client provided.

        :type node: :class:`Node`
        :keyword node: Node to operate on

        :type client: :class:`BaseSSHClient`
        :keyword client: Connected SSH client to use.

        :return: :class:`Node`
        """
        raise NotImplementedError(
            'run not implemented for this deployment')

    def _get_string_value(self, argument_name, argument_value):
        # Coerce the argument into a plain string: file-like objects are
        # consumed via read(), strings pass through, everything else is
        # rejected.
        if hasattr(argument_value, 'read'):
            return argument_value.read()

        if isinstance(argument_value, basestring):
            return argument_value

        raise TypeError('%s argument must be a string or a file-like '
                        'object' % (argument_name))
+
+
class SSHKeyDeployment(Deployment):
    """
    Installs a public SSH Key onto a server.
    """

    def __init__(self, key):
        """
        :type key: ``str`` or :class:`File` object
        :keyword key: Contents of the public key to install, or a file
                      object it can be read from.
        """
        # Normalize a file-like argument into its string contents up front.
        self.key = self._get_string_value(argument_name='key',
                                          argument_value=key)

    def run(self, node, client):
        """
        Append the public key to ``.ssh/authorized_keys`` on the node.

        See also :class:`Deployment.run`
        """
        client.put(".ssh/authorized_keys", contents=self.key, mode='a')
        return node
+
+
class FileDeployment(Deployment):
    """
    Installs a file on the server.
    """

    def __init__(self, source, target):
        """
        :type source: ``str``
        :keyword source: Local path of the file to be installed

        :type target: ``str``
        :keyword target: Path to install the file to on the node
        """
        self.source = source
        self.target = target

    def run(self, node, client):
        """
        Upload the file, retaining permissions.

        See also :class:`Deployment.run`
        """
        # Extract the permission bits from the octal repr of st_mode.
        # NOTE(review): slicing oct() output is fragile across Python
        # versions and file types; stat.S_IMODE would be the robust
        # spelling - confirm before changing behavior.
        perms = int(oct(os.stat(self.source).st_mode)[4:], 8)

        with open(self.source, 'rb') as source_fp:
            payload = source_fp.read()

        client.put(path=self.target, chmod=perms,
                   contents=payload)
        return node
+
+
class ScriptDeployment(Deployment):
    """
    Runs an arbitrary shell script on the server.

    This step works by first writing the content of the shell script (script
    argument) in a \*.sh file on a remote server and then running that file.

    If you are running a non-shell script, make sure to put the appropriate
    shebang to the top of the script. You are also advised to do that even if
    you are running a plain shell script.
    """

    def __init__(self, script, args=None, name=None, delete=False):
        """
        :type script: ``str``
        :keyword script: Contents of the script to run.

        :type args: ``list``
        :keyword args: Optional command line arguments which get passed to the
                       deployment script file.

        :type name: ``str``
        :keyword name: Name of the script to upload it as, if not specified,
                       a random name will be chosen.

        :type delete: ``bool``
        :keyword delete: Whether to delete the script on completion.
        """
        script = self._get_string_value(argument_name='script',
                                        argument_value=script)

        self.script = script
        self.args = args or []
        self.stdout = None
        self.stderr = None
        self.exit_status = None
        self.delete = delete
        self.name = name

        if self.name is None:
            # File is put under user's home directory
            # (~/libcloud_deployment_<random>.sh)
            random_string = binascii.hexlify(os.urandom(4))
            random_string = random_string.decode('ascii')
            self.name = 'libcloud_deployment_%s.sh' % (random_string)

    def run(self, node, client):
        """
        Uploads the shell script and then executes it.

        See also :class:`Deployment.run`
        """
        file_path = client.put(path=self.name, chmod=int('755', 8),
                               contents=self.script)

        # Pre-pend cwd if user specified a relative path
        if self.name[0] != '/':
            base_path = os.path.dirname(file_path)
            name = os.path.join(base_path, self.name)
        else:
            name = self.name

        # FIX: removed a dead "cmd = name" assignment which was immediately
        # overwritten by both branches below.
        if self.args:
            # Append arguments to the command
            cmd = '%s %s' % (name, ' '.join(self.args))
        else:
            cmd = name

        self.stdout, self.stderr, self.exit_status = client.run(cmd)

        if self.delete:
            client.delete(self.name)

        return node
+
+
class ScriptFileDeployment(ScriptDeployment):
    """
    Runs an arbitrary shell script from a local file on the server. Same as
    ScriptDeployment, except that you can pass in a path to the file instead of
    the script content.
    """

    def __init__(self, script_file, args=None, name=None, delete=False):
        """
        :type script_file: ``str``
        :keyword script_file: Path to a file containing the script to run.

        :type args: ``list``
        :keyword args: Optional command line arguments which get passed to the
                       deployment script file.

        :type name: ``str``
        :keyword name: Name of the script to upload it as, if not specified,
                       a random name will be chosen.

        :type delete: ``bool``
        :keyword delete: Whether to delete the script on completion.
        """
        # Read the script body from disk; decode to text on Python 3 so the
        # parent class always receives a string.
        with open(script_file, 'rb') as script_fp:
            script_body = script_fp.read()

        if PY3:
            script_body = script_body.decode('utf-8')

        super(ScriptFileDeployment, self).__init__(script=script_body,
                                                   args=args,
                                                   name=name,
                                                   delete=delete)
+
+
class MultiStepDeployment(Deployment):
    """
    Runs a chain of Deployment steps.
    """
    def __init__(self, add=None):
        """
        :type add: ``list``
        :keyword add: Deployment steps to add.
        """
        self.steps = []
        self.add(add)

    def add(self, add):
        """
        Add one deployment, or a list of deployments, to this chain.

        :type add: Single :class:`Deployment` or a ``list`` of
                   :class:`Deployment`
        :keyword add: Adds this deployment to the others already in this
                      object.
        """
        if add is None:
            return

        if not isinstance(add, (list, tuple)):
            add = [add]

        self.steps.extend(add)

    def run(self, node, client):
        """
        Run each added deployment in order, feeding every step the node
        returned by the previous one.

        See also :class:`Deployment.run`
        """
        for step in self.steps:
            node = step.run(node, client)
        return node
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/__init__.py b/awx/lib/site-packages/libcloud/compute/drivers/__init__.py
new file mode 100644
index 0000000000..ab02a6b186
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/__init__.py
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Drivers for working with different providers
+"""
+
# Provider driver modules; listed explicitly so tooling (and star imports)
# can discover the available compute drivers.
__all__ = [
    'abiquo',
    'brightbox',
    'bluebox',
    'dummy',
    'ec2',
    'ecp',
    'elasticstack',
    'elastichosts',
    'cloudsigma',
    'gce',
    'gogrid',
    'hostvirtual',
    'ibm_sce',
    'linode',
    'opennebula',
    'rackspace',
    'rimuhosting',
    'softlayer',
    'vcloud',
    'voxel',
    'vpsnet',
]
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/abiquo.py b/awx/lib/site-packages/libcloud/compute/drivers/abiquo.py
new file mode 100644
index 0000000000..cccd5cfa68
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/abiquo.py
@@ -0,0 +1,759 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Abiquo Compute Driver
+
+The driver implements the compute Abiquo functionality for the Abiquo API.
+This version is compatible with the following versions of Abiquo:
+
+ * Abiquo 2.0 (http://wiki.abiquo.com/display/ABI20/The+Abiquo+API)
+ * Abiquo 2.2 (http://wiki.abiquo.com/display/ABI22/The+Abiquo+API)
+"""
+import xml.etree.ElementTree as ET
+
+from libcloud.compute.base import NodeDriver, NodeSize
+from libcloud.compute.types import Provider, LibcloudError
+from libcloud.common.abiquo import (AbiquoConnection, get_href,
+ AbiquoResponse)
+from libcloud.compute.base import NodeLocation, NodeImage, Node
+from libcloud.utils.py3 import tostring
+
+
class AbiquoNodeDriver(NodeDriver):
    """
    Implements the :class:`NodeDriver`'s for the Abiquo Compute Provider
    """

    type = Provider.ABIQUO
    name = 'Abiquo'
    website = 'http://www.abiquo.com/'
    connectionCls = AbiquoConnection
    timeout = 2000  # some images take a lot of time!

    # Media Types
    # MIME types placed in Accept/Content-type headers when talking to the
    # Abiquo REST API.
    NODES_MIME_TYPE = 'application/vnd.abiquo.virtualmachineswithnode+xml'
    NODE_MIME_TYPE = 'application/vnd.abiquo.virtualmachinewithnode+xml'
    VAPP_MIME_TYPE = 'application/vnd.abiquo.virtualappliance+xml'
    VM_TASK_MIME_TYPE = 'application/vnd.abiquo.virtualmachinetask+xml'

    # Others constants
    GIGABYTE = 1073741824  # 2**30 bytes
+
    def __init__(self, user_id, secret, endpoint, **kwargs):
        """
        Initializes Abiquo Driver

        Initializes the :class:`NodeDriver` object and populate the cache.

        :param user_id: identifier of Abiquo user (required)
        :type user_id: ``str``
        :param secret: password of the Abiquo user (required)
        :type secret: ``str``
        :param endpoint: Abiquo API endpoint (required)
        :type endpoint: ``str`` that can be parsed as URL
        """
        # endpoint must be stored before calling the parent constructor:
        # _ex_connection_class_kwargs() reads self.endpoint when the
        # connection is built.
        self.endpoint = endpoint
        super(AbiquoNodeDriver, self).__init__(key=user_id, secret=secret,
                                               secure=False, host=None,
                                               port=None, **kwargs)
        self.ex_populate_cache()
+
    def create_node(self, **kwargs):
        """
        Create a new node instance in Abiquo

        All the :class:`Node`s need to be defined inside a VirtualAppliance
        (called :class:`NodeGroup` here). If there is no group name defined,
        'libcloud' name will be used instead.

        This method wraps these Abiquo actions:

        1. Create a group if it does not exist.
        2. Register a new node in the group.
        3. Deploy the node and boot it.
        4. Retrieves it again to get schedule-time attributes (such as ips
           and vnc ports).

        The rest of the driver methods has been created in a way that, if any
        of these actions fail, the user can not reach an inconsistent state

        :keyword name: The name for this new node (required)
        :type name: ``str``

        :keyword size: The size of resources allocated to this node.
        :type size: :class:`NodeSize`

        :keyword image: OS Image to boot on node. (required)
        :type image: :class:`NodeImage`

        :keyword location: Which data center to create a node in. If empty,
                           undefined behavior will be selected. (optional)
        :type location: :class:`NodeLocation`

        :keyword group_name: Which group this node belongs to. If empty,
                             it will be created into 'libcloud' group. If
                             it does not found any group in the target
                             location (random location if you have not set
                             the parameter), then it will create a new
                             group with this name.
        :type group_name: ``str``

        :return: The newly created node.
        :rtype: :class:`Node`
        """
        # Define the location
        # To be clear:
        # 'xml_loc' is the xml element we navigate into (we need links)
        # 'loc' is the :class:`NodeLocation` entity
        xml_loc, loc = self._define_create_node_location(**kwargs)

        # Define the Group
        group = self._define_create_node_group(xml_loc, loc, **kwargs)

        # Register the Node
        vm = self._define_create_node_node(group, **kwargs)

        # Execute the 'create' in hypervisor action
        self._deploy_remote(vm)

        # Retrieve it again, to get some schedule-time defined values
        edit_vm = get_href(vm, 'edit')
        headers = {'Accept': self.NODE_MIME_TYPE}
        vm = self.connection.request(edit_vm, headers=headers).object
        return self._to_node(vm, self)
+
    def destroy_node(self, node):
        """
        Destroy a node

        Depending on the provider, this may destroy all data associated with
        the node, including backups.

        :param node: The node to be destroyed
        :type node: :class:`Node`

        :return: True if the destroy was successful, otherwise False
        :rtype: ``bool``

        :raises LibcloudError: If the node is in a transient/unknown state
                               where undeploying is not possible.
        """

        # Refresh node state
        e_vm = self.connection.request(node.extra['uri_id']).object
        state = e_vm.findtext('state')

        if state in ['ALLOCATED', 'CONFIGURED', 'LOCKED', 'UNKNOWN']:
            raise LibcloudError('Invalid Node state', self)

        if state != 'NOT_ALLOCATED':
            # prepare the element that forces the undeploy
            vm_task = ET.Element('virtualmachinetask')
            force_undeploy = ET.SubElement(vm_task, 'forceUndeploy')
            force_undeploy.text = 'True'
            # Set the URI
            destroy_uri = node.extra['uri_id'] + '/action/undeploy'
            # Prepare the headers
            headers = {'Content-type': self.VM_TASK_MIME_TYPE}
            res = self.connection.async_request(action=destroy_uri,
                                                method='POST',
                                                data=tostring(vm_task),
                                                headers=headers)

        # NOTE: when state == 'NOT_ALLOCATED', 'res' was never assigned, but
        # the 'or' below short-circuits before evaluating it.
        if state == 'NOT_ALLOCATED' or res.async_success():
            self.connection.request(action=node.extra['uri_id'],
                                    method='DELETE')
            return True
        else:
            return False
+
    def ex_run_node(self, node):
        """
        Runs a node

        Here there is a bit difference between Abiquo states and libcloud
        states, so this method is created to have better compatibility. In
        libcloud, if the node is not running, then it does not exist (avoiding
        UNKNOWN and temporal states). In Abiquo, you can define a node, and
        then deploy it.

        If the node is in :class:`NodeState.TERMINATED` libcloud's state and in
        'NOT_DEPLOYED' Abiquo state, there is a way to run and recover it
        for libcloud using this method. There is no way to reach this state
        if you are using only libcloud, but you may have used another Abiquo
        client and now you want to recover your node to be used by libcloud.

        :param node: The node to run
        :type node: :class:`Node`

        :return: The node itself, but with the new state
        :rtype: :class:`Node`

        :raises LibcloudError: If the node is not in the 'NOT_ALLOCATED'
                               Abiquo state.
        """
        # Refresh node state
        e_vm = self.connection.request(node.extra['uri_id']).object
        state = e_vm.findtext('state')

        if state != 'NOT_ALLOCATED':
            raise LibcloudError('Invalid Node state', self)

        # --------------------------------------------------------
        # Deploy the Node
        # --------------------------------------------------------
        self._deploy_remote(e_vm)

        # --------------------------------------------------------
        # Retrieve it again, to get some schedule-defined
        # values.
        # --------------------------------------------------------
        edit_vm = get_href(e_vm, 'edit')
        headers = {'Accept': self.NODE_MIME_TYPE}
        e_vm = self.connection.request(edit_vm, headers=headers).object
        return self._to_node(e_vm, self)
+
    def ex_populate_cache(self):
        """
        Populate the cache.

        For each connection, it is good to store some objects that will be
        useful for further requests, such as the 'user' and the 'enterprise'
        objects.

        Executes the 'login' resource after setting the connection parameters
        and, if the execution is successful, it sets the 'user' object into
        cache. After that, it also requests for the 'enterprise' and
        'locations' data.

        List of locations should remain the same for a single libcloud
        connection. However, this method is public and you are able to
        refresh the list of locations any time.
        """
        user = self.connection.request('/login').object
        self.connection.cache['user'] = user
        e_ent = get_href(self.connection.cache['user'],
                         'enterprise')
        ent = self.connection.request(e_ent).object
        self.connection.cache['enterprise'] = ent

        uri_vdcs = '/cloud/virtualdatacenters'
        e_vdcs = self.connection.request(uri_vdcs).object

        # Set a dict for the datacenter and its href for a further search
        params = {"idEnterprise": self._get_enterprise_id()}
        e_dcs = self.connection.request('/admin/datacenters',
                                        params=params).object
        dc_dict = {}
        for dc in e_dcs.findall('datacenter'):
            key = get_href(dc, 'edit')
            dc_dict[key] = dc

        # Populate locations cache
        self.connection.cache['locations'] = {}
        for e_vdc in e_vdcs.findall('virtualDatacenter'):
            dc_link = get_href(e_vdc, 'datacenter')
            loc = self._to_location(e_vdc, dc_dict[dc_link], self)

            # Cache the link to the virtual datacenter itself because we will
            # need it later; storing it here avoids having to extend the
            # :class:`NodeLocation` class.
            # The cache maps :class:`NodeLocation` -> link_datacenter.
            self.connection.cache['locations'][loc] = get_href(e_vdc, 'edit')
+
    def ex_create_group(self, name, location=None):
        """
        Create an empty group.

        You can specify the location as well.

        :param name: name of the group (required)
        :type name: ``str``

        :param location: location were to create the group
        :type location: :class:`NodeLocation`

        :returns: the created group
        :rtype: :class:`NodeGroup`

        :raises LibcloudError: If ``location`` is not one of the locations
                               available to the current user.
        """
        # prepare the element
        vapp = ET.Element('virtualAppliance')
        vapp_name = ET.SubElement(vapp, 'name')
        vapp_name.text = name

        if location is None:
            location = self.list_locations()[0]
        elif location not in self.list_locations():
            raise LibcloudError('Location does not exist')

        link_vdc = self.connection.cache['locations'][location]
        e_vdc = self.connection.request(link_vdc).object

        creation_link = get_href(e_vdc, 'virtualappliances')
        headers = {'Content-type': self.VAPP_MIME_TYPE}
        vapp = self.connection.request(creation_link, data=tostring(vapp),
                                       headers=headers, method='POST').object

        uri_vapp = get_href(vapp, 'edit')

        return NodeGroup(self, vapp.findtext('name'),
                         uri=uri_vapp)
+
    def ex_destroy_group(self, group):
        """
        Destroy a group.

        Be careful! Destroying a group means destroying all the :class:`Node`s
        there and the group itself!

        If there is currently any action over any :class:`Node` of the
        :class:`NodeGroup`, then the method will raise an exception.

        :param group: The group (required)
        :type group: :class:`NodeGroup`

        :return: If the group was destroyed successfully
        :rtype: ``bool``

        :raises LibcloudError: If the group is in a state other than
                               'NOT_DEPLOYED' or 'DEPLOYED'.
        """
        # Refresh group state
        e_group = self.connection.request(group.uri).object
        state = e_group.findtext('state')

        if state not in ['NOT_DEPLOYED', 'DEPLOYED']:
            error = 'Can not destroy group because of current state'
            raise LibcloudError(error, self)

        if state == 'DEPLOYED':
            # prepare the element that forces the undeploy
            vm_task = ET.Element('virtualmachinetask')
            force_undeploy = ET.SubElement(vm_task, 'forceUndeploy')
            force_undeploy.text = 'True'

            # Set the URI
            undeploy_uri = group.uri + '/action/undeploy'

            # Prepare the headers
            headers = {'Content-type': self.VM_TASK_MIME_TYPE}
            res = self.connection.async_request(action=undeploy_uri,
                                                method='POST',
                                                data=tostring(vm_task),
                                                headers=headers)

        # NOTE: when state == 'NOT_DEPLOYED', 'res' was never assigned, but
        # the 'or' below short-circuits before evaluating it.
        if state == 'NOT_DEPLOYED' or res.async_success():
            # The node is no longer deployed. Unregister it.
            self.connection.request(action=group.uri,
                                    method='DELETE')
            return True
        else:
            return False
+
    def ex_list_groups(self, location=None):
        """
        List all groups.

        :param location: filter the groups by location (optional)
        :type location: a :class:`NodeLocation` instance.

        :return: the list of :class:`NodeGroup`
        :rtype: ``list`` of :class:`NodeGroup`
        """
        groups = []
        for vdc in self._get_locations(location):
            link_vdc = self.connection.cache['locations'][vdc]
            e_vdc = self.connection.request(link_vdc).object
            apps_link = get_href(e_vdc, 'virtualappliances')
            vapps = self.connection.request(apps_link).object
            for vapp in vapps.findall('virtualAppliance'):
                nodes = []
                vms_link = get_href(vapp, 'virtualmachines')
                headers = {'Accept': self.NODES_MIME_TYPE}
                vms = self.connection.request(vms_link, headers=headers).object
                for vm in vms.findall('virtualmachinewithnode'):
                    nodes.append(self._to_node(vm, self))

                group = NodeGroup(self, vapp.findtext('name'),
                                  nodes, get_href(vapp, 'edit'))
                groups.append(group)

        return groups
+
    def list_images(self, location=None):
        """
        List images on Abiquo Repositories

        :keyword location: The location to list images for.
        :type location: :class:`NodeLocation`

        :return: list of node image objects
        :rtype: ``list`` of :class:`NodeImage`
        """
        enterprise_id = self._get_enterprise_id()
        uri = '/admin/enterprises/%s/datacenterrepositories/' % (enterprise_id)
        repos = self.connection.request(uri).object

        images = []
        for repo in repos.findall('datacenterRepository'):
            # filter by location. Skips when the name of the location
            # is different from the 'datacenterRepository' element
            for vdc in self._get_locations(location):
                # Check if the virtual datacenter belongs to this repo
                link_vdc = self.connection.cache['locations'][vdc]
                e_vdc = self.connection.request(link_vdc).object
                dc_link_vdc = get_href(e_vdc, 'datacenter')
                dc_link_repo = get_href(repo, 'datacenter')

                if dc_link_vdc == dc_link_repo:
                    # Filter the template in case we don't have it yet
                    url_templates = get_href(repo, 'virtualmachinetemplates')
                    hypervisor_type = e_vdc.findtext('hypervisorType')
                    params = {'hypervisorTypeName': hypervisor_type}
                    # 'params' is passed positionally; it maps onto the
                    # request's query parameters argument.
                    templates = self.connection.request(url_templates,
                                                        params).object
                    for templ in templates.findall('virtualMachineTemplate'):
                        # Avoid duplicated templates
                        id_template = templ.findtext('id')
                        ids = [image.id for image in images]
                        if id_template not in ids:
                            images.append(self._to_nodeimage(templ, self,
                                                             get_href(repo,
                                                                      'edit')))

        return images
+
+ def list_locations(self):
+ """
+ Return list of locations where the user has access to.
+
+ :return: the list of :class:`NodeLocation` available for the current
+ user
+ :rtype: ``list`` of :class:`NodeLocation`
+ """
+ return list(self.connection.cache['locations'].keys())
+
+ def list_nodes(self, location=None):
+ """
+ List all nodes.
+
+ :param location: Filter the groups by location (optional)
+ :type location: a :class:`NodeLocation` instance.
+
+ :return: List of node objects
+ :rtype: ``list`` of :class:`Node`
+ """
+ nodes = []
+
+ for group in self.ex_list_groups(location):
+ nodes.extend(group.nodes)
+
+ return nodes
+
+ def list_sizes(self, location=None):
+ """
+ List sizes on a provider.
+
+ Abiquo does not work with sizes. However, this method
+ returns a list of predefined ones (copied from :class:`DummyNodeDriver`
+ but without price neither bandwidth) to help the users to create their
+ own.
+
+ If you call the method :class:`AbiquoNodeDriver.create_node` with the
+ size informed, it will just override the 'ram' value of the 'image'
+ template. So it is no too much usefull work with sizes...
+
+ :return: The list of sizes
+ :rtype: ``list`` of :class:`NodeSizes`
+ """
+ return [
+ NodeSize(id=1,
+ name='Small',
+ ram=128,
+ disk=4,
+ bandwidth=None,
+ price=None,
+ driver=self),
+ NodeSize(id=2,
+ name='Medium',
+ ram=512,
+ disk=16,
+ bandwidth=None,
+ price=None,
+ driver=self),
+ NodeSize(id=3,
+ name='Big',
+ ram=4096,
+ disk=32,
+ bandwidth=None,
+ price=None,
+ driver=self),
+ NodeSize(id=4,
+ name="XXL Big",
+ ram=4096 * 2,
+ disk=32 * 4,
+ bandwidth=None,
+ price=None,
+ driver=self)
+ ]
+
+ def reboot_node(self, node):
+ """
+ Reboot a node.
+
+ :param node: The node to be rebooted
+ :type node: :class:`Node`
+
+ :return: True if the reboot was successful, otherwise False
+ :rtype: ``bool``
+ """
+ reboot_uri = node.extra['uri_id'] + '/action/reset'
+ res = self.connection.async_request(action=reboot_uri, method='POST')
+ return res.async_success()
+
    # -------------------------
    # Extension methods
    # -------------------------
+
+ def _ex_connection_class_kwargs(self):
+ """
+ Set the endpoint as an extra :class:`AbiquoConnection` argument.
+
+ According to Connection code, the "url" argument should be
+ parsed properly to connection.
+
+ :return: ``dict`` of :class:`AbiquoConnection` input arguments
+ """
+
+ return {'url': self.endpoint}
+
+ def _deploy_remote(self, e_vm):
+ """
+ Asynchronous call to create the node.
+ """
+ # --------------------------------------------------------
+ # Deploy the Node
+ # --------------------------------------------------------
+ # prepare the element that forces the deploy
+ vm_task = ET.Element('virtualmachinetask')
+ force_deploy = ET.SubElement(vm_task, 'forceEnterpriseSoftLimits')
+ force_deploy.text = 'True'
+
+ # Prepare the headers
+ headers = {'Content-type': self.VM_TASK_MIME_TYPE}
+ link_deploy = get_href(e_vm, 'deploy')
+ res = self.connection.async_request(action=link_deploy, method='POST',
+ data=tostring(vm_task),
+ headers=headers)
+ if not res.async_success():
+ raise LibcloudError('Could not run the node', self)
+
+ def _to_location(self, vdc, dc, driver):
+ """
+ Generates the :class:`NodeLocation` class.
+ """
+ identifier = vdc.findtext('id')
+ name = vdc.findtext('name')
+ country = dc.findtext('name')
+ return NodeLocation(identifier, name, country, driver)
+
+ def _to_node(self, vm, driver):
+ """
+ Generates the :class:`Node` class.
+ """
+ identifier = vm.findtext('id')
+ name = vm.findtext('nodeName')
+ state = AbiquoResponse.NODE_STATE_MAP[vm.findtext('state')]
+
+ link_image = get_href(vm, 'virtualmachinetemplate')
+ image_element = self.connection.request(link_image).object
+ repo_link = get_href(image_element, 'datacenterrepository')
+ image = self._to_nodeimage(image_element, self, repo_link)
+
+ # Fill the 'ips' data
+ private_ips = []
+ public_ips = []
+ nics_element = self.connection.request(get_href(vm, 'nics')).object
+ for nic in nics_element.findall('nic'):
+ ip = nic.findtext('ip')
+ for link in nic.findall('link'):
+ rel = link.attrib['rel']
+ if rel == 'privatenetwork':
+ private_ips.append(ip)
+ elif rel in ['publicnetwork', 'externalnetwork',
+ 'unmanagednetwork']:
+ public_ips.append(ip)
+
+ extra = {'uri_id': get_href(vm, 'edit')}
+
+ if vm.find('vdrpIp') is not None:
+ extra['vdrp_ip'] = vm.findtext('vdrpIP')
+ extra['vdrp_port'] = vm.findtext('vdrpPort')
+
+ return Node(identifier, name, state, public_ips, private_ips,
+ driver, image=image, extra=extra)
+
+ def _to_nodeimage(self, template, driver, repo):
+ """
+ Generates the :class:`NodeImage` class.
+ """
+ identifier = template.findtext('id')
+ name = template.findtext('name')
+ url = get_href(template, 'edit')
+ extra = {'repo': repo, 'url': url}
+ return NodeImage(identifier, name, driver, extra)
+
+ def _get_locations(self, location=None):
+ """
+ Returns the locations as a generator.
+ """
+ if location is not None:
+ yield location
+ else:
+ for loc in self.list_locations():
+ yield loc
+
+ def _get_enterprise_id(self):
+ """
+ Returns the identifier of the logged user's enterprise.
+ """
+ return self.connection.cache['enterprise'].findtext('id')
+
+ def _define_create_node_location(self, **kwargs):
+ """
+ Search for a location where to create the node.
+
+ Based on 'create_node' **kwargs argument, decide in which
+ location will be created.
+ """
+ # First, get image location
+ if 'image' not in kwargs:
+ error = "'image' parameter is mandatory"
+ raise LibcloudError(error, self)
+
+ image = kwargs['image']
+
+ # Get the location argument
+ location = None
+ if 'location' in kwargs:
+ location = kwargs['location']
+ if location not in self.list_locations():
+ raise LibcloudError('Location does not exist')
+
+ # Check if the image is compatible with any of the locations or
+ # the input location
+ loc = None
+ target_loc = None
+ for candidate_loc in self._get_locations(location):
+ link_vdc = self.connection.cache['locations'][candidate_loc]
+ e_vdc = self.connection.request(link_vdc).object
+ # url_location = get_href(e_vdc, 'datacenter')
+ for img in self.list_images(candidate_loc):
+ if img.id == image.id:
+ loc = e_vdc
+ target_loc = candidate_loc
+ break
+
+ if loc is None:
+ error = 'The image can not be used in any location'
+ raise LibcloudError(error, self)
+
+ return loc, target_loc
+
+ def _define_create_node_group(self, xml_loc, loc, **kwargs):
+ """
+ Search for a group where to create the node.
+
+ If we can not find any group, create it into argument 'location'
+ """
+ if 'group_name' not in kwargs:
+ group_name = NodeGroup.DEFAULT_GROUP_NAME
+ else:
+ group_name = kwargs['group_name']
+
+ # We search if the group is already defined into the location
+ groups_link = get_href(xml_loc, 'virtualappliances')
+ vapps_element = self.connection.request(groups_link).object
+ target_group = None
+ for vapp in vapps_element.findall('virtualAppliance'):
+ if vapp.findtext('name') == group_name:
+ uri_vapp = get_href(vapp, 'edit')
+ return NodeGroup(self, vapp.findtext('name'), uri=uri_vapp)
+
+ # target group not found: create it. Since it is an extension of
+ # the basic 'libcloud' functionality, we try to be as flexible as
+ # possible.
+ if target_group is None:
+ return self.ex_create_group(group_name, loc)
+
+ def _define_create_node_node(self, group, **kwargs):
+ """
+ Defines the node before to create.
+
+ In Abiquo, you first need to 'register' or 'define' the node in
+ the API before to create it into the target hypervisor.
+ """
+ vm = ET.Element('virtualmachinewithnode')
+ if 'name' in kwargs:
+ vmname = ET.SubElement(vm, 'nodeName')
+ vmname.text = kwargs['name']
+ attrib = {'type': 'application/vnd.abiquo/virtualmachinetemplate+xml',
+ 'rel': 'virtualmachinetemplate',
+ 'href': kwargs['image'].extra['url']}
+ ET.SubElement(vm, 'link', attrib=attrib)
+ headers = {'Content-type': self.NODE_MIME_TYPE}
+
+ if 'size' in kwargs:
+ # Override the 'NodeSize' data
+ ram = ET.SubElement(vm, 'ram')
+ ram.text = str(kwargs['size'].ram)
+ hd = ET.SubElement(vm, 'hdInBytes')
+ hd.text = str(int(kwargs['size'].disk) * self.GIGABYTE)
+
+ # Create the virtual machine
+ nodes_link = group.uri + '/virtualmachines'
+ vm = self.connection.request(nodes_link, data=tostring(vm),
+ headers=headers, method='POST').object
+ edit_vm = get_href(vm, 'edit')
+ headers = {'Accept': self.NODE_MIME_TYPE}
+
+ return self.connection.request(edit_vm, headers=headers).object
+
+
class NodeGroup(object):
    """
    Group of virtual machines that can be managed together

    All :class:`Node`s in Abiquo must be defined inside a Virtual Appliance.
    We offer a way to handle virtual appliances (called NodeGroup to
    maintain some kind of name conventions here) inside the
    :class:`AbiquoNodeDriver` without breaking compatibility of the rest of
    libcloud API.

    If the user does not want to handle groups, all the virtual machines
    will be created inside a group named 'libcloud'
    """
    DEFAULT_GROUP_NAME = 'libcloud'

    def __init__(self, driver, name=DEFAULT_GROUP_NAME, nodes=None, uri=''):
        """
        Initialize a new group object.

        :param driver: driver the group delegates its operations to
        :param name:   group (virtual appliance) name
        :param nodes:  nodes contained in the group (defaults to empty)
        :param uri:    'edit' link of the virtual appliance
        """
        self.driver = driver
        self.name = name
        # BUGFIX: a mutable default argument ([]) was previously used, so
        # every group created without 'nodes' shared one list object.
        self.nodes = nodes if nodes is not None else []
        self.uri = uri

    def __repr__(self):
        # BUGFIX: the format string was empty, so repr() raised
        # "not all arguments converted during string formatting".
        return ('<NodeGroup: name=%s, nodes=[%s]>'
                % (self.name, ",".join(map(str, self.nodes))))

    def destroy(self):
        """
        Destroys the group delegating the execution to
        :class:`AbiquoNodeDriver`.
        """
        return self.driver.ex_destroy_group(self)
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/bluebox.py b/awx/lib/site-packages/libcloud/compute/drivers/bluebox.py
new file mode 100644
index 0000000000..204e0de6bf
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/bluebox.py
@@ -0,0 +1,235 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+libcloud driver for the Blue Box Blocks API
+
+This driver implements all libcloud functionality for the Blue Box Blocks API.
+
+Blue Box home page http://bluebox.net
+Blue Box API documentation https://boxpanel.bluebox
+.net/public/the_vault/index.php/Blocks_API
+"""
+
+import copy
+import base64
+
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import b
+
+from libcloud.common.base import JsonResponse, ConnectionUserAndKey
+from libcloud.compute.providers import Provider
+from libcloud.compute.types import NodeState, InvalidCredsError
+from libcloud.compute.base import Node, NodeDriver
+from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
+from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
+
# Current end point for Blue Box API.
BLUEBOX_API_HOST = "boxpanel.bluebox.net"

# The API doesn't currently expose all of the required values for libcloud,
# so we simply list what's available right now, along with all of the various
# attributes that are needed by libcloud.
# Keys are the Blue Box product slugs; 'id' is the product UUID. Judging by
# the size names, 'ram' is in MB and 'disk' presumably in GB -- TODO confirm
# against the Blocks API docs.
BLUEBOX_INSTANCE_TYPES = {
    '1gb': {
        'id': '94fd37a7-2606-47f7-84d5-9000deda52ae',
        'name': 'Block 1GB Virtual Server',
        'ram': 1024,
        'disk': 20,
        'cpu': 0.5
    },
    '2gb': {
        'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092',
        'name': 'Block 2GB Virtual Server',
        'ram': 2048,
        'disk': 25,
        'cpu': 1
    },
    '4gb': {
        'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58',
        'name': 'Block 4GB Virtual Server',
        'ram': 4096,
        'disk': 50,
        'cpu': 2
    },
    '8gb': {
        'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251',
        'name': 'Block 8GB Virtual Server',
        'ram': 8192,
        'disk': 100,
        'cpu': 4
    }
}

# NOTE(review): not referenced anywhere in this module; presumably MB of RAM
# per CPU core -- verify against callers before relying on it.
RAM_PER_CPU = 2048

# Map Blue Box block status strings onto libcloud NodeState values.
NODE_STATE_MAP = {'queued': NodeState.PENDING,
                  'building': NodeState.PENDING,
                  'running': NodeState.RUNNING,
                  'error': NodeState.TERMINATED,
                  'unknown': NodeState.UNKNOWN}
+
+
class BlueboxResponse(JsonResponse):
    """JSON response that maps HTTP 401 replies to credential errors."""

    def parse_error(self):
        # A 401 always means bad credentials; any other error body is
        # handed back to the caller unchanged.
        if int(self.status) == 401:
            if self.body:
                raise InvalidCredsError(self.body)
            raise InvalidCredsError(str(self.status) + ': ' + self.error)
        return self.body
+
+
class BlueboxNodeSize(NodeSize):
    """
    NodeSize variant that also records the number of CPUs of a product.
    """

    def __init__(self, id, name, cpu, ram, disk, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.price = price
        self.driver = driver

    def __repr__(self):
        # BUGFIX: the format string was empty, so repr() raised
        # "not all arguments converted during string formatting".
        return (('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s, disk=%s, '
                 'price=%s, driver=%s>')
                % (self.id, self.name, self.cpu, self.ram, self.disk,
                   self.price, self.driver.name))
+
+
class BlueboxConnection(ConnectionUserAndKey):
    """
    Connection class for the Bluebox driver

    Authenticates every request with HTTP basic auth built from the
    customer id / API key pair.
    """

    host = BLUEBOX_API_HOST
    secure = True
    responseCls = BlueboxResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        credentials = '%s:%s' % (self.user_id, self.key)
        encoded = base64.b64encode(b(credentials))
        headers['Authorization'] = 'Basic %s' % (encoded)
        return headers
+
+
class BlueboxNodeDriver(NodeDriver):
    """
    Bluebox Blocks node driver
    """

    connectionCls = BlueboxConnection
    type = Provider.BLUEBOX
    api_name = 'bluebox'
    name = 'Bluebox Blocks'
    website = 'http://bluebox.net'
    features = {'create_node': ['ssh_key', 'password']}

    def list_nodes(self):
        """
        List all blocks (virtual servers) in the account.

        :rtype: ``list`` of :class:`Node`
        """
        result = self.connection.request('/api/blocks.json')
        return [self._to_node(i) for i in result.object]

    def list_sizes(self, location=None):
        """
        Return the statically defined Blue Box product sizes with prices.

        :rtype: ``list`` of :class:`BlueboxNodeSize`
        """
        sizes = []
        for key, values in list(BLUEBOX_INSTANCE_TYPES.items()):
            attributes = copy.deepcopy(values)
            attributes.update({'price': self._get_size_price(size_id=key)})
            sizes.append(BlueboxNodeSize(driver=self.connection.driver,
                                         **attributes))

        return sizes

    def list_images(self, location=None):
        """
        List the available block templates.

        :rtype: ``list`` of :class:`NodeImage`
        """
        result = self.connection.request('/api/block_templates.json')
        return [self._to_image(image) for image in result.object]

    def create_node(self, **kwargs):
        """
        Create a new block.

        Requires 'name', 'image' and 'size' keyword arguments.
        Authentication comes from the optional 'auth' argument (SSH key
        or password); an 'ex_username' may also be supplied.

        :rtype: :class:`Node`
        """
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        # BUGFIX: 'size' was previously read from kwargs twice.
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        auth = self._get_and_check_auth(kwargs.get('auth'))

        data = {
            'hostname': name,
            'product': size.id,
            'template': image.id
        }

        ssh = None
        password = None

        if isinstance(auth, NodeAuthSSHKey):
            ssh = auth.pubkey
            data.update(ssh_public_key=ssh)
        elif isinstance(auth, NodeAuthPassword):
            password = auth.password
            data.update(password=password)

        if "ex_username" in kwargs:
            data.update(username=kwargs["ex_username"])

        if not ssh and not password:
            raise Exception("SSH public key or password required.")

        params = urlencode(data)
        result = self.connection.request('/api/blocks.json', headers=headers,
                                         data=params, method='POST')
        node = self._to_node(result.object)

        # Expose a generated password so the caller can still log in.
        if getattr(auth, "generated", False):
            node.extra['password'] = auth.password

        return node

    def destroy_node(self, node):
        """Delete the block; True when the API answered 200."""
        url = '/api/blocks/%s.json' % (node.id)
        result = self.connection.request(url, method='DELETE')

        return result.status == 200

    def list_locations(self):
        """Blue Box exposes a single (Seattle, US) location."""
        return [NodeLocation(0, "Blue Box Seattle US", 'US', self)]

    def reboot_node(self, node):
        """Reboot the block; True when the API answered 200."""
        url = '/api/blocks/%s/reboot.json' % (node.id)
        result = self.connection.request(url, method="PUT")
        return result.status == 200

    def _to_node(self, vm):
        """Build a :class:`Node` from an API block dict."""
        # BUGFIX: a missing 'status' key or a status string absent from
        # NODE_STATE_MAP used to raise KeyError (the old default value,
        # NodeState.UNKNOWN, is not a key of the map). Fall back to
        # NodeState.UNKNOWN instead.
        state = NODE_STATE_MAP.get(vm.get('status'), NodeState.UNKNOWN)
        n = Node(id=vm['id'],
                 name=vm['hostname'],
                 state=state,
                 public_ips=[ip['address'] for ip in vm['ips']],
                 private_ips=[],
                 extra={'storage': vm['storage'], 'cpu': vm['cpu']},
                 driver=self.connection.driver)
        return n

    def _to_image(self, image):
        """Build a :class:`NodeImage` from an API template dict."""
        image = NodeImage(id=image['id'],
                          name=image['description'],
                          driver=self.connection.driver)
        return image
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/brightbox.py b/awx/lib/site-packages/libcloud/compute/drivers/brightbox.py
new file mode 100644
index 0000000000..8798332ff0
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/brightbox.py
@@ -0,0 +1,306 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Brightbox Driver
+"""
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+
+from libcloud.common.brightbox import BrightboxConnection
+from libcloud.compute.types import Provider, NodeState
+from libcloud.compute.base import NodeDriver
+from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
+
+import base64
+
+
+API_VERSION = '1.0'
+
+
+def _extract(d, keys):
+ return dict((k, d[k]) for k in keys if k in d and d[k] is not None)
+
+
class BrightboxNodeDriver(NodeDriver):
    """
    Brightbox node driver
    """

    connectionCls = BrightboxConnection

    type = Provider.BRIGHTBOX
    name = 'Brightbox'
    website = 'http://www.brightbox.co.uk/'

    # Map Brightbox server status strings onto libcloud NodeState values.
    NODE_STATE_MAP = {'creating': NodeState.PENDING,
                      'active': NodeState.RUNNING,
                      'inactive': NodeState.UNKNOWN,
                      'deleting': NodeState.UNKNOWN,
                      'deleted': NodeState.TERMINATED,
                      'failed': NodeState.UNKNOWN,
                      'unavailable': NodeState.UNKNOWN}

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=API_VERSION, **kwargs):
        super(BrightboxNodeDriver, self).__init__(key=key, secret=secret,
                                                  secure=secure,
                                                  host=host, port=port,
                                                  api_version=api_version,
                                                  **kwargs)

    def _to_node(self, data):
        """Build a :class:`Node` from an API server representation."""
        extra_data = _extract(data, ['fqdn', 'user_data', 'status',
                                     'interfaces', 'snapshots',
                                     'server_groups', 'hostname',
                                     'started_at', 'created_at',
                                     'deleted_at'])
        extra_data['zone'] = self._to_location(data['zone'])

        # Interface IPv6 addresses count as public; interface IPv4
        # addresses are private, and public IPv4 comes from mapped
        # cloud IPs.
        ipv6_addresses = [interface['ipv6_address'] for interface
                          in data['interfaces'] if 'ipv6_address' in interface]

        private_ips = [interface['ipv4_address']
                       for interface in data['interfaces']
                       if 'ipv4_address' in interface]

        public_ips = [cloud_ip['public_ip'] for cloud_ip in data['cloud_ips']]
        public_ips += ipv6_addresses

        return Node(
            id=data['id'],
            name=data['name'],
            state=self.NODE_STATE_MAP[data['status']],
            private_ips=private_ips,
            public_ips=public_ips,
            driver=self.connection.driver,
            size=self._to_size(data['server_type']),
            image=self._to_image(data['image']),
            extra=extra_data
        )

    def _to_image(self, data):
        """Build a :class:`NodeImage`; 'ancestor' images map recursively."""
        extra_data = _extract(data, ['arch', 'compatibility_mode',
                                     'created_at', 'description',
                                     'disk_size', 'min_ram', 'official',
                                     'owner', 'public', 'source',
                                     'source_type', 'status', 'username',
                                     'virtual_size', 'licence_name'])

        if data.get('ancestor', None):
            extra_data['ancestor'] = self._to_image(data['ancestor'])

        return NodeImage(
            id=data['id'],
            name=data['name'],
            driver=self,
            extra=extra_data
        )

    def _to_size(self, data):
        """Build a :class:`NodeSize` from an API server type."""
        return NodeSize(
            id=data['id'],
            name=data['name'],
            ram=data['ram'],
            disk=data['disk_size'],
            bandwidth=0,
            price=0,
            driver=self
        )

    def _to_location(self, data):
        """Build a :class:`NodeLocation`, or None for a missing zone."""
        if data:
            return NodeLocation(
                id=data['id'],
                name=data['handle'],
                country='GB',
                driver=self
            )
        else:
            return None

    def _post(self, path, data=None):
        """POST *data* as JSON to *path*."""
        # BUGFIX: replaced the shared mutable default argument ({}).
        if data is None:
            data = {}
        headers = {'Content-Type': 'application/json'}
        return self.connection.request(path, data=data, headers=headers,
                                       method='POST')

    def _put(self, path, data=None):
        """PUT *data* as JSON to *path*."""
        # BUGFIX: replaced the shared mutable default argument ({}).
        if data is None:
            data = {}
        headers = {'Content-Type': 'application/json'}
        return self.connection.request(path, data=data, headers=headers,
                                       method='PUT')

    def create_node(self, **kwargs):
        """Create a new Brightbox node

        Reference: https://api.gb1.brightbox.com/1.0/#server_create_server

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_userdata: User data
        :type ex_userdata: ``str``

        :keyword ex_servergroup: Name or list of server group ids to
                                 add server to
        :type ex_servergroup: ``str`` or ``list`` of ``str``
        """
        data = {
            'name': kwargs['name'],
            'server_type': kwargs['size'].id,
            'image': kwargs['image'].id,
        }

        if 'ex_userdata' in kwargs:
            # user data must be base64-encoded text
            data['user_data'] = base64.b64encode(b(kwargs['ex_userdata'])) \
                                      .decode('ascii')

        if 'location' in kwargs:
            data['zone'] = kwargs['location'].id

        if 'ex_servergroup' in kwargs:
            if not isinstance(kwargs['ex_servergroup'], list):
                kwargs['ex_servergroup'] = [kwargs['ex_servergroup']]
            data['server_groups'] = kwargs['ex_servergroup']

        data = self._post('/%s/servers' % self.api_version, data).object
        return self._to_node(data)

    def destroy_node(self, node):
        """Destroy the server; True when the API accepted the request."""
        response = self.connection.request(
            '/%s/servers/%s' % (self.api_version, node.id),
            method='DELETE')
        return response.status == httplib.ACCEPTED

    def list_nodes(self):
        """List all servers of the account."""
        data = self.connection.request('/%s/servers' % self.api_version).object
        return list(map(self._to_node, data))

    def list_images(self, location=None):
        """List all images visible to the account."""
        data = self.connection.request('/%s/images' % self.api_version).object
        return list(map(self._to_image, data))

    def list_sizes(self, location=None):
        """List the available server types."""
        # 'location' accepted for interface consistency; the API endpoint
        # is not zone-specific.
        data = self.connection.request('/%s/server_types' % self.api_version) \
                   .object
        return list(map(self._to_size, data))

    def list_locations(self):
        """List the available zones."""
        data = self.connection.request('/%s/zones' % self.api_version).object
        return list(map(self._to_location, data))

    def ex_list_cloud_ips(self):
        """
        List Cloud IPs

        @note: This is an API extension for use on Brightbox

        :rtype: ``list`` of ``dict``
        """
        return self.connection.request('/%s/cloud_ips' % self.api_version) \
                   .object

    def ex_create_cloud_ip(self, reverse_dns=None):
        """
        Requests a new cloud IP address for the account

        @note: This is an API extension for use on Brightbox

        :param reverse_dns: Reverse DNS hostname
        :type reverse_dns: ``str``

        :rtype: ``dict``
        """
        params = {}

        if reverse_dns:
            params['reverse_dns'] = reverse_dns

        return self._post('/%s/cloud_ips' % self.api_version, params).object

    def ex_update_cloud_ip(self, cloud_ip_id, reverse_dns):
        """
        Update some details of the cloud IP address

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :param reverse_dns: Reverse DNS hostname
        :type reverse_dns: ``str``

        :return: True if the update was successful.
        :rtype: ``bool``
        """
        # DOCFIX: this method returns a bool, not the response dict.
        response = self._put('/%s/cloud_ips/%s' % (self.api_version,
                                                   cloud_ip_id),
                             {'reverse_dns': reverse_dns})
        return response.status == httplib.OK

    def ex_map_cloud_ip(self, cloud_ip_id, interface_id):
        """
        Maps (or points) a cloud IP address at a server's interface
        or a load balancer to allow them to respond to public requests

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :param interface_id: The Interface ID or LoadBalancer ID to
                             which this Cloud IP should be mapped to
        :type interface_id: ``str``

        :return: True if the mapping was successful.
        :rtype: ``bool``
        """
        response = self._post('/%s/cloud_ips/%s/map' % (self.api_version,
                                                        cloud_ip_id),
                              {'destination': interface_id})
        return response.status == httplib.ACCEPTED

    def ex_unmap_cloud_ip(self, cloud_ip_id):
        """
        Unmaps a cloud IP address from its current destination making
        it available to remap. This remains in the account's pool
        of addresses

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :return: True if the unmap was successful.
        :rtype: ``bool``
        """
        response = self._post('/%s/cloud_ips/%s/unmap' % (self.api_version,
                                                          cloud_ip_id))
        return response.status == httplib.ACCEPTED

    def ex_destroy_cloud_ip(self, cloud_ip_id):
        """
        Release the cloud IP address from the account's ownership

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :return: True if the release was successful.
        :rtype: ``bool``
        """
        response = self.connection.request(
            '/%s/cloud_ips/%s' % (self.api_version,
                                  cloud_ip_id),
            method='DELETE')
        return response.status == httplib.OK
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/cloudframes.py b/awx/lib/site-packages/libcloud/compute/drivers/cloudframes.py
new file mode 100644
index 0000000000..9902f5dbb9
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/cloudframes.py
@@ -0,0 +1,431 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+CloudFrames Driver
+
+"""
+
# Static size definitions offered by the CloudFrames driver.
# (name, ram, disk, bandwidth, price, vcpus)
# NOTE(review): judging by the names, ram is in MB and disk in GB;
# confirm against the cloudapi before relying on the units.
SIZES = [
    ('512mb_1core_10gb', 512, 10, 512, 0.025, 1),
    ('1024mb_1core_20gb', 1024, 20, 512, 0.05, 1),
    ('2048mb_2core_50gb', 2048, 50, 1024, 0.10, 2),
    ('4096mb_2core_100gb', 4096, 100, 2048, 0.20, 2),
    ('8192mb_4core_200gb', 8192, 200, 2048, 0.40, 4),
    ('16384mb_4core_400gb', 16384, 400, 4096, 0.80, 4),
]
+
+import base64
+import random
+
+from libcloud.utils.py3 import urlparse, b
+from libcloud.common.base import ConnectionKey
+from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
+from libcloud.common.types import ProviderError
+from libcloud.compute.base import NodeImage, NodeSize, Node, NodeLocation
+from libcloud.compute.base import NodeDriver
+from libcloud.compute.types import Provider, NodeState
+
+
class CloudFramesException(ProviderError):
    """Provider-specific error raised by the CloudFrames driver."""
    pass
+
+
class CloudFramesComponent(object):
    """
    Represents a node in the cloudapi path.

    Attribute access returns a callable proxy, so e.g.
    ``connection.machine.list(...)`` issues the XML-RPC method
    ``cloud_api_machine.list``.
    """

    def __init__(self, cloudFramesConnection, name):
        self.cloudFramesConnection = cloudFramesConnection
        self.name = name

    def __getattr__(self, key):
        return self.method(key)

    def method(self, methodname):
        """Return a callable that invokes *methodname* on this component."""
        def proxy(*args, **kwargs):
            # BUGFIX: the local variable used to be named 'async', which
            # is a reserved keyword (SyntaxError) from Python 3.7 on.
            # The 'async' *kwargs key* is kept for caller compatibility.
            is_async = kwargs.get('async', False)
            args = list(args)
            args.append('')  # jobguid
            # executionparams: don't wait for the job when asynchronous
            args.append({'wait': False} if is_async else {})
            response = self.cloudFramesConnection.request(
                'cloud_api_%s.%s' % (self.name, methodname), *args)
            if not response.success():
                response.parse_error()
            if is_async:
                return response.parse_body()['jobguid']
            else:
                return response.parse_body()['result']
        return proxy
+
+
class CloudFramesNodeSize(NodeSize):
    """NodeSize extended with the number of virtual CPUs."""

    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 vcpus=None):
        super(CloudFramesNodeSize, self).__init__(
            id, name, ram, disk, bandwidth, price, driver)
        # number of virtual CPUs backing this size
        self.vcpus = vcpus
+
+
class CloudFramesNode(Node):
    """Node with CloudFrames snapshot/rollback convenience methods."""

    def list_snapshots(self):
        """Return the snapshots taken of this node."""
        return self.driver.ex_list_snapshots(self)

    def snapshot(self, label='', description=''):
        """Take a new snapshot of this node."""
        return self.driver.ex_snapshot_node(self, label, description)

    def rollback(self, snapshot):
        """Roll this node back to the given snapshot."""
        return self.driver.ex_rollback_node(self, snapshot)
+
+
class CloudFramesSnapshot(object):
    """A point-in-time snapshot (backup) of a CloudFrames node."""

    def __init__(self, id, timestamp, label, description, driver):
        self.id = id
        self.timestamp = timestamp
        self.label = label
        self.description = description
        self.driver = driver

    def destroy(self):
        """Delete this snapshot through the driver."""
        self.driver.ex_destroy_snapshot(self)
+
+
class CloudFramesConnection(XMLRPCConnection, ConnectionKey):
    """
    Cloudapi connection class
    """

    # BUGFIX: this attribute was misspelled 'repsonseCls', so it never
    # actually set the response class used by the connection machinery.
    responseCls = XMLRPCResponse
    base_url = None

    def __init__(self, key=None, secret=None, secure=True,
                 host=None, port=None, url=None, timeout=None):
        """
        :param key: The username to connect with to the cloudapi
        :type key: ``str``

        :param secret: The password to connect with to the cloudapi
        :type secret: ``str``

        :param secure: Should always be false at the moment
        :type secure: ``bool``

        :param host: The hostname of the cloudapi
        :type host: ``str``

        :param port: The port on which to connect to the cloudapi
        :type port: ``int``

        :param url: Url to the cloudapi (can replace all above)
        :type url: ``str``
        """

        super(CloudFramesConnection, self).__init__(key=key, secure=secure,
                                                    host=host, port=port,
                                                    url=url, timeout=timeout)
        # Precompute the HTTP basic auth token from the credentials.
        self._auth = base64.b64encode(
            b('%s:%s' % (key, secret))).decode('utf-8')
        self.endpoint = url

    def __getattr__(self, key):
        # Unknown attributes resolve to cloudapi components, enabling
        # connection.<component>.<method>(...) style calls.
        return CloudFramesComponent(self, key)

    def add_default_headers(self, headers):
        headers['Authorization'] = 'Basic %s' % self._auth
        return headers
+
+
class CloudFramesNodeDriver(NodeDriver):
    """
    CloudFrames node driver
    """

    connectionCls = CloudFramesConnection

    name = 'CloudFrames'
    api_name = 'cloudframes'
    website = 'http://www.cloudframes.net/'
    type = Provider.CLOUDFRAMES

    # Map cloudapi machine status strings onto libcloud NodeState values.
    NODE_STATE_MAP = {
        'CONFIGURED': NodeState.PENDING,
        'CREATED': NodeState.PENDING,
        'DELETING': NodeState.PENDING,
        'HALTED': NodeState.TERMINATED,
        'IMAGEONLY': NodeState.UNKNOWN,
        'ISCSIEXPOSED': NodeState.PENDING,
        'MOVING': NodeState.PENDING,
        'OVERLOADED': NodeState.UNKNOWN,
        'PAUSED': NodeState.TERMINATED,
        'RUNNING': NodeState.RUNNING,
        'STARTING': NodeState.PENDING,
        'STOPPING': NodeState.PENDING,
        'SYNCING': NodeState.PENDING,
        'TODELETE': NodeState.PENDING,
    }
+
    # subclassed internal methods
    def __init__(self, key=None, secret=None, secure=True,
                 host=None, port=None, url=None, **kwargs):
        """
        Build the driver from either an explicit *url* or the
        host/port/secure triple.

        When *url* is given, credentials and host/port embedded in it
        (``http://user:pass@host:port/path/``) override the matching
        arguments. Only plain-http endpoints with credentials are
        supported.
        """
        if not port:
            # default port derived from the scheme
            port = 443 if secure else 80
        if url:
            # the xmlrpc endpoint must end with a slash
            if not url.endswith('/'):
                url += '/'
            scheme, netloc, _, _, _, _ = urlparse.urlparse(url)
            secure = (scheme == 'https')
            if '@' in netloc:
                # credentials embedded in the url take precedence
                auth, hostport = netloc.rsplit('@', 1)
                if ':' in auth:
                    key, secret = auth.split(':', 1)
                else:
                    key = auth
            else:
                hostport = netloc
            if ':' in hostport:
                host, port = hostport.split(':')
            else:
                # no port in the url: apply the default computed above
                host = hostport
                hostport = '%s:%s' % (host, port)
                url = url.replace(netloc, hostport)
        else:
            url = '%s://%s:%s/appserver/xmlrpc/' % (
                'https' if secure else 'http', host, port)

        if secure:
            raise NotImplementedError(
                'The cloudapi only supports unsecure connections')

        if key is None or secret is None:
            raise NotImplementedError(
                'Unauthenticated support to the cloudapi is not supported')

        # connection url
        self._url = url

        # cached attributes (lazily filled by the properties below)
        self.__cloudspaceguid = None
        self.__languid = None
        self.__locations = []

        super(CloudFramesNodeDriver, self).__init__(
            key, secret, secure, host, port, **kwargs)
+
+ def _ex_connection_class_kwargs(self):
+ return {'url': self._url}
+
+ # internal methods
+ @property
+ def _cloudspaceguid(self):
+ if not self.__cloudspaceguid:
+ self.__cloudspaceguid = self.connection.cloudspace.find(
+ '', '', 'cloud', '')[0]
+ return self.__cloudspaceguid
+
+ @property
+ def _languid(self):
+ if not self.__languid:
+ self.__languid = self.connection.lan.find(
+ '', '', 'public_virtual', '', '', '', '', '', '', '', '', '',
+ '', '', '', '', '')[0]
+ return self.__languid
+
+ def _get_machine_data(self, guid):
+ """
+ Looks up some basic data related to the given machine guid.
+ """
+ try:
+ d = self.connection.machine.list('', '', '', guid, '')[0]
+ except IndexError:
+ raise CloudFramesException('VM no longer exists', 404, self)
+ d['public_ips'] = []
+ d['private_ips'] = []
+ d['size'] = None
+ d['image'] = None
+ return d
+
    def _machine_find(self, template=False, machinetype=None,
                      machinerole=None):
        """
        Return the guids of machines matching the given filters.

        When no *machinetype* is given, both server and desktop machines
        are collected (filtered on *template*); otherwise a single find
        call filtered on *machinetype* and *machinerole* is issued.
        """
        # the cloudframes xmlrpc api requires you to pass all args and kwargs
        # as positional arguments, you can't use keywords arguments
        if not machinetype:
            guids = []
            for machinetype in ['VIRTUALSERVER', 'VIRTUALDESKTOP']:
                guids += self.connection.machine.find(
                    '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
                    '', '', machinetype, template, '', '', '', '', '', '', '',
                    '', '', '', '', '', '', '')
        else:
            guids = self.connection.machine.find(
                '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
                '', '', machinetype, '', '', '', '', '', '', '', '',
                machinerole, '', '', '', '', '', '')
        return guids
+
+ def _to_image(self, image_dict):
+ return NodeImage(id=image_dict['guid'],
+ name=image_dict['name'],
+ driver=self.connection.driver)
+
+ def _to_size(self, id, name, ram, disk, bandwidth, price, vcpus):
+ return CloudFramesNodeSize(
+ id, name, ram, disk, bandwidth, price, self, vcpus)
+
+ def _to_location(self, location_dict):
+ return NodeLocation(id=location_dict['guid'],
+ name=location_dict['name'],
+ country=None,
+ driver=self)
+
+ def _to_node(self, node_dict):
+ # only return nodes which can be worked with
+ # (ignore cloudframes internal autotests and deleted nodes)
+ if node_dict['status'] == 'CONFIGURED':
+ return None
+ return CloudFramesNode(id=node_dict['guid'],
+ name=node_dict['name'],
+ state=self.NODE_STATE_MAP.get(
+ node_dict['status'], NodeState.UNKNOWN),
+ public_ips=node_dict['public_ips'],
+ private_ips=node_dict['private_ips'],
+ driver=self.connection.driver,
+ size=node_dict['size'],
+ image=node_dict['image'],
+ extra={})
+
+ def _to_snapshot(self, snapshot_dict):
+ return CloudFramesSnapshot(id=snapshot_dict['guid'],
+ timestamp=snapshot_dict['timestamp'],
+ label=snapshot_dict['backuplabel'],
+ description=snapshot_dict['description'],
+ driver=self)
+
+ # subclassed public methods, and provider specific public methods
+ def list_images(self, location=None):
+ image_ids = self._machine_find(template=True)
+ image_list = []
+ for image_id in image_ids:
+ image_list.append(self._to_image(self._get_machine_data(image_id)))
+ return image_list
+
+ def list_sizes(self, location=None):
+ sizes = []
+ for id in range(len(SIZES)):
+ sizes.append(self._to_size(id, *SIZES[id]))
+ return sizes
+
+ def list_locations(self, ex_use_cached=True):
+ if not self.__locations or not ex_use_cached:
+ self.__locations = []
+ for location_id in self._machine_find(machinetype='PHYSICAL',
+ machinerole='COMPUTENODE'):
+ self.__locations.append(
+ self._to_location(self._get_machine_data(location_id)))
+ return self.__locations
+
+ def list_nodes(self):
+ node_ids = self._machine_find()
+ node_list = []
+ for node_id in node_ids:
+ node = self._to_node(self._get_machine_data(node_id))
+ if node:
+ node_list.append(node)
+ return node_list
+
    def create_node(self, **kwargs):
        """
        Creates a new node, by cloning the template provided.

        If no location object is passed, a random location will be used.


        :param image: The template to be cloned (required)
        :type image: :class:`NodeImage`

        :param name: The name for the new node (required)
        :type name: ``str``

        :param size: The size of the new node (required)
        :type size: :class:`CloudFramesNodeSize`

        :param location: The location to create the new node
        :type location: :class:`NodeLocation`

        :param default_gateway: The default gateway to be used
        :type default_gateway: ``str``

        :param extra: Additional requirements (extra disks fi.)
        :type extra: ``dict``


        :returns: :class:`Node` -- The newly created Node object

        :raises: CloudFramesException
        """

        # ram / vcpus come from the size object; caller-supplied 'extra'
        # requirements are forwarded alongside them.
        additionalinfo = kwargs.get('extra', {})
        additionalinfo.update({
            'memory': kwargs['size'].ram,
            'cpu': kwargs['size'].vcpus,
        })
        guid = self.connection.machine.createFromTemplate(
            self._cloudspaceguid, kwargs['image'].id, kwargs['name'],
            [{'languid': self._languid}], kwargs['name'],
            kwargs.get('location', random.choice(self.list_locations())).id,
            kwargs.get('default_gateway', ''), None, additionalinfo)
        # A freshly cloned machine is not started automatically.
        if not self.connection.machine.start(guid):
            raise CloudFramesException(
                'failed to start machine after creation', 500, self)
        return self._to_node(self._get_machine_data(guid))
+
+ def destroy_node(self, node):
+ return self.connection.machine.delete(node.id, False)
+
+ def reboot_node(self, node, ex_clean=True):
+ return self.connection.machine.reboot(node.id, ex_clean)
+
+ def ex_snapshot_node(self, node, label='', description=''):
+ guid = self.connection.machine.snapshot(
+ node.id, label, description, False, False, 'PAUSED')
+ for snapshot in self.ex_list_snapshots(node):
+ if snapshot.id == guid:
+ return snapshot
+ else:
+ raise CloudFramesException('Snapshot creation failed', 500, self)
+
+ def ex_rollback_node(self, node, snapshot):
+ if not node.state == NodeState.TERMINATED:
+ self.connection.machine.stop(node.id, False, 930)
+ success = self.connection.machine.rollback(node.id, snapshot.id)
+ self.connection.machine.start(node.id)
+ return success
+
+ def ex_list_snapshots(self, node):
+ return [self._to_snapshot(snapshot_dict) for snapshot_dict in
+ self.connection.machine.listSnapshots(node.id, False, '', '')]
+
+ def ex_destroy_snapshot(self, node, snapshot):
+ return self.connection.machine.delete(snapshot.id, False)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/cloudsigma.py b/awx/lib/site-packages/libcloud/compute/drivers/cloudsigma.py
new file mode 100644
index 0000000000..29b65cb21b
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/cloudsigma.py
@@ -0,0 +1,2093 @@
+# -*- coding: utf-8 -*-
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Drivers for CloudSigma API v1.0 and v2.0.
+"""
+
+import re
+import time
+import copy
+import base64
+
# Prefer simplejson (faster on old interpreters) but fall back to the
# stdlib module.  Catch only ImportError — the bare ``except:`` previously
# used here would also have masked unrelated failures.
try:
    import simplejson as json
except ImportError:
    import json
+
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import httplib
+
+from libcloud.utils.misc import str2dicts, str2list, dict2str
+from libcloud.common.base import ConnectionUserAndKey, JsonResponse, Response
+from libcloud.common.types import InvalidCredsError, ProviderError
+from libcloud.common.cloudsigma import INSTANCE_TYPES
+from libcloud.common.cloudsigma import API_ENDPOINTS_1_0
+from libcloud.common.cloudsigma import API_ENDPOINTS_2_0
+from libcloud.common.cloudsigma import DEFAULT_API_VERSION, DEFAULT_REGION
+from libcloud.compute.types import NodeState, Provider
+from libcloud.compute.base import NodeDriver, NodeSize, Node
+from libcloud.compute.base import NodeImage
+from libcloud.compute.base import is_private_subnet
+from libcloud.utils.iso8601 import parse_date
+from libcloud.utils.misc import get_secure_random_string
+
# Public names exported by this module.
__all__ = [
    'CloudSigmaNodeDriver',
    'CloudSigma_1_0_NodeDriver',
    'CloudSigma_2_0_NodeDriver',
    'CloudSigmaError',

    'CloudSigmaNodeSize',
    'CloudSigmaDrive',
    'CloudSigmaTag',
    'CloudSigmaSubscription',
    'CloudSigmaFirewallPolicy',
    'CloudSigmaFirewallPolicyRule'
]
+
+
class CloudSigmaNodeDriver(NodeDriver):
    """
    Version-dispatching entry point: instantiating this class yields a
    :class:`CloudSigma_1_0_NodeDriver` or :class:`CloudSigma_2_0_NodeDriver`
    depending on ``api_version``.
    """
    name = 'CloudSigma'
    website = 'http://www.cloudsigma.com/'

    def __new__(cls, key, secret=None, secure=True, host=None, port=None,
                api_version=DEFAULT_API_VERSION, **kwargs):
        if cls is CloudSigmaNodeDriver:
            version_map = {'1.0': CloudSigma_1_0_NodeDriver,
                           '2.0': CloudSigma_2_0_NodeDriver}
            try:
                cls = version_map[api_version]
            except KeyError:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
        return super(CloudSigmaNodeDriver, cls).__new__(cls)
+
+
class CloudSigmaException(Exception):
    """
    Generic error raised by the CloudSigma v1.0 driver.

    The first positional argument is the error message.
    """

    def __str__(self):
        return self.args[0]

    def __repr__(self):
        # The format string here was empty, which made every repr() call
        # raise "not all arguments converted" — restore the informative form.
        return "<CloudSigmaException '%s'>" % (self.args[0])
+
+
class CloudSigmaInsufficientFundsException(Exception):
    """Raised when the account balance is too low to create a server."""

    def __repr__(self):
        # Restore the lost format string (an empty "" % arg raises TypeError).
        return "<CloudSigmaInsufficientFundsException '%s'>" % (self.args[0])
+
+
class CloudSigmaNodeSize(NodeSize):
    """
    NodeSize extended with a ``cpu`` field (CloudSigma sizes are defined by
    cpu capacity as well as ram / disk / bandwidth).
    """

    def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver

    def __repr__(self):
        # Restore the lost repr format string (the empty original raised
        # TypeError on every call).
        return (('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s disk=%s '
                 'bandwidth=%s price=%s driver=%s ...>')
                % (self.id, self.name, self.cpu, self.ram, self.disk,
                   self.bandwidth, self.price, self.driver.name))
+
+
class CloudSigma_1_0_Response(Response):
    """Response wrapper for the v1.0 API (plain-text key/value payloads)."""

    def success(self):
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        return 200 <= self.status <= 299

    def parse_body(self):
        # v1.0 responses are key/value text blocks, not JSON.
        if not self.body:
            return self.body
        return str2dicts(self.body)

    def parse_error(self):
        return 'Error: %s' % (self.body.replace('errors:', '').strip())
+
+
class CloudSigma_1_0_Connection(ConnectionUserAndKey):
    """Connection for the v1.0 API, authenticating via HTTP basic auth."""

    host = API_ENDPOINTS_1_0[DEFAULT_REGION]['host']
    responseCls = CloudSigma_1_0_Response

    def add_default_headers(self, headers):
        credentials = '%s:%s' % (self.user_id, self.key)
        token = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        headers['Authorization'] = 'Basic %s' % (token)
        return headers
+
+
+class CloudSigma_1_0_NodeDriver(CloudSigmaNodeDriver):
+ type = Provider.CLOUDSIGMA
+ name = 'CloudSigma (API v1.0)'
+ website = 'http://www.cloudsigma.com/'
+ connectionCls = CloudSigma_1_0_Connection
+
+ IMAGING_TIMEOUT = 20 * 60 # Default timeout (in seconds) for the drive
+ # imaging process
+
+ NODE_STATE_MAP = {
+ 'active': NodeState.RUNNING,
+ 'stopped': NodeState.TERMINATED,
+ 'dead': NodeState.TERMINATED,
+ 'dumped': NodeState.TERMINATED,
+ }
+
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """
        :param region: API end-point to use; must be a key of
                       ``API_ENDPOINTS_1_0``.
        :raises: ValueError on an unknown region
        """
        if region not in API_ENDPOINTS_1_0:
            raise ValueError('Invalid region: %s' % (region))

        # Remember whether the caller pinned a host explicitly; if not, the
        # host is derived from the region (see _ex_connection_class_kwargs).
        self._host_argument_set = host is not None
        self.api_name = 'cloudsigma_%s' % (region)
        super(CloudSigma_1_0_NodeDriver, self).__init__(key=key, secret=secret,
                                                        secure=secure,
                                                        host=host,
                                                        port=port,
                                                        region=region,
                                                        **kwargs)
+
+ def reboot_node(self, node):
+ """
+ Reboot a node.
+
+ Because Cloudsigma API does not provide native reboot call,
+ it's emulated using stop and start.
+
+ @inherits: :class:`NodeDriver.reboot_node`
+ """
+ node = self._get_node(node.id)
+ state = node.state
+
+ if state == NodeState.RUNNING:
+ stopped = self.ex_stop_node(node)
+ else:
+ stopped = True
+
+ if not stopped:
+ raise CloudSigmaException(
+ 'Could not stop node with id %s' % (node.id))
+
+ success = self.ex_start_node(node)
+
+ return success
+
+ def destroy_node(self, node):
+ """
+ Destroy a node (all the drives associated with it are NOT destroyed).
+
+ If a node is still running, it's stopped before it's destroyed.
+
+ @inherits: :class:`NodeDriver.destroy_node`
+ """
+ node = self._get_node(node.id)
+ state = node.state
+
+ # Node cannot be destroyed while running so it must be stopped first
+ if state == NodeState.RUNNING:
+ stopped = self.ex_stop_node(node)
+ else:
+ stopped = True
+
+ if not stopped:
+ raise CloudSigmaException(
+ 'Could not stop node with id %s' % (node.id))
+
+ response = self.connection.request(
+ action='/servers/%s/destroy' % (node.id),
+ method='POST')
+ return response.status == 204
+
+ def list_images(self, location=None):
+ """
+ Return a list of available standard images (this call might take up
+ to 15 seconds to return).
+
+ @inherits: :class:`NodeDriver.list_images`
+ """
+ response = self.connection.request(
+ action='/drives/standard/info').object
+
+ images = []
+ for value in response:
+ if value.get('type'):
+ if value['type'] == 'disk':
+ image = NodeImage(id=value['drive'], name=value['name'],
+ driver=self.connection.driver,
+ extra={'size': value['size']})
+ images.append(image)
+
+ return images
+
+ def list_sizes(self, location=None):
+ sizes = []
+ for value in INSTANCE_TYPES:
+ key = value['id']
+ size = CloudSigmaNodeSize(id=value['id'], name=value['name'],
+ cpu=value['cpu'], ram=value['memory'],
+ disk=value['disk'],
+ bandwidth=value['bandwidth'],
+ price=self._get_size_price(size_id=key),
+ driver=self.connection.driver)
+ sizes.append(size)
+
+ return sizes
+
+ def list_nodes(self):
+ response = self.connection.request(action='/servers/info').object
+
+ nodes = []
+ for data in response:
+ node = self._to_node(data)
+ if node:
+ nodes.append(node)
+ return nodes
+
    def create_node(self, **kwargs):
        """
        Creates a CloudSigma instance

        @inherits: :class:`NodeDriver.create_node`

        :keyword name: String with a name for this new node (required)
        :type name: ``str``

        :keyword smp: Number of virtual processors or None to calculate
                      based on the cpu speed
        :type smp: ``int``

        :keyword nic_model: e1000, rtl8139 or virtio (is not specified,
                            e1000 is used)
        :type nic_model: ``str``

        :keyword vnc_password: If not set, VNC access is disabled.
        :type vnc_password: ``bool``

        :keyword drive_type: Drive type (ssd|hdd). Defaults to hdd.
        :type drive_type: ``str``
        """
        size = kwargs['size']
        image = kwargs['image']
        smp = kwargs.get('smp', 'auto')
        nic_model = kwargs.get('nic_model', 'e1000')
        vnc_password = kwargs.get('vnc_password', None)
        drive_type = kwargs.get('drive_type', 'hdd')

        if nic_model not in ['e1000', 'rtl8139', 'virtio']:
            raise CloudSigmaException('Invalid NIC model specified')

        if drive_type not in ['hdd', 'ssd']:
            raise CloudSigmaException('Invalid drive type "%s". Valid types'
                                      ' are: hdd, ssd' % (drive_type))

        # Step 1: clone the image drive so the new server gets its own copy.
        drive_data = {}
        drive_data.update({'name': kwargs['name'],
                           'size': '%sG' % (kwargs['size'].disk),
                           'driveType': drive_type})

        response = self.connection.request(
            action='/drives/%s/clone' % image.id,
            data=dict2str(drive_data),
            method='POST').object

        if not response:
            raise CloudSigmaException('Drive creation failed')

        drive_uuid = response[0]['drive']

        # Step 2: poll once a second until the drive imaging (copy) process
        # finishes, giving up after IMAGING_TIMEOUT seconds.
        response = self.connection.request(
            action='/drives/%s/info' % (drive_uuid)).object
        imaging_start = time.time()
        while 'imaging' in response[0]:
            response = self.connection.request(
                action='/drives/%s/info' % (drive_uuid)).object
            elapsed_time = time.time() - imaging_start
            timed_out = elapsed_time >= self.IMAGING_TIMEOUT
            if 'imaging' in response[0] and timed_out:
                raise CloudSigmaException('Drive imaging timed out')
            time.sleep(1)

        # Step 3: create the server with the cloned drive attached as the
        # boot device.
        node_data = {}
        node_data.update(
            {'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram,
             'ide:0:0': drive_uuid, 'boot': 'ide:0:0', 'smp': smp})
        node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'})

        if vnc_password:
            node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password})

        response = self.connection.request(action='/servers/create',
                                           data=dict2str(node_data),
                                           method='POST').object

        if not isinstance(response, list):
            response = [response]

        node = self._to_node(response[0])
        if node is None:
            # Insufficient funds, destroy created drive
            self.ex_drive_destroy(drive_uuid)
            raise CloudSigmaInsufficientFundsException(
                'Insufficient funds, node creation failed')

        # Start the node after it has been created
        started = self.ex_start_node(node)

        if started:
            node.state = NodeState.RUNNING

        return node
+
    def ex_destroy_node_and_drives(self, node):
        """
        Destroy a node and all the drives associated with it.

        :param node: Node which should be used
        :type node: :class:`libcloud.compute.base.Node`

        :rtype: ``bool``
        """
        node = self._get_node_info(node)

        # Attached drives appear as ide:* / scsi* / block* keys; skip the
        # statistic (:bytes / :requests) and media sub-keys so only actual
        # drive uuids remain.
        drive_uuids = []
        for key, value in node.items():
            if (key.startswith('ide:') or key.startswith(
                    'scsi') or key.startswith('block')) and\
                    not (key.endswith(':bytes') or
                         key.endswith(':requests') or key.endswith('media')):
                drive_uuids.append(value)

        node_destroyed = self.destroy_node(self._to_node(node))

        if not node_destroyed:
            return False

        for drive_uuid in drive_uuids:
            self.ex_drive_destroy(drive_uuid)

        return True
+
+ def ex_static_ip_list(self):
+ """
+ Return a list of available static IP addresses.
+
+ :rtype: ``list`` of ``str``
+ """
+ response = self.connection.request(action='/resources/ip/list',
+ method='GET')
+
+ if response.status != 200:
+ raise CloudSigmaException('Could not retrieve IP list')
+
+ ips = str2list(response.body)
+ return ips
+
+ def ex_drives_list(self):
+ """
+ Return a list of all the available drives.
+
+ :rtype: ``list`` of ``dict``
+ """
+ response = self.connection.request(action='/drives/info', method='GET')
+
+ result = str2dicts(response.body)
+ return result
+
    def ex_static_ip_create(self):
        """
        Create a new static IP address.

        :rtype: ``list`` of ``dict``
        """
        response = self.connection.request(action='/resources/ip/create',
                                           method='GET')

        result = str2dicts(response.body)
        return result
+
+ def ex_static_ip_destroy(self, ip_address):
+ """
+ Destroy a static IP address.
+
+ :param ip_address: IP address which should be used
+ :type ip_address: ``str``
+
+ :rtype: ``bool``
+ """
+ response = self.connection.request(
+ action='/resources/ip/%s/destroy' % (ip_address), method='GET')
+
+ return response.status == 204
+
+ def ex_drive_destroy(self, drive_uuid):
+ """
+ Destroy a drive with a specified uuid.
+ If the drive is currently mounted an exception is thrown.
+
+ :param drive_uuid: Drive uuid which should be used
+ :type drive_uuid: ``str``
+
+ :rtype: ``bool``
+ """
+ response = self.connection.request(
+ action='/drives/%s/destroy' % (drive_uuid), method='POST')
+
+ return response.status == 204
+
+ def ex_set_node_configuration(self, node, **kwargs):
+ """
+ Update a node configuration.
+ Changing most of the parameters requires node to be stopped.
+
+ :param node: Node which should be used
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :param kwargs: keyword arguments
+ :type kwargs: ``dict``
+
+ :rtype: ``bool``
+ """
+ valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$',
+ '^boot$', '^nic:0:model$', '^nic:0:dhcp',
+ '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$',
+ '^vnc:ip$', '^vnc:password$', '^vnc:tls',
+ '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$',
+ '^block:[0-7](:media)?$')
+
+ invalid_keys = []
+ keys = list(kwargs.keys())
+ for key in keys:
+ matches = False
+ for regex in valid_keys:
+ if re.match(regex, key):
+ matches = True
+ break
+ if not matches:
+ invalid_keys.append(key)
+
+ if invalid_keys:
+ raise CloudSigmaException(
+ 'Invalid configuration key specified: %s' %
+ (',' .join(invalid_keys)))
+
+ response = self.connection.request(
+ action='/servers/%s/set' % (node.id),
+ data=dict2str(kwargs),
+ method='POST')
+
+ return (response.status == 200 and response.body != '')
+
+ def ex_start_node(self, node):
+ """
+ Start a node.
+
+ :param node: Node which should be used
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :rtype: ``bool``
+ """
+ response = self.connection.request(
+ action='/servers/%s/start' % (node.id),
+ method='POST')
+
+ return response.status == 200
+
+ def ex_stop_node(self, node):
+ """
+ Stop (shutdown) a node.
+
+ :param node: Node which should be used
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :rtype: ``bool``
+ """
+ response = self.connection.request(
+ action='/servers/%s/stop' % (node.id),
+ method='POST')
+ return response.status == 204
+
    def ex_shutdown_node(self, node):
        """
        Stop (shutdown) a node.

        @inherits: :class:`CloudSigmaBaseNodeDriver.ex_stop_node`
        """
        # NOTE(review): plain alias of ex_stop_node — presumably kept for
        # backwards compatibility with existing callers.
        return self.ex_stop_node(node)
+
+ def ex_destroy_drive(self, drive_uuid):
+ """
+ Destroy a drive.
+
+ :param drive_uuid: Drive uuid which should be used
+ :type drive_uuid: ``str``
+
+ :rtype: ``bool``
+ """
+ response = self.connection.request(
+ action='/drives/%s/destroy' % (drive_uuid),
+ method='POST')
+ return response.status == 204
+
+ def _ex_connection_class_kwargs(self):
+ """
+ Return the host value based on the user supplied region.
+ """
+ kwargs = {}
+ if not self._host_argument_set:
+ kwargs['host'] = API_ENDPOINTS_1_0[self.region]['host']
+
+ return kwargs
+
    def _to_node(self, data):
        """
        Convert a raw server dict into a :class:`Node`.

        Returns ``None`` when the payload has no server UUID, which happens
        when server creation failed because of insufficient funds.
        """
        if data:
            try:
                state = self.NODE_STATE_MAP[data['status']]
            except KeyError:
                state = NodeState.UNKNOWN

            if 'server' not in data:
                # Response does not contain server UUID if the server
                # creation failed because of insufficient funds.
                return None

            public_ips = []
            if 'nic:0:dhcp' in data:
                if isinstance(data['nic:0:dhcp'], list):
                    public_ips = data['nic:0:dhcp']
                else:
                    public_ips = [data['nic:0:dhcp']]

            extra = {}
            # 'auto' conversion: keep the int when the value parses,
            # otherwise keep the raw string (e.g. smp can be 'auto').
            extra_keys = [('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'),
                          ('status', 'str')]
            for key, value_type in extra_keys:
                if key in data:
                    value = data[key]

                    if value_type == 'int':
                        value = int(value)
                    elif value_type == 'auto':
                        try:
                            value = int(value)
                        except ValueError:
                            pass

                    extra.update({key: value})

            if 'vnc:ip' in data and 'vnc:password' in data:
                extra.update({'vnc_ip': data['vnc:ip'],
                              'vnc_password': data['vnc:password']})

            node = Node(id=data['server'], name=data['name'], state=state,
                        public_ips=public_ips, private_ips=None,
                        driver=self.connection.driver,
                        extra=extra)

            return node
        return None
+
+ def _get_node(self, node_id):
+ nodes = self.list_nodes()
+ node = [node for node in nodes if node.id == node.id]
+
+ if not node:
+ raise CloudSigmaException(
+ 'Node with id %s does not exist' % (node_id))
+
+ return node[0]
+
+ def _get_node_info(self, node):
+ response = self.connection.request(
+ action='/servers/%s/info' % (node.id))
+
+ result = str2dicts(response.body)
+ return result[0]
+
+
class CloudSigmaZrhConnection(CloudSigma_1_0_Connection):
    """
    Connection class for the CloudSigma driver for the Zurich end-point
    """
    # Pin the host to the Zurich v1.0 API endpoint.
    host = API_ENDPOINTS_1_0['zrh']['host']
+
+
class CloudSigmaZrhNodeDriver(CloudSigma_1_0_NodeDriver):
    """
    CloudSigma node driver for the Zurich end-point
    """
    connectionCls = CloudSigmaZrhConnection
    # api_name selects the zrh pricing data.
    api_name = 'cloudsigma_zrh'
+
+
class CloudSigmaLvsConnection(CloudSigma_1_0_Connection):
    """
    Connection class for the CloudSigma driver for the Las Vegas end-point
    """
    # Pin the host to the Las Vegas v1.0 API endpoint.
    host = API_ENDPOINTS_1_0['lvs']['host']
+
+
class CloudSigmaLvsNodeDriver(CloudSigma_1_0_NodeDriver):
    """
    CloudSigma node driver for the Las Vegas end-point
    """
    connectionCls = CloudSigmaLvsConnection
    # api_name selects the lvs pricing data.
    api_name = 'cloudsigma_lvs'
+
+
class CloudSigmaError(ProviderError):
    """
    Represents CloudSigma API error.
    """

    def __init__(self, http_code, error_type, error_msg, error_point, driver):
        """
        :param http_code: HTTP status code.
        :type http_code: ``int``

        :param error_type: Type of error (validation / notexist / backend /
                           permissions  database / concurrency / billing /
                           payment)
        :type error_type: ``str``

        :param error_msg: A description of the error that occurred.
        :type error_msg: ``str``

        :param error_point: Point at which the error occurred. Can be None.
        :type error_point: ``str`` or ``None``

        :param driver: Driver instance the error originated from.
        :type driver: :class:`NodeDriver`
        """
        super(CloudSigmaError, self).__init__(http_code=http_code,
                                              value=error_msg, driver=driver)
        self.error_type = error_type
        self.error_msg = error_msg
        self.error_point = error_point
+
+
class CloudSigmaSubscription(object):
    """
    Represents CloudSigma subscription.
    """

    def __init__(self, id, resource, amount, period, status, price, start_time,
                 end_time, auto_renew, subscribed_object=None):
        """
        :param id: Subscription ID.
        :type id: ``str``

        :param resource: Resource (e.g vlan, ip, etc.).
        :type resource: ``str``

        :param amount: Subscription amount.
        :type amount: ``int``

        :param period: Subscription period.
        :type period: ``str``

        :param status: Subscription status (active / inactive).
        :type status: ``str``

        :param price: Subscription price.
        :type price: ``str``

        :param start_time: Start time for this subscription.
        :type start_time: ``datetime.datetime``

        :param end_time: End time for this subscription.
        :type end_time: ``datetime.datetime``

        :param auto_renew: True if the subscription is auto renewed.
        :type auto_renew: ``bool``

        :param subscribed_object: Optional UUID of the subscribed object.
        :type subscribed_object: ``str``
        """
        self.id = id
        self.resource = resource
        self.amount = amount
        self.period = period
        self.status = status
        self.price = price
        self.start_time = start_time
        self.end_time = end_time
        self.auto_renew = auto_renew
        self.subscribed_object = subscribed_object

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # The repr format string was lost (an empty "" % args raises
        # TypeError); restored to the informative form.
        return ('<CloudSigmaSubscription id=%s, resource=%s, amount=%s, '
                'period=%s, object_uuid=%s>' %
                (self.id, self.resource, self.amount, self.period,
                 self.subscribed_object))
+
+
class CloudSigmaTag(object):
    """
    Represents a CloudSigma tag object.
    """

    def __init__(self, id, name, resources=None):
        """
        :param id: Tag ID.
        :type id: ``str``

        :param name: Tag name.
        :type name: ``str``

        :param resources: IDs of resources which are associated with this tag.
        :type resources: ``list`` of ``str``
        """
        self.id = id
        self.name = name
        # Fresh list per instance — never share a mutable default.
        self.resources = resources if resources else []

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Restored the lost repr format string (the empty original raised
        # TypeError on every call).
        return ('<CloudSigmaTag id=%s, name=%s, resources=%s>' %
                (self.id, self.name, repr(self.resources)))
+
+
class CloudSigmaDrive(NodeImage):
    """
    Represents a CloudSigma drive.
    """

    def __init__(self, id, name, size, media, status, driver, extra=None):
        """
        :param id: Drive ID.
        :type id: ``str``

        :param name: Drive name.
        :type name: ``str``

        :param size: Drive size (in bytes).
        :type size: ``int``

        :param media: Drive media (cdrom / disk).
        :type media: ``str``

        :param status: Drive status (unmounted / mounted).
        :type status: ``str``
        """
        super(CloudSigmaDrive, self).__init__(id=id, name=name, driver=driver,
                                              extra=extra)
        self.size = size
        self.media = media
        self.status = status

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Restored the lost repr format string (the empty original raised
        # TypeError on every call).
        return (('<CloudSigmaDrive id=%s, name=%s, size=%s, media=%s, '
                 'status=%s>') %
                (self.id, self.name, self.size, self.media, self.status))
+
+
class CloudSigmaFirewallPolicy(object):
    """
    Represents a CloudSigma firewall policy.
    """

    def __init__(self, id, name, rules):
        """
        :param id: Policy ID.
        :type id: ``str``

        :param name: Policy name.
        :type name: ``str``

        :param rules: Rules associated with this policy.
        :type rules: ``list`` of :class:`.CloudSigmaFirewallPolicyRule` objects
        """
        self.id = id
        self.name = name
        self.rules = rules if rules else []

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Restored the lost repr format string (the empty original raised
        # TypeError on every call).
        return (('<CloudSigmaFirewallPolicy id=%s, name=%s, rules=%s>') %
                (self.id, self.name, repr(self.rules)))
+
+
class CloudSigmaFirewallPolicyRule(object):
    """
    Represents a CloudSigma firewall policy rule.
    """

    def __init__(self, action, direction, ip_proto=None, src_ip=None,
                 src_port=None, dst_ip=None, dst_port=None, comment=None):
        """
        :param action: Action (drop / accept).
        :type action: ``str``

        :param direction: Rule direction (in / out / both)>
        :type direction: ``str``

        :param ip_proto: IP protocol (tcp / udp).
        :type ip_proto: ``str``.

        :param src_ip: Source IP in CIDR notation.
        :type src_ip: ``str``

        :param src_port: Source port or a port range.
        :type src_port: ``str``

        :param dst_ip: Destination IP in CIDR notation.
        :type dst_ip: ``str``

        :param dst_port: Destination port or a port range.
        :type dst_port: ``str``

        :param comment: Comment associated with the policy.
        :type comment: ``str``
        """
        self.action = action
        self.direction = direction
        self.ip_proto = ip_proto
        self.src_ip = src_ip
        self.src_port = src_port
        self.dst_ip = dst_ip
        self.dst_port = dst_port
        self.comment = comment

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Restored the lost repr format string (the empty original raised
        # TypeError on every call).
        return (('<CloudSigmaFirewallPolicyRule action=%s, direction=%s>') %
                (self.action, self.direction))
+
+
class CloudSigma_2_0_Response(JsonResponse):
    # HTTP codes the v2.0 API uses for successful calls.
    success_status_codes = [
        httplib.OK,
        httplib.ACCEPTED,
        httplib.NO_CONTENT,
        httplib.CREATED
    ]

    def success(self):
        return self.status in self.success_status_codes

    def parse_error(self):
        """
        Parse an error response.

        Raises (rather than returns) when the failure is an auth problem or
        a recognized API error object; otherwise returns the parsed body.
        """
        if int(self.status) == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Invalid credentials')

        body = self.parse_body()
        errors = self._parse_errors_from_body(body=body)

        if errors:
            # Throw first error
            raise errors[0]

        return body

    def _parse_errors_from_body(self, body):
        """
        Parse errors from the response body.

        :return: List of error objects.
        :rtype: ``list`` of :class:`.CloudSigmaError` objects
        """
        errors = []

        if not isinstance(body, list):
            return None

        for item in body:
            if 'error_type' not in item:
                # Unrecognized error
                continue

            error = CloudSigmaError(http_code=self.status,
                                    error_type=item['error_type'],
                                    error_msg=item['error_message'],
                                    error_point=item['error_point'],
                                    driver=self.connection.driver)
            errors.append(error)

        return errors
+
+
class CloudSigma_2_0_Connection(ConnectionUserAndKey):
    """Connection for the v2.0 API (JSON payloads, HTTP basic auth)."""

    host = API_ENDPOINTS_2_0[DEFAULT_REGION]['host']
    responseCls = CloudSigma_2_0_Response
    api_prefix = '/api/2.0'

    def add_default_headers(self, headers):
        credentials = '%s:%s' % (self.user_id, self.key)
        token = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        headers['Authorization'] = 'Basic %s' % (token)
        return headers

    def encode_data(self, data):
        return json.dumps(data)

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False):
        params = params or {}

        if method == 'GET':
            params['limit'] = 0  # we want all the items back

        return super(CloudSigma_2_0_Connection, self).request(
            action=self.api_prefix + action,
            params=params,
            data=data,
            headers=headers,
            method=method,
            raw=raw)
+
+
+class CloudSigma_2_0_NodeDriver(CloudSigmaNodeDriver):
+ """
+ Driver for CloudSigma API v2.0.
+ """
+ name = 'CloudSigma (API v2.0)'
+ api_name = 'cloudsigma_zrh'
+ website = 'http://www.cloudsigma.com/'
+ connectionCls = CloudSigma_2_0_Connection
+
+ # Default drive transition timeout in seconds
+ DRIVE_TRANSITION_TIMEOUT = 500
+
+ # How long to sleep between different polling periods while waiting for
+ # drive transition
+ DRIVE_TRANSITION_SLEEP_INTERVAL = 5
+
+ NODE_STATE_MAP = {
+ 'starting': NodeState.PENDING,
+ 'stopping': NodeState.PENDING,
+ 'unavailable': NodeState.PENDING,
+ 'running': NodeState.RUNNING,
+ 'stopped': NodeState.STOPPED,
+ 'paused': NodeState.STOPPED
+ }
+
    def __init__(self, key, secret, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """
        :param region: API end-point to use; must be a key of
                       ``API_ENDPOINTS_2_0``.
        :raises: ValueError on an unknown region or an insecure connection
        """
        if region not in API_ENDPOINTS_2_0:
            raise ValueError('Invalid region: %s' % (region))

        if not secure:
            # CloudSigma drive uses Basic Auth authentication and we don't want
            # to allow user to accidentally send credentials over the wire in
            # plain-text
            raise ValueError('CloudSigma driver only supports a '
                             'secure connection')

        # Remember whether the caller pinned the host explicitly.
        self._host_argument_set = host is not None
        super(CloudSigma_2_0_NodeDriver, self).__init__(key=key, secret=secret,
                                                        secure=secure,
                                                        host=host, port=port,
                                                        region=region,
                                                        **kwargs)
+
+ def list_nodes(self, ex_tag=None):
+ """
+ List available nodes.
+
+ :param ex_tag: If specified, only return servers tagged with the
+ provided tag.
+ :type ex_tag: :class:`CloudSigmaTag`
+ """
+ if ex_tag:
+ action = '/tags/%s/servers/detail/' % (ex_tag.id)
+ else:
+ action = '/servers/detail/'
+
+ response = self.connection.request(action=action, method='GET').object
+ nodes = [self._to_node(data=item) for item in response['objects']]
+ return nodes
+
+ def list_sizes(self):
+ """
+ List available sizes.
+ """
+ sizes = []
+ for value in INSTANCE_TYPES:
+ key = value['id']
+ size = CloudSigmaNodeSize(id=value['id'], name=value['name'],
+ cpu=value['cpu'], ram=value['memory'],
+ disk=value['disk'],
+ bandwidth=value['bandwidth'],
+ price=self._get_size_price(size_id=key),
+ driver=self.connection.driver)
+ sizes.append(size)
+
+ return sizes
+
+ def list_images(self):
+ """
+ Return a list of available pre-installed library drives.
+
+ Note: If you want to list all the available library drives (both
+ pre-installed and installation CDs), use :meth:`ex_list_library_drives`
+ method.
+ """
+ response = self.connection.request(action='/libdrives/').object
+ images = [self._to_image(data=item) for item in response['objects']]
+
+ # We filter out non pre-installed library drives by default because
+ # they can't be used directly following a default Libcloud server
+ # creation flow.
+ images = [image for image in images if
+ image.extra['image_type'] == 'preinst']
+ return images
+
    def create_node(self, name, size, image, ex_metadata=None,
                    ex_vnc_password=None, ex_avoid=None, ex_vlan=None):
        """
        Create a new server.

        Server creation consists multiple steps depending on the type of the
        image used.

        1. Installation CD:

           1. Create a server and attach installation cd
           2. Start a server

        2. Pre-installed image:

           1. Clone provided library drive so we can use it
           2. Resize cloned drive to the desired size
           3. Create a server and attach cloned drive
           4. Start a server

        :param name: Server name (also used to derive the name of the
                     cloned drive: "<name>-drive").
        :type name: ``str``

        :param size: Size of the server (cpu, ram and disk attributes are
                     used).
        :type size: :class:`.CloudSigmaNodeSize`

        :param image: Image to use - either a pre-installed library drive
                      or an installation CD drive.
        :type image: :class:`libcloud.compute.base.NodeImage` or
                     :class:`.CloudSigmaDrive`

        :param ex_metadata: Key / value pairs to associate with the
                            created node. (optional)
        :type ex_metadata: ``dict``

        :param ex_vnc_password: Password to use for VNC access. If not
                                provided, random password is generated.
        :type ex_vnc_password: ``str``

        :param ex_avoid: A list of server UUIDs to avoid when starting this
                         node. (optional)
        :type ex_avoid: ``list``

        :param ex_vlan: Optional UUID of a VLAN network to use. If specified,
                        server will have two nics assigned - 1 with a public ip
                        and 1 with the provided VLAN.
        :type ex_vlan: ``str``

        :return: Created (and started) node.
        :rtype: :class:`libcloud.compute.base.Node`
        """
        is_installation_cd = self._is_installation_cd(image=image)

        if ex_vnc_password:
            vnc_password = ex_vnc_password
        else:
            # VNC password is not provided, generate a random one.
            vnc_password = get_secure_random_string(size=12)

        drive_name = '%s-drive' % (name)

        # size is specified in GB, the API expects bytes
        drive_size = (size.disk * 1024 * 1024 * 1024)

        if not is_installation_cd:
            # 1. Clone library drive so we can use it
            drive = self.ex_clone_drive(drive=image, name=drive_name)

            # Wait for drive clone to finish
            drive = self._wait_for_drive_state_transition(drive=drive,
                                                          state='unmounted')

            # 2. Resize drive to the desired disk size if the desired disk
            # size is larger than the cloned drive size.
            if drive_size > drive.size:
                drive = self.ex_resize_drive(drive=drive, size=drive_size)

                # Wait for drive resize to finish
                drive = self._wait_for_drive_state_transition(
                    drive=drive, state='unmounted')
        else:
            # No need to clone installation CDs
            drive = image

        # 3. Create server and attach cloned drive
        # ide 0:0
        data = {}
        data['name'] = name
        data['cpu'] = size.cpu
        # RAM is specified in MB by the size object, the API expects bytes
        data['mem'] = (size.ram * 1024 * 1024)
        data['vnc_password'] = vnc_password

        if ex_metadata:
            data['meta'] = ex_metadata

        # Assign 1 public interface (DHCP) to the node
        nic = {
            'boot_order': None,
            'ip_v4_conf': {
                'conf': 'dhcp',
            },
            'ip_v6_conf': None
        }

        nics = [nic]

        if ex_vlan:
            # Assign another interface for VLAN
            nic = {
                'boot_order': None,
                'ip_v4_conf': None,
                'ip_v6_conf': None,
                'vlan': ex_vlan
            }
            nics.append(nic)

        # Need to use IDE for installation CDs
        if is_installation_cd:
            device_type = 'ide'
        else:
            device_type = 'virtio'

        # NOTE: from here on "drive" refers to the attachment descriptor
        # sent to the API, not the CloudSigmaDrive object above.
        drive = {
            'boot_order': 1,
            'dev_channel': '0:0',
            'device': device_type,
            'drive': drive.id
        }

        drives = [drive]

        data['nics'] = nics
        data['drives'] = drives

        action = '/servers/'
        response = self.connection.request(action=action, method='POST',
                                           data=data)
        node = self._to_node(response.object['objects'][0])

        # 4. Start server
        self.ex_start_node(node=node, ex_avoid=ex_avoid)

        return node
+
+ def destroy_node(self, node):
+ """
+ Destroy the node and all the associated drives.
+
+ :return: ``True`` on success, ``False`` otherwise.
+ :rtype: ``bool``
+ """
+ action = '/servers/%s/' % (node.id)
+ params = {'recurse': 'all_drives'}
+ response = self.connection.request(action=action, method='DELETE',
+ params=params)
+ return response.status == httplib.NO_CONTENT
+
+ # Server extension methods
+
+ def ex_edit_node(self, node, params):
+ """
+ Edit a node.
+
+ :param node: Node to edit.
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :param params: Node parameters to update.
+ :type params: ``dict``
+
+ :return Edited node.
+ :rtype: :class:`libcloud.compute.base.Node`
+ """
+ data = {}
+
+ # name, cpu, mem and vnc_password attributes must always be present so
+ # we just copy them from the to-be-edited node
+ data['name'] = node.name
+ data['cpu'] = node.extra['cpu']
+ data['mem'] = node.extra['mem']
+ data['vnc_password'] = node.extra['vnc_password']
+
+ nics = copy.deepcopy(node.extra.get('nics', []))
+
+ data['nics'] = nics
+
+ data.update(params)
+
+ action = '/servers/%s/' % (node.id)
+ response = self.connection.request(action=action, method='PUT',
+ data=data).object
+ node = self._to_node(data=response)
+ return node
+
+ def ex_start_node(self, node, ex_avoid=None):
+ """
+ Start a node.
+
+ :param node: Node to start.
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :param ex_avoid: A list of other server uuids to avoid when
+ starting this node. If provided, node will
+ attempt to be started on a different
+ physical infrastructure from other servers
+ specified using this argument. (optional)
+ :type ex_avoid: ``list``
+ """
+ params = {}
+
+ if ex_avoid:
+ params['avoid'] = ','.join(ex_avoid)
+
+ path = '/servers/%s/action/' % (node.id)
+ response = self._perform_action(path=path, action='start',
+ params=params,
+ method='POST')
+ return response.status == httplib.ACCEPTED
+
+ def ex_stop_node(self, node):
+ """
+ Stop a node.
+ """
+ path = '/servers/%s/action/' % (node.id)
+ response = self._perform_action(path=path, action='stop',
+ method='POST')
+ return response.status == httplib.ACCEPTED
+
+ def ex_clone_node(self, node, name=None, random_vnc_password=None):
+ """
+ Clone the provided node.
+
+ :param name: Optional name for the cloned node.
+ :type name: ``str``
+ :param random_vnc_password: If True, a new random VNC password will be
+ generated for the cloned node. Otherwise
+ password from the cloned node will be
+ reused.
+ :type random_vnc_password: ``bool``
+
+ :return: Cloned node.
+ :rtype: :class:`libcloud.compute.base.Node`
+ """
+ data = {}
+
+ data['name'] = name
+ data['random_vnc_password'] = random_vnc_password
+
+ path = '/servers/%s/action/' % (node.id)
+ response = self._perform_action(path=path, action='clone',
+ method='POST', data=data).object
+ node = self._to_node(data=response)
+ return node
+
+ def ex_open_vnc_tunnel(self, node):
+ """
+ Open a VNC tunnel to the provided node and return the VNC url.
+
+ :param node: Node to open the VNC tunnel to.
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :return: URL of the opened VNC tunnel.
+ :rtype: ``str``
+ """
+ path = '/servers/%s/action/' % (node.id)
+ response = self._perform_action(path=path, action='open_vnc',
+ method='POST').object
+ vnc_url = response['vnc_url']
+ return vnc_url
+
+ def ex_close_vnc_tunnel(self, node):
+ """
+ Close a VNC server to the provided node.
+
+ :param node: Node to close the VNC tunnel to.
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :return: ``True`` on success, ``False`` otherwise.
+ :rtype: ``bool``
+ """
+ path = '/servers/%s/action/' % (node.id)
+ response = self._perform_action(path=path, action='close_vnc',
+ method='POST')
+ return response.status == httplib.ACCEPTED
+
+ # Drive extension methods
+
+ def ex_list_library_drives(self):
+ """
+ Return a list of all the available library drives (pre-installed and
+ installation CDs).
+
+ :rtype: ``list`` of :class:`.CloudSigmaDrive` objects
+ """
+ response = self.connection.request(action='/libdrives/').object
+ drives = [self._to_drive(data=item) for item in response['objects']]
+ return drives
+
+ def ex_list_user_drives(self):
+ """
+ Return a list of all the available user's drives.
+
+ :rtype: ``list`` of :class:`.CloudSigmaDrive` objects
+ """
+ response = self.connection.request(action='/drives/detail/').object
+ drives = [self._to_drive(data=item) for item in response['objects']]
+ return drives
+
+ def ex_create_drive(self, name, size, media='disk', ex_avoid=None):
+ """
+ Create a new drive.
+
+ :param name: Drive name.
+ :type name: ``str``
+
+ :param size: Drive size in bytes.
+ :type size: ``int``
+
+ :param media: Drive media type (cdrom, disk).
+ :type media: ``str``
+
+ :param ex_avoid: A list of other drive uuids to avoid when
+ creating this drive. If provided, drive will
+ attempt to be created on a different
+ physical infrastructure from other drives
+ specified using this argument. (optional)
+ :type ex_avoid: ``list``
+
+ :return: Created drive object.
+ :rtype: :class:`.CloudSigmaDrive`
+ """
+ params = {}
+ data = {
+ 'name': name,
+ 'size': size,
+ 'media': media
+ }
+
+ if ex_avoid:
+ params['avoid'] = ','.join(ex_avoid)
+
+ action = '/drives/'
+ response = self.connection.request(action=action, method='POST',
+ params=params, data=data).object
+ drive = self._to_drive(data=response['objects'][0])
+ return drive
+
+ def ex_clone_drive(self, drive, name=None, ex_avoid=None):
+ """
+ Clone a library or a standard drive.
+
+ :param drive: Drive to clone.
+ :type drive: :class:`libcloud.compute.base.NodeImage` or
+ :class:`.CloudSigmaDrive`
+
+ :param name: Optional name for the cloned drive.
+ :type name: ``str``
+
+ :param ex_avoid: A list of other drive uuids to avoid when
+ creating this drive. If provided, drive will
+ attempt to be created on a different
+ physical infrastructure from other drives
+ specified using this argument. (optional)
+ :type ex_avoid: ``list``
+
+ :return: New cloned drive.
+ :rtype: :class:`.CloudSigmaDrive`
+ """
+ params = {}
+ data = {}
+
+ if ex_avoid:
+ params['avoid'] = ','.join(ex_avoid)
+
+ if name:
+ data['name'] = name
+
+ path = '/drives/%s/action/' % (drive.id)
+ response = self._perform_action(path=path, action='clone',
+ params=params, data=data,
+ method='POST')
+ drive = self._to_drive(data=response.object['objects'][0])
+ return drive
+
+ def ex_resize_drive(self, drive, size):
+ """
+ Resize a drive.
+
+ :param drive: Drive to resize.
+
+ :param size: New drive size in bytes.
+ :type size: ``int``
+
+ :return: Drive object which is being resized.
+ :rtype: :class:`.CloudSigmaDrive`
+ """
+ path = '/drives/%s/action/' % (drive.id)
+ data = {'name': drive.name, 'size': size, 'media': 'disk'}
+ response = self._perform_action(path=path, action='resize',
+ method='POST', data=data)
+
+ drive = self._to_drive(data=response.object['objects'][0])
+ return drive
+
    def ex_attach_drive(self, node):
        """
        Attach a drive to the provided node.

        Note: Not implemented yet - currently a no-op placeholder.
        """
        # TODO
        pass
+
+ def ex_get_drive(self, drive_id):
+ """
+ Retrieve information about a single drive.
+
+ :param drive_id: ID of the drive to retrieve.
+ :type drive_id: ``str``
+
+ :return: Drive object.
+ :rtype: :class:`.CloudSigmaDrive`
+ """
+ action = '/drives/%s/' % (drive_id)
+ response = self.connection.request(action=action).object
+ drive = self._to_drive(data=response)
+ return drive
+
+ # Firewall policies extension methods
+
+ def ex_list_firewall_policies(self):
+ """
+ List firewall policies.
+
+ :rtype: ``list`` of :class:`.CloudSigmaFirewallPolicy`
+ """
+ action = '/fwpolicies/detail/'
+ response = self.connection.request(action=action, method='GET').object
+ policies = [self._to_firewall_policy(data=item) for item
+ in response['objects']]
+ return policies
+
+ def ex_create_firewall_policy(self, name, rules=None):
+ """
+ Create a firewall policy.
+
+ :param name: Policy name.
+ :type name: ``str``
+
+ :param rules: List of firewall policy rules to associate with this
+ policy. (optional)
+ :type rules: ``list`` of ``dict``
+
+ :return: Created firewall policy object.
+ :rtype: :class:`.CloudSigmaFirewallPolicy`
+ """
+ data = {}
+ obj = {}
+ obj['name'] = name
+
+ if rules:
+ obj['rules'] = rules
+
+ data['objects'] = [obj]
+
+ action = '/fwpolicies/'
+ response = self.connection.request(action=action, method='POST',
+ data=data).object
+ policy = self._to_firewall_policy(data=response['objects'][0])
+ return policy
+
+ def ex_attach_firewall_policy(self, policy, node, nic_mac=None):
+ """
+ Attach firewall policy to a public NIC interface on the server.
+
+ :param policy: Firewall policy to attach.
+ :type policy: :class:`.CloudSigmaFirewallPolicy`
+
+ :param node: Node to attach policy to.
+ :type node: :class:`libcloud.compute.base.Node`
+
+ :param nic_mac: Optional MAC address of the NIC to add the policy to.
+ If not specified, first public interface is used
+ instead.
+ :type nic_mac: ``str``
+
+ :return: Node object to which the policy was attached to.
+ :rtype: :class:`libcloud.compute.base.Node`
+ """
+ nics = copy.deepcopy(node.extra.get('nics', []))
+
+ if nic_mac:
+ nic = [n for n in nics if n['mac'] == nic_mac]
+ else:
+ nic = nics
+
+ if len(nic) == 0:
+ raise ValueError('Cannot find the NIC interface to attach '
+ 'a policy to')
+
+ nic = nic[0]
+ nic['firewall_policy'] = policy.id
+
+ params = {'nics': nics}
+ node = self.ex_edit_node(node=node, params=params)
+ return node
+
+ def ex_delete_firewall_policy(self, policy):
+ """
+ Delete a firewall policy.
+
+ :param policy: Policy to delete to.
+ :type policy: :class:`.CloudSigmaFirewallPolicy`
+
+ :return: ``True`` on success, ``False`` otherwise.
+ :rtype: ``bool``
+ """
+ action = '/fwpolicies/%s/' % (policy.id)
+ response = self.connection.request(action=action, method='DELETE')
+ return response.status == httplib.NO_CONTENT
+
+ # Availability groups extension methods
+
+ def ex_list_servers_availability_groups(self):
+ """
+ Return which running servers share the same physical compute host.
+
+ :return: A list of server UUIDs which share the same physical compute
+ host. Servers which share the same host will be stored under
+ the same list index.
+ :rtype: ``list`` of ``list``
+ """
+ action = '/servers/availability_groups/'
+ response = self.connection.request(action=action, method='GET')
+ return response.object
+
+ def ex_list_drives_availability_groups(self):
+ """
+ Return which drives share the same physical storage host.
+
+ :return: A list of drive UUIDs which share the same physical storage
+ host. Drives which share the same host will be stored under
+ the same list index.
+ :rtype: ``list`` of ``list``
+ """
+ action = '/drives/availability_groups/'
+ response = self.connection.request(action=action, method='GET')
+ return response.object
+
+ # Tag extension methods
+
+ def ex_list_tags(self):
+ """
+ List all the available tags.
+
+ :rtype: ``list`` of :class:`.CloudSigmaTag` objects
+ """
+ action = '/tags/detail/'
+ response = self.connection.request(action=action, method='GET').object
+ tags = [self._to_tag(data=item) for item in response['objects']]
+
+ return tags
+
+ def ex_get_tag(self, tag_id):
+ """
+ Retrieve a single tag.
+
+ :param tag_id: ID of the tag to retrieve.
+ :type tag_id: ``str``
+
+ :rtype: ``list`` of :class:`.CloudSigmaTag` objects
+ """
+ action = '/tags/%s/' % (tag_id)
+ response = self.connection.request(action=action, method='GET').object
+ tag = self._to_tag(data=response)
+ return tag
+
+ def ex_create_tag(self, name, resource_uuids=None):
+ """
+ Create a tag.
+
+ :param name: Tag name.
+ :type name: ``str``
+
+ :param resource_uuids: Optional list of resource UUIDs to assign this
+ tag go.
+ :type resource_uuids: ``list`` of ``str``
+
+ :return: Created tag object.
+ :rtype: :class:`.CloudSigmaTag`
+ """
+ data = {}
+ data['objects'] = [
+ {
+ 'name': name
+ }
+ ]
+
+ if resource_uuids:
+ data['resources'] = resource_uuids
+
+ action = '/tags/'
+ response = self.connection.request(action=action, method='POST',
+ data=data).object
+ tag = self._to_tag(data=response['objects'][0])
+ return tag
+
+ def ex_tag_resource(self, resource, tag):
+ """
+ Associate tag with the provided resource.
+
+ :param resource: Resource to associate a tag with.
+ :type resource: :class:`libcloud.compute.base.Node` or
+ :class:`.CloudSigmaDrive`
+
+ :param tag: Tag to associate with the resources.
+ :type tag: :class:`.CloudSigmaTag`
+
+ :return: Updated tag object.
+ :rtype: :class:`.CloudSigmaTag`
+ """
+ if not hasattr(resource, 'id'):
+ raise ValueError('Resource doesn\'t have id attribute')
+
+ return self.ex_tag_resources(resources=[resource], tag=tag)
+
+ def ex_tag_resources(self, resources, tag):
+ """
+ Associate tag with the provided resources.
+
+ :param resources: Resources to associate a tag with.
+ :type resources: ``list`` of :class:`libcloud.compute.base.Node` or
+ :class:`.CloudSigmaDrive`
+
+ :param tag: Tag to associate with the resources.
+ :type tag: :class:`.CloudSigmaTag`
+
+ :return: Updated tag object.
+ :rtype: :class:`.CloudSigmaTag`
+ """
+
+ resources = tag.resources[:]
+
+ for resource in resources:
+ if not hasattr(resource, 'id'):
+ raise ValueError('Resource doesn\'t have id attribute')
+
+ resources.append(resource.id)
+
+ resources = list(set(resources))
+
+ data = {
+ 'name': tag.name,
+ 'resources': resources
+ }
+
+ action = '/tags/%s/' % (tag.id)
+ response = self.connection.request(action=action, method='PUT',
+ data=data).object
+ tag = self._to_tag(data=response)
+ return tag
+
+ def ex_delete_tag(self, tag):
+ """
+ Delete a tag.
+
+ :param tag: Tag to delete.
+ :type tag: :class:`.CloudSigmaTag`
+
+ :return: ``True`` on success, ``False`` otherwise.
+ :rtype: ``bool``
+ """
+ action = '/tags/%s/' % (tag.id)
+ response = self.connection.request(action=action, method='DELETE')
+ return response.status == httplib.NO_CONTENT
+
+ # Account extension methods
+
+ def ex_get_balance(self):
+ """
+ Retrueve account balance information.
+
+ :return: Dictionary with two items ("balance" and "currency").
+ :rtype: ``dict``
+ """
+ action = '/balance/'
+ response = self.connection.request(action=action, method='GET')
+ return response.object
+
+ def ex_get_pricing(self):
+ """
+ Retrive pricing information that are applicable to the cloud.
+
+ :return: Dictionary with pricing information.
+ :rtype: ``dict``
+ """
+ action = '/pricing/'
+ response = self.connection.request(action=action, method='GET')
+ return response.object
+
+ def ex_get_usage(self):
+ """
+ Retrieve account current usage information.
+
+ :return: Dictionary with two items ("balance" and "usage").
+ :rtype: ``dict``
+ """
+ action = '/currentusage/'
+ response = self.connection.request(action=action, method='GET')
+ return response.object
+
+ def ex_list_subscriptions(self, status='all', resources=None):
+ """
+ List subscriptions for this account.
+
+ :param status: Only return subscriptions with the provided status
+ (optional).
+ :type status: ``str``
+ :param resources: Only return subscriptions for the provided resources
+ (optional).
+ :type resources: ``list``
+
+ :rtype: ``list``
+ """
+ params = {}
+
+ if status:
+ params['status'] = status
+
+ if resources:
+ params['resource'] = ','.join(resources)
+
+ response = self.connection.request(action='/subscriptions/',
+ params=params).object
+ subscriptions = self._to_subscriptions(data=response)
+ return subscriptions
+
+ def ex_toggle_subscription_auto_renew(self, subscription):
+ """
+ Toggle subscription auto renew status.
+
+ :param subscription: Subscription to toggle the auto renew flag for.
+ :type subscription: :class:`.CloudSigmaSubscription`
+
+ :return: ``True`` on success, ``False`` otherwise.
+ :rtype: ``bool``
+ """
+ path = '/subscriptions/%s/action/' % (subscription.id)
+ response = self._perform_action(path=path, action='auto_renew',
+ method='POST')
+ return response.status == httplib.OK
+
+ def ex_create_subscription(self, amount, period, resource,
+ auto_renew=False):
+ """
+ Create a new subscription.
+
+ :param amount: Subscription amount. For example, in dssd case this
+ would be disk size in gigabytes.
+ :type amount: ``int``
+
+ :param period: Subscription period. For example: 30 days, 1 week, 1
+ month, ...
+ :type period: ``str``
+
+ :param resource: Resource the purchase the subscription for.
+ :type resource: ``str``
+
+ :param auto_renew: True to automatically renew the subscription.
+ :type auto_renew: ``bool``
+ """
+ data = [
+ {
+ 'amount': amount,
+ 'period': period,
+ 'auto_renew': auto_renew,
+ 'resource': resource
+ }
+ ]
+
+ response = self.connection.request(action='/subscriptions/',
+ data=data, method='POST')
+ data = response.object['objects'][0]
+ subscription = self._to_subscription(data=data)
+ return subscription
+
+ # Misc extension methods
+
+ def ex_list_capabilities(self):
+ """
+ Retrieve all the basic and sensible limits of the API.
+
+ :rtype: ``dict``
+ """
+ action = '/capabilities/'
+ response = self.connection.request(action=action,
+ method='GET')
+ capabilities = response.object
+ return capabilities
+
+ def _parse_ips_from_nic(self, nic):
+ """
+ Parse private and public IP addresses from the provided network
+ interface object.
+
+ :param nic: NIC object.
+ :type nic: ``dict``
+
+ :return: (public_ips, private_ips) tuple.
+ :rtype: ``tuple``
+ """
+ public_ips, private_ips = [], []
+
+ ipv4_conf = nic['ip_v4_conf']
+ ipv6_conf = nic['ip_v6_conf']
+
+ ipv4 = ipv4_conf['ip'] if ipv4_conf else None
+ ipv6 = ipv6_conf['ip'] if ipv6_conf else None
+
+ ips = []
+
+ if ipv4:
+ ips.append(ipv4)
+
+ if ipv6:
+ ips.append(ipv6)
+
+ runtime = nic['runtime']
+
+ ip_v4 = runtime['ip_v4'] if nic['runtime'] else None
+ ip_v6 = runtime['ip_v6'] if nic['runtime'] else None
+
+ ipv4 = ip_v4['uuid'] if ip_v4 else None
+ ipv6 = ip_v4['uuid'] if ip_v6 else None
+
+ if ipv4:
+ ips.append(ipv4)
+
+ if ipv6:
+ ips.append(ipv6)
+
+ ips = set(ips)
+
+ for ip in ips:
+ if is_private_subnet(ip):
+ private_ips.append(ip)
+ else:
+ public_ips.append(ip)
+
+ return public_ips, private_ips
+
+ def _to_node(self, data):
+ extra_keys = ['cpu', 'mem', 'nics', 'vnc_password', 'meta']
+
+ id = data['uuid']
+ name = data['name']
+ state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
+
+ public_ips = []
+ private_ips = []
+ extra = self._extract_values(obj=data, keys=extra_keys)
+
+ for nic in data['nics']:
+ _public_ips, _private_ips = self._parse_ips_from_nic(nic=nic)
+
+ public_ips.extend(_public_ips)
+ private_ips.extend(_private_ips)
+
+ node = Node(id=id, name=name, state=state, public_ips=public_ips,
+ private_ips=private_ips, driver=self, extra=extra)
+ return node
+
+ def _to_image(self, data):
+ extra_keys = ['description', 'arch', 'image_type', 'os', 'licenses',
+ 'media', 'meta']
+
+ id = data['uuid']
+ name = data['name']
+ extra = self._extract_values(obj=data, keys=extra_keys)
+
+ image = NodeImage(id=id, name=name, driver=self, extra=extra)
+ return image
+
+ def _to_drive(self, data):
+ id = data['uuid']
+ name = data['name']
+ size = data['size']
+ media = data['media']
+ status = data['status']
+ extra = {}
+
+ drive = CloudSigmaDrive(id=id, name=name, size=size, media=media,
+ status=status, driver=self, extra=extra)
+
+ return drive
+
+ def _to_tag(self, data):
+ resources = data['resources']
+ resources = [resource['uuid'] for resource in resources]
+
+ tag = CloudSigmaTag(id=data['uuid'], name=data['name'],
+ resources=resources)
+ return tag
+
+ def _to_subscriptions(self, data):
+ subscriptions = []
+
+ for item in data['objects']:
+ subscription = self._to_subscription(data=item)
+ subscriptions.append(subscription)
+
+ return subscriptions
+
+ def _to_subscription(self, data):
+ start_time = parse_date(data['start_time'])
+ end_time = parse_date(data['end_time'])
+ obj_uuid = data['subscribed_object']
+
+ subscription = CloudSigmaSubscription(id=data['id'],
+ resource=data['resource'],
+ amount=int(data['amount']),
+ period=data['period'],
+ status=data['status'],
+ price=data['price'],
+ start_time=start_time,
+ end_time=end_time,
+ auto_renew=data['auto_renew'],
+ subscribed_object=obj_uuid)
+ return subscription
+
+ def _to_firewall_policy(self, data):
+ rules = []
+
+ for item in data.get('rules', []):
+ rule = CloudSigmaFirewallPolicyRule(action=item['action'],
+ direction=item['direction'],
+ ip_proto=item['ip_proto'],
+ src_ip=item['src_ip'],
+ src_port=item['src_port'],
+ dst_ip=item['dst_ip'],
+ dst_port=item['dst_port'],
+ comment=item['comment'])
+ rules.append(rule)
+
+ policy = CloudSigmaFirewallPolicy(id=data['uuid'], name=data['name'],
+ rules=rules)
+ return policy
+
+ def _perform_action(self, path, action, method='POST', params=None,
+ data=None):
+ """
+ Perform API action and return response object.
+ """
+ if params:
+ params = params.copy()
+ else:
+ params = {}
+
+ params['do'] = action
+ response = self.connection.request(action=path, method=method,
+ params=params, data=data)
+ return response
+
+ def _is_installation_cd(self, image):
+ """
+ Detect if the provided image is an installation CD.
+
+ :rtype: ``bool``
+ """
+ if isinstance(image, CloudSigmaDrive) and image.media == 'cdrom':
+ return True
+
+ return False
+
+ def _extract_values(self, obj, keys):
+ """
+ Extract values from a dictionary and return a new dictionary with
+ extracted values.
+
+ :param obj: Dictionary to extract values from.
+ :type obj: ``dict``
+
+ :param keys: Keys to extract.
+ :type keys: ``list``
+
+ :return: Dictionary with extracted values.
+ :rtype: ``dict``
+ """
+ result = {}
+
+ for key in keys:
+ result[key] = obj[key]
+
+ return result
+
+ def _wait_for_drive_state_transition(self, drive, state,
+ timeout=DRIVE_TRANSITION_TIMEOUT):
+ """
+ Wait for a drive to transition to the provided state.
+
+ Note: This function blocks and periodically calls "GET drive" endpoint
+ to check if the drive has already transitioned to the desired state.
+
+ :param drive: Drive to wait for.
+ :type drive: :class:`.CloudSigmaDrive`
+
+ :param state: Desired drive state.
+ :type state: ``str``
+
+ :param timeout: How long to wait for the transition (in seconds) before
+ timing out.
+ :type timeout: ``int``
+
+ :return: Drive object.
+ :rtype: :class:`.CloudSigmaDrive`
+ """
+
+ start_time = time.time()
+
+ while drive.status != state:
+ drive = self.ex_get_drive(drive_id=drive.id)
+
+ if drive.status == state:
+ break
+
+ current_time = time.time()
+ delta = (current_time - start_time)
+
+ if delta >= timeout:
+ msg = ('Timed out while waiting for drive transition '
+ '(timeout=%s seconds)' % (timeout))
+ raise Exception(msg)
+
+ time.sleep(self.DRIVE_TRANSITION_SLEEP_INTERVAL)
+
+ return drive
+
+ def _ex_connection_class_kwargs(self):
+ """
+ Return the host value based on the user supplied region.
+ """
+ kwargs = {}
+
+ if not self._host_argument_set:
+ kwargs['host'] = API_ENDPOINTS_2_0[self.region]['host']
+
+ return kwargs
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/cloudstack.py b/awx/lib/site-packages/libcloud/compute/drivers/cloudstack.py
new file mode 100644
index 0000000000..089e3f08b3
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/cloudstack.py
@@ -0,0 +1,2208 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import with_statement
+
+import base64
+import warnings
+
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import urlparse
+
+from libcloud.compute.providers import Provider
+from libcloud.common.cloudstack import CloudStackDriverMixIn
+from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation
+from libcloud.compute.base import NodeSize, StorageVolume
+from libcloud.compute.base import KeyPair
+from libcloud.compute.types import NodeState, LibcloudError
+from libcloud.compute.types import KeyPairDoesNotExistError
+from libcloud.utils.networking import is_private_subnet
+
+
+"""
+Define the extra dictionary for specific resources
+"""
+RESOURCE_EXTRA_ATTRIBUTES_MAP = {
+ 'network': {
+ 'broadcast_domain_type': {
+ 'key_name': 'broadcastdomaintype',
+ 'transform_func': str
+ },
+ 'traffic_type': {
+ 'key_name': 'traffictype',
+ 'transform_func': str
+ },
+ 'zone_name': {
+ 'key_name': 'zonename',
+ 'transform_func': str
+ },
+ 'network_offering_name': {
+ 'key_name': 'networkofferingname',
+ 'transform_func': str
+ },
+ 'network_offeringdisplay_text': {
+ 'key_name': 'networkofferingdisplaytext',
+ 'transform_func': str
+ },
+ 'network_offering_availability': {
+ 'key_name': 'networkofferingavailability',
+ 'transform_func': str
+ },
+ 'is_system': {
+ 'key_name': 'issystem',
+ 'transform_func': str
+ },
+ 'state': {
+ 'key_name': 'state',
+ 'transform_func': str
+ },
+ 'dns1': {
+ 'key_name': 'dns1',
+ 'transform_func': str
+ },
+ 'dns2': {
+ 'key_name': 'dns2',
+ 'transform_func': str
+ },
+ 'type': {
+ 'key_name': 'type',
+ 'transform_func': str
+ },
+ 'acl_type': {
+ 'key_name': 'acltype',
+ 'transform_func': str
+ },
+ 'subdomain_access': {
+ 'key_name': 'subdomainaccess',
+ 'transform_func': str
+ },
+ 'network_domain': {
+ 'key_name': 'networkdomain',
+ 'transform_func': str
+ },
+ 'physical_network_id': {
+ 'key_name': 'physicalnetworkid',
+ 'transform_func': str
+ },
+ 'can_use_for_deploy': {
+ 'key_name': 'canusefordeploy',
+ 'transform_func': str
+ },
+ 'gateway': {
+ 'key_name': 'gateway',
+ 'transform_func': str
+ },
+ 'netmask': {
+ 'key_name': 'netmask',
+ 'transform_func': str
+ },
+ 'vpc_id': {
+ 'key_name': 'vpcid',
+ 'transform_func': str
+ },
+ 'project_id': {
+ 'key_name': 'projectid',
+ 'transform_func': str
+ }
+ },
+ 'node': {
+ 'haenable': {
+ 'key_name': 'haenable',
+ 'transform_func': str
+ },
+ 'zone_id': {
+ 'key_name': 'zoneid',
+ 'transform_func': str
+ },
+ 'zone_name': {
+ 'key_name': 'zonename',
+ 'transform_func': str
+ },
+ 'key_name': {
+ 'key_name': 'keypair',
+ 'transform_func': str
+ },
+ 'password': {
+ 'key_name': 'password',
+ 'transform_func': str
+ },
+ 'image_id': {
+ 'key_name': 'templateid',
+ 'transform_func': str
+ },
+ 'image_name': {
+ 'key_name': 'templatename',
+ 'transform_func': str
+ },
+ 'template_display_text': {
+ 'key_name': 'templatdisplaytext',
+ 'transform_func': str
+ },
+ 'password_enabled': {
+ 'key_name': 'passwordenabled',
+ 'transform_func': str
+ },
+ 'size_id': {
+ 'key_name': 'serviceofferingid',
+ 'transform_func': str
+ },
+ 'size_name': {
+ 'key_name': 'serviceofferingname',
+ 'transform_func': str
+ },
+ 'root_device_id': {
+ 'key_name': 'rootdeviceid',
+ 'transform_func': str
+ },
+ 'root_device_type': {
+ 'key_name': 'rootdevicetype',
+ 'transform_func': str
+ },
+ 'hypervisor': {
+ 'key_name': 'hypervisor',
+ 'transform_func': str
+ },
+ 'project': {
+ 'key_name': 'project',
+ 'transform_func': str
+ },
+ 'project_id': {
+ 'key_name': 'projectid',
+ 'transform_func': str
+ }
+ },
+ 'volume': {
+ 'created': {
+ 'key_name': 'created',
+ 'transform_func': str
+ },
+ 'device_id': {
+ 'key_name': 'deviceid',
+ 'transform_func': int
+ },
+ 'instance_id': {
+ 'key_name': 'serviceofferingid',
+ 'transform_func': str
+ },
+ 'state': {
+ 'key_name': 'state',
+ 'transform_func': str
+ },
+ 'volume_type': {
+ 'key_name': 'type',
+ 'transform_func': str
+ },
+ 'zone_id': {
+ 'key_name': 'zoneid',
+ 'transform_func': str
+ },
+ 'zone_name': {
+ 'key_name': 'zonename',
+ 'transform_func': str
+ }
+ },
+ 'project': {
+ 'account': {'key_name': 'account', 'transform_func': str},
+ 'cpuavailable': {'key_name': 'cpuavailable', 'transform_func': int},
+ 'cpulimit': {'key_name': 'cpulimit', 'transform_func': int},
+ 'cputotal': {'key_name': 'cputotal', 'transform_func': int},
+ 'domain': {'key_name': 'domain', 'transform_func': str},
+ 'domainid': {'key_name': 'domainid', 'transform_func': str},
+ 'ipavailable': {'key_name': 'ipavailable', 'transform_func': int},
+ 'iplimit': {'key_name': 'iplimit', 'transform_func': int},
+ 'iptotal': {'key_name': 'iptotal', 'transform_func': int},
+ 'memoryavailable': {'key_name': 'memoryavailable',
+ 'transform_func': int},
+ 'memorylimit': {'key_name': 'memorylimit', 'transform_func': int},
+ 'memorytotal': {'key_name': 'memorytotal', 'transform_func': int},
+ 'networkavailable': {'key_name': 'networkavailable',
+ 'transform_func': int},
+ 'networklimit': {'key_name': 'networklimit', 'transform_func': int},
+ 'networktotal': {'key_name': 'networktotal', 'transform_func': int},
+ 'primarystorageavailable': {'key_name': 'primarystorageavailable',
+ 'transform_func': int},
+ 'primarystoragelimit': {'key_name': 'primarystoragelimit',
+ 'transform_func': int},
+ 'primarystoragetotal': {'key_name': 'primarystoragetotal',
+ 'transform_func': int},
+ 'secondarystorageavailable': {'key_name': 'secondarystorageavailable',
+ 'transform_func': int},
+ 'secondarystoragelimit': {'key_name': 'secondarystoragelimit',
+ 'transform_func': int},
+ 'secondarystoragetotal': {'key_name': 'secondarystoragetotal',
+ 'transform_func': int},
+ 'snapshotavailable': {'key_name': 'snapshotavailable',
+ 'transform_func': int},
+ 'snapshotlimit': {'key_name': 'snapshotlimit', 'transform_func': int},
+ 'snapshottotal': {'key_name': 'snapshottotal', 'transform_func': int},
+ 'state': {'key_name': 'state', 'transform_func': str},
+ 'tags': {'key_name': 'tags', 'transform_func': str},
+ 'templateavailable': {'key_name': 'templateavailable',
+ 'transform_func': int},
+ 'templatelimit': {'key_name': 'templatelimit', 'transform_func': int},
+ 'templatetotal': {'key_name': 'templatetotal', 'transform_func': int},
+ 'vmavailable': {'key_name': 'vmavailable', 'transform_func': int},
+ 'vmlimit': {'key_name': 'vmlimit', 'transform_func': int},
+ 'vmrunning': {'key_name': 'vmrunning', 'transform_func': int},
+ 'vmtotal': {'key_name': 'vmtotal', 'transform_func': int},
+ 'volumeavailable': {'key_name': 'volumeavailable',
+ 'transform_func': int},
+ 'volumelimit': {'key_name': 'volumelimit', 'transform_func': int},
+ 'volumetotal': {'key_name': 'volumetotal', 'transform_func': int},
+ 'vpcavailable': {'key_name': 'vpcavailable', 'transform_func': int},
+ 'vpclimit': {'key_name': 'vpclimit', 'transform_func': int},
+ 'vpctotal': {'key_name': 'vpctotal', 'transform_func': int}
+ }
+}
+
+
+class CloudStackNode(Node):
+ """
+ Subclass of Node so we can expose our extension methods.
+ """
+
+ def ex_allocate_public_ip(self):
+ """
+ Allocate a public IP and bind it to this node.
+ """
+ return self.driver.ex_allocate_public_ip(self)
+
+ def ex_release_public_ip(self, address):
+ """
+ Release a public IP that this node holds.
+ """
+ return self.driver.ex_release_public_ip(self, address)
+
+ def ex_create_ip_forwarding_rule(self, address, protocol,
+ start_port, end_port=None):
+ """
+ Add a NAT/firewall forwarding rule for a port or ports.
+ """
+ return self.driver.ex_create_ip_forwarding_rule(node=self,
+ address=address,
+ protocol=protocol,
+ start_port=start_port,
+ end_port=end_port)
+
+ def ex_create_port_forwarding_rule(self, address,
+ private_port, public_port,
+ protocol,
+ public_end_port=None,
+ private_end_port=None,
+ openfirewall=True):
+ """
+ Add a port forwarding rule for port or ports.
+ """
+ return self.driver.ex_create_port_forwarding_rule(
+ node=self, address=address, private_port=private_port,
+ public_port=public_port, protocol=protocol,
+ public_end_port=public_end_port, private_end_port=private_end_port,
+ openfirewall=openfirewall)
+
+ def ex_delete_ip_forwarding_rule(self, rule):
+ """
+ Delete a port forwarding rule.
+ """
+ return self.driver.ex_delete_ip_forwarding_rule(node=self, rule=rule)
+
+ def ex_delete_port_forwarding_rule(self, rule):
+ """
+ Delete a NAT/firewall rule.
+ """
+ return self.driver.ex_delete_port_forwarding_rule(node=self, rule=rule)
+
+ def ex_start(self):
+ """
+ Starts a stopped virtual machine.
+ """
+ return self.driver.ex_start(node=self)
+
+ def ex_stop(self):
+ """
+ Stops a running virtual machine.
+ """
+ return self.driver.ex_stop(node=self)
+
+
+class CloudStackAddress(object):
+ """
+ A public IP address.
+
+ :param id: UUID of the Public IP
+ :type id: ``str``
+
+ :param address: The public IP address
+ :type address: ``str``
+
+ :param associated_network_id: The ID of the network where this address
+ has been associated with
+ :type associated_network_id: ``str``
+ """
+
+ def __init__(self, id, address, driver, associated_network_id=None):
+ self.id = id
+ self.address = address
+ self.driver = driver
+ self.associated_network_id = associated_network_id
+
+ def release(self):
+ self.driver.ex_release_public_ip(address=self)
+
+ def __str__(self):
+ return self.address
+
+ def __eq__(self, other):
+ return self.__class__ is other.__class__ and self.id == other.id
+
+
+class CloudStackIPForwardingRule(object):
+ """
+ A NAT/firewall forwarding rule.
+ """
+
+ def __init__(self, node, id, address, protocol, start_port, end_port=None):
+ self.node = node
+ self.id = id
+ self.address = address
+ self.protocol = protocol
+ self.start_port = start_port
+ self.end_port = end_port
+
+ def delete(self):
+ self.node.ex_delete_ip_forwarding_rule(rule=self)
+
+ def __eq__(self, other):
+ return self.__class__ is other.__class__ and self.id == other.id
+
+
+class CloudStackPortForwardingRule(object):
+ """
+ A Port forwarding rule for Source NAT.
+ """
+
+ def __init__(self, node, rule_id, address, protocol, public_port,
+ private_port, public_end_port=None, private_end_port=None):
+ """
+ A Port forwarding rule for Source NAT.
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :param node: Node for rule
+ :type node: :class:`Node`
+
+ :param rule_id: Rule ID
+ :type rule_id: ``int``
+
+ :param address: External IP address
+ :type address: :class:`CloudStackAddress`
+
+ :param protocol: TCP/IP Protocol (TCP, UDP)
+ :type protocol: ``str``
+
+ :param public_port: External port for rule (or start port if
+ public_end_port is also provided)
+ :type public_port: ``int``
+
+ :param private_port: Internal node port for rule (or start port if
+ public_end_port is also provided)
+ :type private_port: ``int``
+
+ :param public_end_port: End of external port range
+ :type public_end_port: ``int``
+
+ :param private_end_port: End of internal port range
+ :type private_end_port: ``int``
+
+ :rtype: :class:`CloudStackPortForwardingRule`
+ """
+ self.node = node
+ self.id = rule_id
+ self.address = address
+ self.protocol = protocol
+ self.public_port = public_port
+ self.public_end_port = public_end_port
+ self.private_port = private_port
+ self.private_end_port = private_end_port
+
+ def delete(self):
+ self.node.ex_delete_port_forwarding_rule(rule=self)
+
+ def __eq__(self, other):
+ return self.__class__ is other.__class__ and self.id == other.id
+
+
+class CloudStackDiskOffering(object):
+ """
+ A disk offering within CloudStack.
+ """
+
+ def __init__(self, id, name, size, customizable):
+ self.id = id
+ self.name = name
+ self.size = size
+ self.customizable = customizable
+
+ def __eq__(self, other):
+ return self.__class__ is other.__class__ and self.id == other.id
+
+
+class CloudStackNetwork(object):
+ """
+ Class representing a CloudStack Network.
+ """
+
+ def __init__(self, displaytext, name, networkofferingid, id, zoneid,
+ driver, extra=None):
+ self.displaytext = displaytext
+ self.name = name
+ self.networkofferingid = networkofferingid
+ self.id = id
+ self.zoneid = zoneid
+ self.driver = driver
+ self.extra = extra or {}
+
+ def __repr__(self):
+ return (('')
+ % (self.id, self.displaytext, self.name,
+ self.networkofferingid, self.zoneid, self.driver.name))
+
+
+class CloudStackNetworkOffering(object):
+ """
+ Class representing a CloudStack Network Offering.
+ """
+
+ def __init__(self, name, display_text, guest_ip_type, id,
+ service_offering_id, for_vpc, driver, extra=None):
+ self.display_text = display_text
+ self.name = name
+ self.guest_ip_type = guest_ip_type
+ self.id = id
+ self.service_offering_id = service_offering_id
+ self.for_vpc = for_vpc
+ self.driver = driver
+ self.extra = extra or {}
+
+ def __repr__(self):
+ return (('')
+ % (self.id, self.name, self.display_text,
+ self.guest_ip_type, self.service_offering_id, self.for_vpc,
+ self.driver.name))
+
+
+class CloudStackProject(object):
+ """
+ Class representing a CloudStack Project.
+ """
+
+ def __init__(self, id, name, display_text, driver, extra=None):
+ self.id = id
+ self.name = name
+ self.display_text = display_text
+ self.driver = driver
+ self.extra = extra or {}
+
+ def __repr__(self):
+ return (('')
+ % (self.id, self.display_text, self.name,
+ self.driver.name))
+
+
+class CloudStackNodeDriver(CloudStackDriverMixIn, NodeDriver):
+    """
+    Driver for the CloudStack API.
+
+    :cvar host: The host where the API can be reached.
+    :cvar path: The path where the API can be reached.
+    :cvar async_poll_frequency: How often (in seconds) to poll for async
+                                job completion.
+    :type async_poll_frequency: ``int``"""
+
+    name = 'CloudStack'
+    api_name = 'cloudstack'
+    website = 'http://cloudstack.org/'
+    type = Provider.CLOUDSTACK
+
+    # Newly deployed VMs get a generated root password back from the API.
+    features = {'create_node': ['generates_password']}
+
+    # Mapping of CloudStack VM states to the standard libcloud NodeState
+    # values. Transitional states (Starting/Stopping/Expunging) are mapped
+    # to the closest libcloud equivalent.
+    NODE_STATE_MAP = {
+        'Running': NodeState.RUNNING,
+        'Starting': NodeState.REBOOTING,
+        'Stopped': NodeState.STOPPED,
+        'Stopping': NodeState.PENDING,
+        'Destroyed': NodeState.TERMINATED,
+        'Expunging': NodeState.PENDING,
+        'Error': NodeState.TERMINATED
+    }
+
+    def __init__(self, key, secret=None, secure=True, host=None,
+                 path=None, port=None, url=None, *args, **kwargs):
+        """
+        :inherits: :class:`NodeDriver.__init__`
+
+        :param host: The host where the API can be reached. (required)
+        :type host: ``str``
+
+        :param path: The path where the API can be reached. (required)
+        :type path: ``str``
+
+        :param url: Full URL to the API endpoint. Mutually exclusive with host
+                    and path argument.
+        :type url: ``str``
+        """
+        # ``url`` takes precedence: host, path and port are all derived
+        # from it and any explicitly passed values are overwritten.
+        if url:
+            parsed = urlparse.urlparse(url)
+
+            path = parsed.path
+
+            scheme = parsed.scheme
+            split = parsed.netloc.split(':')
+
+            if len(split) == 1:
+                # No port provided, use the default one
+                host = parsed.netloc
+                port = 443 if scheme == 'https' else 80
+            else:
+                host = split[0]
+                port = int(split[1])
+        else:
+            # Fall back to the class level defaults which are set by the
+            # provider specific subclasses.
+            host = host if host else self.host
+            path = path if path else self.path
+
+        # Persist the resolved endpoint on the instance so later requests
+        # (and _ex_connection_class_kwargs style hooks) see it.
+        if path is not None:
+            self.path = path
+
+        if host is not None:
+            self.host = host
+
+        # Only the generic driver needs explicit endpoint information;
+        # provider specific subclasses ship their own host/path defaults.
+        if (self.type == Provider.CLOUDSTACK) and (not host or not path):
+            raise Exception('When instantiating CloudStack driver directly '
+                            'you also need to provide url or host and path '
+                            'argument')
+
+        super(CloudStackNodeDriver, self).__init__(key=key,
+                                                   secret=secret,
+                                                   secure=secure,
+                                                   host=host,
+                                                   port=port)
+
+ def list_images(self, location=None):
+ args = {
+ 'templatefilter': 'executable'
+ }
+ if location is not None:
+ args['zoneid'] = location.id
+ imgs = self._sync_request(command='listTemplates',
+ params=args,
+ method='GET')
+ images = []
+ for img in imgs.get('template', []):
+ images.append(NodeImage(
+ id=img['id'],
+ name=img['name'],
+ driver=self.connection.driver,
+ extra={
+ 'hypervisor': img['hypervisor'],
+ 'format': img['format'],
+ 'os': img['ostypename'],
+ 'displaytext': img['displaytext']}))
+ return images
+
+ def list_locations(self):
+ """
+ :rtype ``list`` of :class:`NodeLocation`
+ """
+ locs = self._sync_request('listZones')
+
+ locations = []
+ for loc in locs['zone']:
+ location = NodeLocation(str(loc['id']), loc['name'], 'Unknown',
+ self)
+ locations.append(location)
+
+ return locations
+
+ def list_nodes(self, project=None):
+ """
+ @inherits: :class:`NodeDriver.list_nodes`
+
+ :keyword project: Limit nodes returned to those configured under
+ the defined project.
+ :type project: :class:`.CloudStackProject`
+
+ :rtype: ``list`` of :class:`CloudStackNode`
+ """
+
+ args = {}
+ if project:
+ args['projectid'] = project.id
+ vms = self._sync_request('listVirtualMachines', params=args)
+ addrs = self._sync_request('listPublicIpAddresses', params=args)
+
+ public_ips_map = {}
+ for addr in addrs.get('publicipaddress', []):
+ if 'virtualmachineid' not in addr:
+ continue
+ vm_id = str(addr['virtualmachineid'])
+ if vm_id not in public_ips_map:
+ public_ips_map[vm_id] = {}
+ public_ips_map[vm_id][addr['ipaddress']] = addr['id']
+
+ nodes = []
+
+ for vm in vms.get('virtualmachine', []):
+ public_ips = public_ips_map.get(str(vm['id']), {}).keys()
+ public_ips = list(public_ips)
+ node = self._to_node(data=vm, public_ips=public_ips)
+
+ addresses = public_ips_map.get(vm['id'], {}).items()
+ addresses = [CloudStackAddress(node, v, k) for k, v in addresses]
+ node.extra['ip_addresses'] = addresses
+
+ rules = []
+ for addr in addresses:
+ result = self._sync_request('listIpForwardingRules')
+ for r in result.get('ipforwardingrule', []):
+ if str(r['virtualmachineid']) == node.id:
+ rule = CloudStackIPForwardingRule(node, r['id'],
+ addr,
+ r['protocol']
+ .upper(),
+ r['startport'],
+ r['endport'])
+ rules.append(rule)
+ node.extra['ip_forwarding_rules'] = rules
+
+ rules = []
+ public_ips = self.ex_list_public_ips()
+ result = self._sync_request('listPortForwardingRules')
+ for r in result.get('portforwardingrule', []):
+ if str(r['virtualmachineid']) == node.id:
+ addr = [a for a in public_ips if
+ a.address == r['ipaddress']]
+ rule = CloudStackPortForwardingRule(node, r['id'],
+ addr[0],
+ r['protocol'].upper(),
+ r['publicport'],
+ r['privateport'],
+ r['publicendport'],
+ r['privateendport'])
+ if not addr[0].address in node.public_ips:
+ node.public_ips.append(addr[0].address)
+ rules.append(rule)
+ node.extra['port_forwarding_rules'] = rules
+
+ nodes.append(node)
+
+ return nodes
+
+ def list_sizes(self, location=None):
+ """
+ :rtype ``list`` of :class:`NodeSize`
+ """
+ szs = self._sync_request(command='listServiceOfferings',
+ method='GET')
+ sizes = []
+ for sz in szs['serviceoffering']:
+ extra = {'cpu': sz['cpunumber']}
+ sizes.append(NodeSize(sz['id'], sz['name'], sz['memory'], 0, 0,
+ 0, self, extra=extra))
+ return sizes
+
+ def create_node(self, **kwargs):
+ """
+ Create a new node
+
+ @inherits: :class:`NodeDriver.create_node`
+
+ :keyword networks: Optional list of networks to launch the server
+ into.
+ :type networks: ``list`` of :class:`.CloudStackNetwork`
+
+ :keyword project: Optional project to create the new node under.
+ :type project: :class:`.CloudStackProject`
+
+ :keyword diskoffering: Optional disk offering to add to the new
+ node.
+ :type diskoffering: :class:`.CloudStackDiskOffering`
+
+ :keyword ex_keyname: Name of existing keypair
+ :type ex_keyname: ``str``
+
+ :keyword ex_userdata: String containing user data
+ :type ex_userdata: ``str``
+
+ :keyword ex_security_groups: List of security groups to assign to
+ the node
+ :type ex_security_groups: ``list`` of ``str``
+
+ :keyword ex_displayname: String containing instance display name
+ :type ex_displayname: ``str``
+
+ :rtype: :class:`.CloudStackNode`
+ """
+
+ server_params = self._create_args_to_params(None, **kwargs)
+
+ data = self._async_request(command='deployVirtualMachine',
+ params=server_params,
+ method='GET')['virtualmachine']
+ node = self._to_node(data=data)
+ return node
+
+ def _create_args_to_params(self, node, **kwargs):
+ server_params = {}
+
+ # TODO: Refactor and use "kwarg_to_server_params" map
+ name = kwargs.get('name', None)
+ size = kwargs.get('size', None)
+ image = kwargs.get('image', None)
+ location = kwargs.get('location', None)
+ networks = kwargs.get('networks', None)
+ project = kwargs.get('project', None)
+ diskoffering = kwargs.get('diskoffering', None)
+ ex_key_name = kwargs.get('ex_keyname', None)
+ ex_user_data = kwargs.get('ex_userdata', None)
+ ex_security_groups = kwargs.get('ex_security_groups', None)
+ ex_displayname = kwargs.get('ex_displayname', None)
+
+ if name:
+ server_params['name'] = name
+
+ if ex_displayname:
+ server_params['displayname'] = ex_displayname
+
+ if size:
+ server_params['serviceofferingid'] = size.id
+
+ if image:
+ server_params['templateid'] = image.id
+
+ if location:
+ server_params['zoneid'] = location.id
+ else:
+ # Use a default location
+ server_params['zoneid'] = self.list_locations()[0].id
+
+ if networks:
+ networks = ','.join([network.id for network in networks])
+ server_params['networkids'] = networks
+
+ if project:
+ server_params['projectid'] = project.id
+
+ if diskoffering:
+ server_params['diskofferingid'] = diskoffering.id
+
+ if ex_key_name:
+ server_params['keypair'] = ex_key_name
+
+ if ex_user_data:
+ ex_user_data = base64.b64encode(b(ex_user_data).decode('ascii'))
+ server_params['userdata'] = ex_user_data
+
+ if ex_security_groups:
+ ex_security_groups = ','.join(ex_security_groups)
+ server_params['securitygroupnames'] = ex_security_groups
+
+ return server_params
+
+    def destroy_node(self, node):
+        """
+        Destroy the given virtual machine.
+
+        @inherits: :class:`NodeDriver.destroy_node`
+        :type node: :class:`CloudStackNode`
+
+        :rtype: ``bool``
+        """
+        # Blocks until the async destroy job completes.
+        self._async_request(command='destroyVirtualMachine',
+                            params={'id': node.id},
+                            method='GET')
+        return True
+
+ def reboot_node(self, node):
+ """
+ @inherits: :class:`NodeDriver.reboot_node`
+ :type node: :class:`CloudStackNode`
+
+ :rtype: ``bool``
+ """
+ self._async_request(command='rebootVirtualMachine',
+ params={'id': node.id},
+ method='GET')
+ return True
+
+    def ex_start(self, node):
+        """
+        Starts/Resumes a stopped virtual machine.
+
+        :param node: Node to start.
+        :type node: :class:`CloudStackNode`
+
+        :return: State of the virtual machine after the start request
+                 (e.g. 'Running').
+        :rtype: ``str``
+        """
+        # Blocks until the async start job completes.
+        res = self._async_request(command='startVirtualMachine',
+                                  params={'id': node.id},
+                                  method='GET')
+        return res['virtualmachine']['state']
+
+ def ex_stop(self, node):
+ """
+ Stops/Suspends a running virtual machine
+
+ :param node: Node to stop.
+ :type node: :class:`CloudStackNode`
+
+ :rtype: ``str``
+ """
+ res = self._async_request(command='stopVirtualMachine',
+ params={'id': node.id},
+ method='GET')
+ return res['virtualmachine']['state']
+
+ def ex_list_disk_offerings(self):
+ """
+ Fetch a list of all available disk offerings.
+
+ :rtype: ``list`` of :class:`CloudStackDiskOffering`
+ """
+
+ diskOfferings = []
+
+ diskOfferResponse = self._sync_request(command='listDiskOfferings',
+ method='GET')
+ for diskOfferDict in diskOfferResponse.get('diskoffering', ()):
+ diskOfferings.append(
+ CloudStackDiskOffering(
+ id=diskOfferDict['id'],
+ name=diskOfferDict['name'],
+ size=diskOfferDict['disksize'],
+ customizable=diskOfferDict['iscustomized']))
+
+ return diskOfferings
+
+ def ex_list_networks(self):
+ """
+ List the available networks
+
+ :rtype ``list`` of :class:`CloudStackNetwork`
+ """
+
+ res = self._sync_request(command='listNetworks',
+ method='GET')
+ nets = res.get('network', [])
+
+ networks = []
+ extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network']
+ for net in nets:
+ extra = self._get_extra_dict(net, extra_map)
+
+ if 'tags' in net:
+ extra['tags'] = self._get_resource_tags(net['tags'])
+
+ networks.append(CloudStackNetwork(
+ net['displaytext'],
+ net['name'],
+ net['networkofferingid'],
+ net['id'],
+ net['zoneid'],
+ self,
+ extra=extra))
+
+ return networks
+
+ def ex_list_network_offerings(self):
+ """
+ List the available network offerings
+
+ :rtype ``list`` of :class:`CloudStackNetworkOffering`
+ """
+ res = self._sync_request(command='listNetworkOfferings',
+ method='GET')
+ netoffers = res.get('networkoffering', [])
+
+ networkofferings = []
+
+ for netoffer in netoffers:
+ networkofferings.append(CloudStackNetworkOffering(
+ netoffer['name'],
+ netoffer['displaytext'],
+ netoffer['guestiptype'],
+ netoffer['id'],
+ netoffer['serviceofferingid'],
+ netoffer['forvpc'],
+ self))
+
+ return networkofferings
+
+ def ex_create_network(self, display_text, name, network_offering,
+ location, gateway=None, netmask=None,
+ network_domain=None, vpc_id=None, project_id=None):
+ """
+
+ Creates a Network, only available in advanced zones.
+
+ :param display_text: the display text of the network
+ :type display_text: ``str``
+
+ :param name: the name of the network
+ :type name: ``str``
+
+ :param network_offering: the network offering id
+ :type network_offering: :class:'CloudStackNetworkOffering`
+
+ :param location: Zone
+ :type location: :class:`NodeLocation`
+
+ :param gateway: Optional, the Gateway of this network
+ :type gateway: ``str``
+
+ :param netmask: Optional, the netmask of this network
+ :type netmask: ``str``
+
+ :param network_domain: Optional, the DNS domain of the network
+ :type network_domain: ``str``
+
+ :param vpc_id: Optional, the VPC id the network belongs to
+ :type vpc_id: ``str``
+
+ :param project_id: Optional, the project id the networks belongs to
+ :type project_id: ``str``
+
+ :rtype: :class:`CloudStackNetwork`
+
+ """
+
+ extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network']
+
+ args = {
+ 'displaytext': display_text,
+ 'name': name,
+ 'networkofferingid': network_offering.id,
+ 'zoneid': location.id,
+ }
+
+ if gateway is not None:
+ args['gateway'] = gateway
+
+ if netmask is not None:
+ args['netmask'] = netmask
+
+ if network_domain is not None:
+ args['networkdomain'] = network_domain
+
+ if vpc_id is not None:
+ args['vpcid'] = vpc_id
+
+ if project_id is not None:
+ args['projectid'] = project_id
+
+ """ Cloudstack allows for duplicate network names,
+ this should be handled in the code leveraging libcloud
+ As there could be use cases for duplicate names.
+ e.g. management from ROOT level"""
+
+ # for net in self.ex_list_networks():
+ # if name == net.name:
+ # raise LibcloudError('This network name already exists')
+
+ result = self._sync_request(command='createNetwork',
+ params=args,
+ method='GET')
+
+ result = result['network']
+ extra = self._get_extra_dict(result, extra_map)
+
+ network = CloudStackNetwork(display_text,
+ name,
+ network_offering.id,
+ result['id'],
+ location.id,
+ self,
+ extra=extra)
+
+ return network
+
+ def ex_delete_network(self, network, force=None):
+ """
+
+ Deletes a Network, only available in advanced zones.
+
+ :param network: The network
+ :type network: :class: 'CloudStackNetwork'
+
+ :param force: Force deletion of the network?
+ :type force: ``bool``
+
+ :rtype: ``bool``
+
+ """
+
+ args = {'id': network.id, 'forced': force}
+
+ self._async_request(command='deleteNetwork',
+ params=args,
+ method='GET')
+ return True
+
+ def ex_list_projects(self):
+ """
+ List the available projects
+
+ :rtype ``list`` of :class:`CloudStackProject`
+ """
+
+ res = self._sync_request(command='listProjects',
+ method='GET')
+ projs = res.get('project', [])
+
+ projects = []
+ extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['project']
+ for proj in projs:
+ extra = self._get_extra_dict(proj, extra_map)
+
+ if 'tags' in proj:
+ extra['tags'] = self._get_resource_tags(proj['tags'])
+
+ projects.append(CloudStackProject(
+ id=proj['id'],
+ name=proj['name'],
+ display_text=proj['displaytext'],
+ driver=self,
+ extra=extra))
+
+ return projects
+
+ def create_volume(self, size, name, location=None, snapshot=None):
+ """
+ Creates a data volume
+ Defaults to the first location
+ """
+ for diskOffering in self.ex_list_disk_offerings():
+ if diskOffering.size == size or diskOffering.customizable:
+ break
+ else:
+ raise LibcloudError(
+ 'Disk offering with size=%s not found' % size)
+
+ if location is None:
+ location = self.list_locations()[0]
+
+ params = {'name': name,
+ 'diskOfferingId': diskOffering.id,
+ 'zoneId': location.id}
+
+ if diskOffering.customizable:
+ params['size'] = size
+
+ requestResult = self._async_request(command='createVolume',
+ params=params,
+ method='GET')
+
+ volumeResponse = requestResult['volume']
+
+ return StorageVolume(id=volumeResponse['id'],
+ name=name,
+ size=size,
+ driver=self,
+ extra=dict(name=volumeResponse['name']))
+
+ def destroy_volume(self, volume):
+ """
+ :rtype: ``bool``
+ """
+ self._sync_request(command='deleteVolume',
+ params={'id': volume.id},
+ method='GET')
+ return True
+
+    def attach_volume(self, node, volume, device=None):
+        """
+        @inherits: :class:`NodeDriver.attach_volume`
+        :type node: :class:`CloudStackNode`
+
+        :rtype: ``bool``
+        """
+        # TODO Add handling for device name
+        # NOTE(review): the ``device`` argument is accepted but currently
+        # ignored - the volume is attached at the hypervisor default slot.
+        self._async_request(command='attachVolume',
+                            params={'id': volume.id,
+                                    'virtualMachineId': node.id},
+                            method='GET')
+        return True
+
+ def detach_volume(self, volume):
+ """
+ :rtype: ``bool``
+ """
+ self._async_request(command='detachVolume',
+ params={'id': volume.id},
+ method='GET')
+ return True
+
+ def list_volumes(self, node=None):
+ """
+ List all volumes
+
+ :param node: Only return volumes for the provided node.
+ :type node: :class:`CloudStackNode`
+
+ :rtype: ``list`` of :class:`StorageVolume`
+ """
+ if node:
+ volumes = self._sync_request(command='listVolumes',
+ params={'virtualmachineid': node.id},
+ method='GET')
+ else:
+ volumes = self._sync_request(command='listVolumes',
+ method='GET')
+
+ list_volumes = []
+ extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['volume']
+ for vol in volumes['volume']:
+ extra = self._get_extra_dict(vol, extra_map)
+
+ if 'tags' in vol:
+ extra['tags'] = self._get_resource_tags(vol['tags'])
+
+ list_volumes.append(StorageVolume(id=vol['id'],
+ name=vol['name'],
+ size=vol['size'],
+ driver=self,
+ extra=extra))
+ return list_volumes
+
+ def list_key_pairs(self, **kwargs):
+ """
+ List registered key pairs.
+
+ :param projectid: list objects by project
+ :type projectid: ``str``
+
+ :param page: The page to list the keypairs from
+ :type page: ``int``
+
+ :param keyword: List by keyword
+ :type keyword: ``str``
+
+ :param listall: If set to false, list only resources
+ belonging to the command's caller;
+ if set to true - list resources that
+ the caller is authorized to see.
+ Default value is false
+
+ :type listall: ``bool``
+
+ :param pagesize: The number of results per page
+ :type pagesize: ``int``
+
+ :param account: List resources by account.
+ Must be used with the domainId parameter
+ :type account: ``str``
+
+ :param isrecursive: Defaults to false, but if true,
+ lists all resources from
+ the parent specified by the
+ domainId till leaves.
+ :type isrecursive: ``bool``
+
+ :param fingerprint: A public key fingerprint to look for
+ :type fingerprint: ``str``
+
+ :param name: A key pair name to look for
+ :type name: ``str``
+
+ :param domainid: List only resources belonging to
+ the domain specified
+ :type domainid: ``str``
+
+ :return: A list of key par objects.
+ :rtype: ``list`` of :class:`libcloud.compute.base.KeyPair`
+ """
+ extra_args = kwargs.copy()
+ res = self._sync_request(command='listSSHKeyPairs',
+ params=extra_args,
+ method='GET')
+ key_pairs = res.get('sshkeypair', [])
+ key_pairs = self._to_key_pairs(data=key_pairs)
+ return key_pairs
+
+ def get_key_pair(self, name):
+ params = {'name': name}
+ res = self._sync_request(command='listSSHKeyPairs',
+ params=params,
+ method='GET')
+ key_pairs = res.get('sshkeypair', [])
+
+ if len(key_pairs) == 0:
+ raise KeyPairDoesNotExistError(name=name, driver=self)
+
+ key_pair = self._to_key_pair(data=key_pairs[0])
+ return key_pair
+
+ def create_key_pair(self, name, **kwargs):
+ """
+ Create a new key pair object.
+
+ :param name: Key pair name.
+ :type name: ``str``
+
+ :param name: Name of the keypair (required)
+ :type name: ``str``
+
+ :param projectid: An optional project for the ssh key
+ :type projectid: ``str``
+
+ :param domainid: An optional domainId for the ssh key.
+ If the account parameter is used,
+ domainId must also be used.
+ :type domainid: ``str``
+
+ :param account: An optional account for the ssh key.
+ Must be used with domainId.
+ :type account: ``str``
+
+ :return: Created key pair object.
+ :rtype: :class:`libcloud.compute.base.KeyPair`
+ """
+ extra_args = kwargs.copy()
+
+ params = {'name': name}
+ params.update(extra_args)
+
+ res = self._sync_request(command='createSSHKeyPair',
+ params=params,
+ method='GET')
+ key_pair = self._to_key_pair(data=res['keypair'])
+ return key_pair
+
+ def import_key_pair_from_string(self, name, key_material):
+ """
+ Import a new public key from string.
+
+ :param name: Key pair name.
+ :type name: ``str``
+
+ :param key_material: Public key material.
+ :type key_material: ``str``
+
+ :return: Imported key pair object.
+ :rtype: :class:`libcloud.compute.base.KeyPair`
+ """
+ res = self._sync_request(command='registerSSHKeyPair',
+ params={'name': name,
+ 'publickey': key_material},
+ method='GET')
+ key_pair = self._to_key_pair(data=res['keypair'])
+ return key_pair
+
+ def delete_key_pair(self, key_pair, **kwargs):
+ """
+ Delete an existing key pair.
+
+ :param key_pair: Key pair object.
+ :type key_pair: :class`libcloud.compute.base.KeyPair`
+
+ :param projectid: The project associated with keypair
+ :type projectid: ``str``
+
+ :param domainid: The domain ID associated with the keypair
+ :type domainid: ``str``
+
+ :param account: The account associated with the keypair.
+ Must be used with the domainId parameter.
+ :type account: ``str``
+
+ :return: True of False based on success of Keypair deletion
+ :rtype: ``bool``
+ """
+
+ extra_args = kwargs.copy()
+ params = {'name': key_pair.name}
+ params.update(extra_args)
+
+ res = self._sync_request(command='deleteSSHKeyPair',
+ params=params,
+ method='GET')
+ return res['success'] == 'true'
+
+ def ex_list_public_ips(self):
+ """
+ Lists all Public IP Addresses.
+
+ :rtype: ``list`` of :class:`CloudStackAddress`
+ """
+ ips = []
+
+ res = self._sync_request(command='listPublicIpAddresses',
+ method='GET')
+
+ # Workaround for basic zones
+ if not res:
+ return ips
+
+ for ip in res['publicipaddress']:
+ ips.append(CloudStackAddress(ip['id'],
+ ip['ipaddress'],
+ self,
+ ip['associatednetworkid']))
+ return ips
+
+ def ex_allocate_public_ip(self, location=None):
+ """
+ Allocate a public IP.
+
+ :param location: Zone
+ :type location: :class:`NodeLocation`
+
+ :rtype: :class:`CloudStackAddress`
+ """
+ if location is None:
+ location = self.list_locations()[0]
+
+ addr = self._async_request(command='associateIpAddress',
+ params={'zoneid': location.id},
+ method='GET')
+ addr = addr['ipaddress']
+ addr = CloudStackAddress(addr['id'], addr['ipaddress'], self)
+ return addr
+
+ def ex_release_public_ip(self, address):
+ """
+ Release a public IP.
+
+ :param address: CloudStackAddress which should be used
+ :type address: :class:`CloudStackAddress`
+
+ :rtype: ``bool``
+ """
+ res = self._async_request(command='disassociateIpAddress',
+ params={'id': address.id},
+ method='GET')
+ return res['success']
+
    def ex_list_port_forwarding_rules(self):
        """
        Lists all Port Forwarding Rules

        :rtype: ``list`` of :class:`CloudStackPortForwardingRule`
        """
        rules = []
        result = self._sync_request(command='listPortForwardingRules',
                                    method='GET')
        if result != {}:
            # Resolve each rule's VM id and IP string back into the full
            # node/address objects so the returned rule can reference them.
            public_ips = self.ex_list_public_ips()
            nodes = self.list_nodes()
            for rule in result['portforwardingrule']:
                node = [n for n in nodes
                        if n.id == str(rule['virtualmachineid'])]
                addr = [a for a in public_ips if
                        a.address == rule['ipaddress']]
                # NOTE(review): node[0]/addr[0] raise IndexError if a rule
                # references a VM or address this account cannot list --
                # confirm that cannot happen before relying on this method.
                rules.append(CloudStackPortForwardingRule
                             (node[0],
                              rule['id'],
                              addr[0],
                              rule['protocol'],
                              rule['publicport'],
                              rule['privateport'],
                              rule['publicendport'],
                              rule['privateendport']))

        return rules
+
    def ex_create_port_forwarding_rule(self, node, address,
                                       private_port, public_port,
                                       protocol,
                                       public_end_port=None,
                                       private_end_port=None,
                                       openfirewall=True):
        """
        Creates a Port Forwarding Rule, used for Source NAT

        :param address: IP address of the Source NAT
        :type address: :class:`CloudStackAddress`

        :param private_port: Port of the virtual machine
        :type private_port: ``int``

        :param protocol: Protocol of the rule
        :type protocol: ``str``

        :param public_port: Public port on the Source NAT address
        :type public_port: ``int``

        :param node: The virtual machine
        :type node: :class:`CloudStackNode`

        :param public_end_port: End of the public port range (optional).
        :type public_end_port: ``int``

        :param private_end_port: End of the private port range (optional).
        :type private_end_port: ``int``

        :param openfirewall: Whether to open a matching firewall rule.
        :type openfirewall: ``bool``

        :rtype: :class:`CloudStackPortForwardingRule`
        """
        args = {
            'ipaddressid': address.id,
            'protocol': protocol,
            'privateport': int(private_port),
            'publicport': int(public_port),
            'virtualmachineid': node.id,
            'openfirewall': openfirewall
        }
        # Port-range endpoints are only sent when the caller supplied them.
        if public_end_port:
            args['publicendport'] = int(public_end_port)
        if private_end_port:
            args['privateendport'] = int(private_end_port)

        result = self._async_request(command='createPortForwardingRule',
                                     params=args,
                                     method='GET')
        rule = CloudStackPortForwardingRule(node,
                                            result['portforwardingrule']
                                            ['id'],
                                            address,
                                            protocol,
                                            public_port,
                                            private_port,
                                            public_end_port,
                                            private_end_port)
        # Keep the node's cached state in sync with the new rule.
        # (extra['port_forwarding_rules'] is initialised in _to_node.)
        node.extra['port_forwarding_rules'].append(rule)
        node.public_ips.append(address.address)
        return rule
+
+ def ex_delete_port_forwarding_rule(self, node, rule):
+ """
+ Remove a Port forwarding rule.
+
+ :param node: Node used in the rule
+ :type node: :class:`CloudStackNode`
+
+ :param rule: Forwarding rule which should be used
+ :type rule: :class:`CloudStackPortForwardingRule`
+
+ :rtype: ``bool``
+ """
+
+ node.extra['port_forwarding_rules'].remove(rule)
+ node.public_ips.remove(rule.address.address)
+ res = self._async_request(command='deletePortForwardingRule',
+ params={'id': rule.id},
+ method='GET')
+ return res['success']
+
+ def ex_create_ip_forwarding_rule(self, node, address, protocol,
+ start_port, end_port=None):
+ """
+ "Add a NAT/firewall forwarding rule.
+
+ :param node: Node which should be used
+ :type node: :class:`CloudStackNode`
+
+ :param address: CloudStackAddress which should be used
+ :type address: :class:`CloudStackAddress`
+
+ :param protocol: Protocol which should be used (TCP or UDP)
+ :type protocol: ``str``
+
+ :param start_port: Start port which should be used
+ :type start_port: ``int``
+
+ :param end_port: End port which should be used
+ :type end_port: ``int``
+
+ :rtype: :class:`CloudStackForwardingRule`
+ """
+
+ protocol = protocol.upper()
+ if protocol not in ('TCP', 'UDP'):
+ return None
+
+ args = {
+ 'ipaddressid': address.id,
+ 'protocol': protocol,
+ 'startport': int(start_port)
+ }
+ if end_port is not None:
+ args['endport'] = int(end_port)
+
+ result = self._async_request(command='createIpForwardingRule',
+ params=args,
+ method='GET')
+ result = result['ipforwardingrule']
+ rule = CloudStackIPForwardingRule(node, result['id'], address,
+ protocol, start_port, end_port)
+ node.extra['ip_forwarding_rules'].append(rule)
+ return rule
+
+ def ex_delete_ip_forwarding_rule(self, node, rule):
+ """
+ Remove a NAT/firewall forwarding rule.
+
+ :param node: Node which should be used
+ :type node: :class:`CloudStackNode`
+
+ :param rule: Forwarding rule which should be used
+ :type rule: :class:`CloudStackForwardingRule`
+
+ :rtype: ``bool``
+ """
+
+ node.extra['ip_forwarding_rules'].remove(rule)
+ self._async_request(command='deleteIpForwardingRule',
+ params={'id': rule.id},
+ method='GET')
+ return True
+
+ def ex_list_keypairs(self, **kwargs):
+ """
+ List Registered SSH Key Pairs
+
+ :param projectid: list objects by project
+ :type projectid: ``str``
+
+ :param page: The page to list the keypairs from
+ :type page: ``int``
+
+ :param keyword: List by keyword
+ :type keyword: ``str``
+
+ :param listall: If set to false, list only resources
+ belonging to the command's caller;
+ if set to true - list resources that
+ the caller is authorized to see.
+ Default value is false
+
+ :type listall: ``bool``
+
+ :param pagesize: The number of results per page
+ :type pagesize: ``int``
+
+ :param account: List resources by account.
+ Must be used with the domainId parameter
+ :type account: ``str``
+
+ :param isrecursive: Defaults to false, but if true,
+ lists all resources from
+ the parent specified by the
+ domainId till leaves.
+ :type isrecursive: ``bool``
+
+ :param fingerprint: A public key fingerprint to look for
+ :type fingerprint: ``str``
+
+ :param name: A key pair name to look for
+ :type name: ``str``
+
+ :param domainid: List only resources belonging to
+ the domain specified
+ :type domainid: ``str``
+
+ :return: A list of keypair dictionaries
+ :rtype: ``list`` of ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'list_key_pairs method')
+
+ key_pairs = self.list_key_pairs(**kwargs)
+
+ result = []
+
+ for key_pair in key_pairs:
+ item = {
+ 'name': key_pair.name,
+ 'fingerprint': key_pair.fingerprint,
+ 'privateKey': key_pair.private_key
+ }
+ result.append(item)
+
+ return result
+
+ def ex_create_keypair(self, name, **kwargs):
+ """
+ Creates a SSH KeyPair, returns fingerprint and private key
+
+ :param name: Name of the keypair (required)
+ :type name: ``str``
+
+ :param projectid: An optional project for the ssh key
+ :type projectid: ``str``
+
+ :param domainid: An optional domainId for the ssh key.
+ If the account parameter is used,
+ domainId must also be used.
+ :type domainid: ``str``
+
+ :param account: An optional account for the ssh key.
+ Must be used with domainId.
+ :type account: ``str``
+
+ :return: A keypair dictionary
+ :rtype: ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'create_key_pair method')
+
+ key_pair = self.create_key_pair(name=name, **kwargs)
+
+ result = {
+ 'name': key_pair.name,
+ 'fingerprint': key_pair.fingerprint,
+ 'privateKey': key_pair.private_key
+ }
+
+ return result
+
+ def ex_import_keypair_from_string(self, name, key_material):
+ """
+ Imports a new public key where the public key is passed in as a string
+
+ :param name: The name of the public key to import.
+ :type name: ``str``
+
+ :param key_material: The contents of a public key file.
+ :type key_material: ``str``
+
+ :rtype: ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'import_key_pair_from_string method')
+
+ key_pair = self.import_key_pair_from_string(name=name,
+ key_material=key_material)
+ result = {
+ 'keyName': key_pair.name,
+ 'keyFingerprint': key_pair.fingerprint
+ }
+
+ return result
+
+ def ex_import_keypair(self, name, keyfile):
+ """
+ Imports a new public key where the public key is passed via a filename
+
+ :param name: The name of the public key to import.
+ :type name: ``str``
+
+ :param keyfile: The filename with path of the public key to import.
+ :type keyfile: ``str``
+
+ :rtype: ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'import_key_pair_from_file method')
+
+ key_pair = self.import_key_pair_from_file(name=name,
+ key_file_path=keyfile)
+ result = {
+ 'keyName': key_pair.name,
+ 'keyFingerprint': key_pair.fingerprint
+ }
+
+ return result
+
+ def ex_delete_keypair(self, keypair, **kwargs):
+ """
+ Deletes an existing SSH KeyPair
+
+ :param keypair: Name of the keypair (required)
+ :type keypair: ``str``
+
+ :param projectid: The project associated with keypair
+ :type projectid: ``str``
+
+ :param domainid: The domain ID associated with the keypair
+ :type domainid: ``str``
+
+ :param account: The account associated with the keypair.
+ Must be used with the domainId parameter.
+ :type account: ``str``
+
+ :return: True of False based on success of Keypair deletion
+ :rtype: ``bool``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'delete_key_pair method')
+
+ key_pair = KeyPair(name=keypair, public_key=None, fingerprint=None,
+ driver=self)
+
+ return self.delete_key_pair(key_pair=key_pair)
+
+ def ex_list_security_groups(self, **kwargs):
+ """
+ Lists Security Groups
+
+ :param domainid: List only resources belonging to the domain specified
+ :type domainid: ``str``
+
+ :param account: List resources by account. Must be used with
+ the domainId parameter.
+ :type account: ``str``
+
+ :param listall: If set to false, list only resources belonging to
+ the command's caller; if set to true
+ list resources that the caller is
+ authorized to see.
+ Default value is false
+ :type listall: ``bool``
+
+ :param pagesize: Number of entries per page
+ :type pagesize: ``int``
+
+ :param keyword: List by keyword
+ :type keyword: ``str``
+
+ :param tags: List resources by tags (key/value pairs)
+ :type tags: ``dict``
+
+ :param id: list the security group by the id provided
+ :type id: ``str``
+
+ :param securitygroupname: lists security groups by name
+ :type securitygroupname: ``str``
+
+ :param virtualmachineid: lists security groups by virtual machine id
+ :type virtualmachineid: ``str``
+
+ :param projectid: list objects by project
+ :type projectid: ``str``
+
+ :param isrecursive: (boolean) defaults to false, but if true,
+ lists all resources from the parent
+ specified by the domainId till leaves.
+ :type isrecursive: ``bool``
+
+ :param page: (integer)
+ :type page: ``int``
+
+ :rtype ``list``
+ """
+ extra_args = kwargs.copy()
+ res = self._sync_request(command='listSecurityGroups',
+ params=extra_args,
+ method='GET')
+
+ security_groups = res.get('securitygroup', [])
+ return security_groups
+
+ def ex_create_security_group(self, name, **kwargs):
+ """
+ Creates a new Security Group
+
+ :param name: name of the security group (required)
+ :type name: ``str``
+
+ :param account: An optional account for the security group.
+ Must be used with domainId.
+ :type account: ``str``
+
+ :param domainid: An optional domainId for the security group.
+ If the account parameter is used,
+ domainId must also be used.
+ :type domainid: ``str``
+
+ :param description: The description of the security group
+ :type description: ``str``
+
+ :param projectid: Deploy vm for the project
+ :type projectid: ``str``
+
+ :rtype: ``dict``
+ """
+
+ extra_args = kwargs.copy()
+
+ for sg in self.ex_list_security_groups():
+ if name in sg['name']:
+ raise LibcloudError('This Security Group name already exists')
+
+ params = {'name': name}
+ params.update(extra_args)
+
+ return self._sync_request(command='createSecurityGroup',
+ params=params,
+ method='GET')['securitygroup']
+
+ def ex_delete_security_group(self, name):
+ """
+ Deletes a given Security Group
+
+ :param domainid: The domain ID of account owning
+ the security group
+ :type domainid: ``str``
+
+ :param id: The ID of the security group.
+ Mutually exclusive with name parameter
+ :type id: ``str``
+
+ :param name: The ID of the security group.
+ Mutually exclusive with id parameter
+ :type name: ``str``
+
+ :param account: The account of the security group.
+ Must be specified with domain ID
+ :type account: ``str``
+
+ :param projectid: The project of the security group
+ :type projectid: ``str``
+
+ :rtype: ``bool``
+ """
+
+ return self._sync_request(command='deleteSecurityGroup',
+ params={'name': name},
+ method='GET')['success']
+
+ def ex_authorize_security_group_ingress(self, securitygroupname,
+ protocol, cidrlist, startport,
+ endport=None):
+ """
+ Creates a new Security Group Ingress rule
+
+ :param domainid: An optional domainId for the security group.
+ If the account parameter is used,
+ domainId must also be used.
+ :type domainid: ``str``
+
+ :param startport: Start port for this ingress rule
+ :type startport: ``int``
+
+ :param securitygroupid: The ID of the security group.
+ Mutually exclusive with securityGroupName
+ parameter
+ :type securitygroupid: ``str``
+
+ :param cidrlist: The cidr list associated
+ :type cidrlist: ``list``
+
+ :param usersecuritygrouplist: user to security group mapping
+ :type usersecuritygrouplist: ``dict``
+
+ :param securitygroupname: The name of the security group.
+ Mutually exclusive with
+ securityGroupName parameter
+ :type securitygroupname: ``str``
+
+ :param account: An optional account for the security group.
+ Must be used with domainId.
+ :type account: ``str``
+
+ :param icmpcode: Error code for this icmp message
+ :type icmpcode: ``int``
+
+ :param protocol: TCP is default. UDP is the other supported protocol
+ :type protocol: ``str``
+
+ :param icmptype: type of the icmp message being sent
+ :type icmptype: ``int``
+
+ :param projectid: An optional project of the security group
+ :type projectid: ``str``
+
+ :param endport: end port for this ingress rule
+ :type endport: ``int``
+
+ :rtype: ``list``
+ """
+
+ protocol = protocol.upper()
+ if protocol not in ('TCP', 'ICMP'):
+ raise LibcloudError('Only TCP and ICMP are allowed')
+
+ args = {
+ 'securitygroupname': securitygroupname,
+ 'protocol': protocol,
+ 'startport': int(startport),
+ 'cidrlist': cidrlist
+ }
+ if endport is None:
+ args['endport'] = int(startport)
+
+ return self._async_request(command='authorizeSecurityGroupIngress',
+ params=args,
+ method='GET')['securitygroup']
+
+ def ex_revoke_security_group_ingress(self, rule_id):
+ """
+ Revoke/delete an ingress security rule
+
+ :param id: The ID of the ingress security rule
+ :type id: ``str``
+
+ :rtype: ``bool``
+ """
+
+ self._async_request(command='revokeSecurityGroupIngress',
+ params={'id': rule_id},
+ method='GET')
+ return True
+
+ def ex_register_iso(self, name, url, location=None, **kwargs):
+ """
+ Registers an existing ISO by URL.
+
+ :param name: Name which should be used
+ :type name: ``str``
+
+ :param url: Url should be used
+ :type url: ``str``
+
+ :param location: Location which should be used
+ :type location: :class:`NodeLocation`
+
+ :rtype: ``str``
+ """
+ if location is None:
+ location = self.list_locations()[0]
+
+ params = {'name': name,
+ 'displaytext': name,
+ 'url': url,
+ 'zoneid': location.id}
+ params['bootable'] = kwargs.pop('bootable', False)
+ if params['bootable']:
+ os_type_id = kwargs.pop('ostypeid', None)
+
+ if not os_type_id:
+ raise LibcloudError('If bootable=True, ostypeid is required!')
+
+ params['ostypeid'] = os_type_id
+
+ return self._sync_request(command='registerIso',
+ name=name,
+ displaytext=name,
+ url=url,
+ zoneid=location.id,
+ params=params)
+
+ def ex_limits(self):
+ """
+ Extra call to get account's resource limits, such as
+ the amount of instances, volumes, snapshots and networks.
+
+ CloudStack uses integers as the resource type so we will convert
+ them to a more human readable string using the resource map
+
+ A list of the resource type mappings can be found at
+ http://goo.gl/17C6Gk
+
+ :return: dict
+ :rtype: ``dict``
+ """
+
+ result = self._sync_request(command='listResourceLimits',
+ method='GET')
+
+ limits = {}
+ resource_map = {
+ 0: 'max_instances',
+ 1: 'max_public_ips',
+ 2: 'max_volumes',
+ 3: 'max_snapshots',
+ 4: 'max_images',
+ 5: 'max_projects',
+ 6: 'max_networks',
+ 7: 'max_vpc',
+ 8: 'max_cpu',
+ 9: 'max_memory',
+ 10: 'max_primary_storage',
+ 11: 'max_secondary_storage'
+ }
+
+ for limit in result.get('resourcelimit', []):
+ # We will ignore unknown types
+ resource = resource_map.get(int(limit['resourcetype']), None)
+ if not resource:
+ continue
+ limits[resource] = int(limit['max'])
+
+ return limits
+
+ def ex_create_tags(self, resource_ids, resource_type, tags):
+ """
+ Create tags for a resource (Node/StorageVolume/etc).
+ A list of resource types can be found at http://goo.gl/6OKphH
+
+ :param resource_ids: Resource IDs to be tagged. The resource IDs must
+ all be associated with the resource_type.
+ For example, for virtual machines (UserVm) you
+ can only specify a list of virtual machine IDs.
+ :type resource_ids: ``list`` of resource IDs
+
+ :param resource_type: Resource type (eg: UserVm)
+ :type resource_type: ``str``
+
+ :param tags: A dictionary or other mapping of strings to strings,
+ associating tag names with tag values.
+ :type tags: ``dict``
+
+ :rtype: ``bool``
+ """
+ params = {'resourcetype': resource_type,
+ 'resourceids': ','.join(resource_ids)}
+
+ for i, key in enumerate(tags):
+ params['tags[%d].key' % i] = key
+ params['tags[%d].value' % i] = tags[key]
+
+ self._async_request(command='createTags',
+ params=params,
+ method='GET')
+ return True
+
+ def ex_delete_tags(self, resource_ids, resource_type, tag_keys):
+ """
+ Delete tags from a resource.
+
+ :param resource_ids: Resource IDs to be tagged. The resource IDs must
+ all be associated with the resource_type.
+ For example, for virtual machines (UserVm) you
+ can only specify a list of virtual machine IDs.
+ :type resource_ids: ``list`` of resource IDs
+
+ :param resource_type: Resource type (eg: UserVm)
+ :type resource_type: ``str``
+
+ :param tag_keys: A list of keys to delete. CloudStack only requires
+ the keys from the key/value pair.
+ :type tag_keys: ``list``
+
+ :rtype: ``bool``
+ """
+ params = {'resourcetype': resource_type,
+ 'resourceids': ','.join(resource_ids)}
+
+ for i, key in enumerate(tag_keys):
+ params['tags[%s].key' % i] = key
+
+ self._async_request(command='deleteTags',
+ params=params,
+ method='GET')
+
+ return True
+
    def _to_node(self, data, public_ips=None):
        """
        Convert an API virtual machine dict into a CloudStackNode.

        :param data: Node data object.
        :type data: ``dict``

        :param public_ips: A list of additional IP addresses belonging to
                           this node. (optional)
        :type public_ips: ``list`` or ``None``
        """
        id = data['id']

        # Prefer 'name'; fall back to 'displayname'.
        if 'name' in data:
            name = data['name']
        elif 'displayname' in data:
            name = data['displayname']
        else:
            name = None

        # NOTE(review): raises KeyError for states absent from
        # NODE_STATE_MAP -- confirm the map covers every API state.
        state = self.NODE_STATE_MAP[data['state']]

        public_ips = public_ips if public_ips else []
        private_ips = []

        # Classify each NIC address by RFC 1918 subnet membership.
        for nic in data['nic']:
            if is_private_subnet(nic['ipaddress']):
                private_ips.append(nic['ipaddress'])
            else:
                public_ips.append(nic['ipaddress'])

        # Reduce security group dicts to their names only.
        security_groups = data.get('securitygroup', [])

        if security_groups:
            security_groups = [sg['name'] for sg in security_groups]

        created = data.get('created', False)

        extra = self._get_extra_dict(data,
                                     RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])

        # Add additional parameters to extra. The empty lists are later
        # populated by the ex_create_*_forwarding_rule methods.
        extra['security_group'] = security_groups
        extra['ip_addresses'] = []
        extra['ip_forwarding_rules'] = []
        extra['port_forwarding_rules'] = []
        extra['created'] = created

        if 'tags' in data:
            extra['tags'] = self._get_resource_tags(data['tags'])

        node = CloudStackNode(id=id, name=name, state=state,
                              public_ips=public_ips, private_ips=private_ips,
                              driver=self, extra=extra)
        return node
+
    def _to_key_pairs(self, data):
        """
        Convert a list of API key pair dicts into KeyPair objects.

        :param data: List of ``sshkeypair`` dicts from the API.
        :type data: ``list`` of ``dict``

        :rtype: ``list`` of :class:`libcloud.compute.base.KeyPair`
        """
        key_pairs = [self._to_key_pair(data=item) for item in data]
        return key_pairs
+
    def _to_key_pair(self, data):
        """
        Convert a single API key pair dict into a KeyPair object.

        :param data: One ``sshkeypair`` dict from the API. The
                     ``publickey``/``privatekey`` entries are optional
                     (only ``createSSHKeyPair`` returns the private key).
        :type data: ``dict``

        :rtype: :class:`libcloud.compute.base.KeyPair`
        """
        key_pair = KeyPair(name=data['name'],
                           fingerprint=data['fingerprint'],
                           public_key=data.get('publickey', None),
                           private_key=data.get('privatekey', None),
                           driver=self)
        return key_pair
+
+ def _get_resource_tags(self, tag_set):
+ """
+ Parse tags from the provided element and return a dictionary with
+ key/value pairs.
+
+ :param tag_set: A list of key/value tag pairs
+ :type tag_set: ``list```
+
+ :rtype: ``dict``
+ """
+ tags = {}
+
+ for tag in tag_set:
+ for key, value in tag.iteritems():
+ key = tag['key']
+ value = tag['value']
+ tags[key] = value
+
+ return tags
+
+ def _get_extra_dict(self, response, mapping):
+ """
+ Extract attributes from the element based on rules provided in the
+ mapping dictionary.
+
+ :param response: The JSON response to parse the values from.
+ :type response: ``dict``
+
+ :param mapping: Dictionary with the extra layout
+ :type mapping: ``dict``
+
+ :rtype: ``dict``
+ """
+ extra = {}
+ for attribute, values in mapping.items():
+ transform_func = values['transform_func']
+ value = response.get(values['key_name'], None)
+
+ if value is not None:
+ extra[attribute] = transform_func(value)
+ else:
+ extra[attribute] = None
+
+ return extra
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/digitalocean.py b/awx/lib/site-packages/libcloud/compute/drivers/digitalocean.py
new file mode 100644
index 0000000000..7dbed418c0
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/digitalocean.py
@@ -0,0 +1,224 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Digital Ocean Driver
+"""
+
+from libcloud.utils.py3 import httplib
+
+from libcloud.common.base import ConnectionUserAndKey, JsonResponse
+from libcloud.compute.types import Provider, NodeState, InvalidCredsError
+from libcloud.compute.base import NodeDriver
+from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
+
+
class DigitalOceanResponse(JsonResponse):
    """
    Response class for the DigitalOcean driver.

    Only error parsing is customised; successful responses use the plain
    JSON handling inherited from :class:`JsonResponse`.
    """

    def parse_error(self):
        """
        Turn an error response into an exception (for credential errors)
        or an error message string returned to the caller.
        """
        if self.status == httplib.FOUND and '/api/error' in self.body:
            # Hacky, but DigitalOcean error responses are awful
            # (a 302 redirect to /api/error signals bad credentials).
            raise InvalidCredsError(self.body)
        elif self.status == httplib.UNAUTHORIZED:
            body = self.parse_body()
            raise InvalidCredsError(body['message'])
        else:
            body = self.parse_body()

            if 'error_message' in body:
                error = '%s (code: %s)' % (body['error_message'], self.status)
            else:
                error = body
            # Non-credential errors are returned, not raised.
            return error
+
+
class SSHKey(object):
    """
    Represents an SSH key registered with the DigitalOcean account.
    """

    def __init__(self, id, name, pub_key):
        # id: provider-assigned key id; name: user-visible label;
        # pub_key: public key material (may be None when listing).
        self.id = id
        self.name = name
        self.pub_key = pub_key

    def __repr__(self):
        # Bug fix: the format string was empty (''), so the
        # %-interpolation raised TypeError ('not all arguments
        # converted during string formatting') whenever the object
        # was repr()'d.
        return (('<SSHKey: id=%s, name=%s, pub_key=%s>') %
                (self.id, self.name, self.pub_key))
+
+
class DigitalOceanConnection(ConnectionUserAndKey):
    """
    Connection class for the DigitalOcean driver.
    """

    host = 'api.digitalocean.com'
    responseCls = DigitalOceanResponse

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request

        This method adds ``client_id`` and ``api_key`` to
        the request.

        Note: the v1 API authenticates via these query parameters
        rather than via request headers.
        """
        params['client_id'] = self.user_id
        params['api_key'] = self.key
        return params
+
+
class DigitalOceanNodeDriver(NodeDriver):
    """
    DigitalOceanNode node driver.
    """

    connectionCls = DigitalOceanConnection

    type = Provider.DIGITAL_OCEAN
    name = 'Digital Ocean'
    website = 'https://www.digitalocean.com'

    # NOTE(review): 'off' maps to REBOOTING rather than a stopped state --
    # confirm this is intentional before relying on reported node states.
    NODE_STATE_MAP = {'new': NodeState.PENDING,
                      'off': NodeState.REBOOTING,
                      'active': NodeState.RUNNING}

    def list_nodes(self):
        """List all droplets in the account as :class:`Node` objects."""
        data = self.connection.request('/droplets').object['droplets']
        return list(map(self._to_node, data))

    def list_locations(self):
        """List available regions as :class:`NodeLocation` objects."""
        data = self.connection.request('/regions').object['regions']
        return list(map(self._to_location, data))

    def list_images(self):
        """List available images as :class:`NodeImage` objects."""
        data = self.connection.request('/images').object['images']
        return list(map(self._to_image, data))

    def list_sizes(self):
        """List available droplet sizes as :class:`NodeSize` objects."""
        data = self.connection.request('/sizes').object['sizes']
        return list(map(self._to_size, data))

    def create_node(self, name, size, image, location, ex_ssh_key_ids=None,
                    **kwargs):
        """
        Create a node.

        :keyword ex_ssh_key_ids: A list of ssh key ids which will be added
                                 to the server. (optional)
        :type ex_ssh_key_ids: ``list`` of ``str``

        :return: The newly created node.
        :rtype: :class:`Node`
        """
        params = {'name': name, 'size_id': size.id, 'image_id': image.id,
                  'region_id': location.id}

        if ex_ssh_key_ids:
            # The API expects a single comma-separated string of key ids.
            params['ssh_key_ids'] = ','.join(ex_ssh_key_ids)

        data = self.connection.request('/droplets/new', params=params).object
        return self._to_node(data=data['droplet'])

    def reboot_node(self, node):
        """Reboot a droplet. Returns ``True`` on HTTP 200."""
        res = self.connection.request('/droplets/%s/reboot/' % (node.id))
        return res.status == httplib.OK

    def destroy_node(self, node):
        """
        Destroy a droplet. ``scrub_data=1`` asks DigitalOcean to wipe
        the disk before releasing it. Returns ``True`` on HTTP 200.
        """
        params = {'scrub_data': '1'}
        res = self.connection.request('/droplets/%s/destroy/' % (node.id),
                                      params=params)
        return res.status == httplib.OK

    def ex_rename_node(self, node, name):
        """Rename a droplet. Returns ``True`` on HTTP 200."""
        params = {'name': name}
        res = self.connection.request('/droplets/%s/rename/' % (node.id),
                                      params=params)
        return res.status == httplib.OK

    def ex_list_ssh_keys(self):
        """
        List all the available SSH keys.

        :return: Available SSH keys.
        :rtype: ``list`` of :class:`SSHKey`
        """
        data = self.connection.request('/ssh_keys').object['ssh_keys']
        return list(map(self._to_ssh_key, data))

    def ex_create_ssh_key(self, name, ssh_key_pub):
        """
        Create a new SSH key.

        :param name: Key name (required)
        :type name: ``str``

        :param ssh_key_pub: Valid public key string (required)
        :type ssh_key_pub: ``str``
        """
        params = {'name': name, 'ssh_pub_key': ssh_key_pub}
        data = self.connection.request('/ssh_keys/new/', method='GET',
                                       params=params).object
        # NOTE(review): this sanity check disappears under `python -O`
        # (asserts are stripped); a KeyError below would surface instead.
        assert 'ssh_key' in data
        return self._to_ssh_key(data=data['ssh_key'])

    def ex_destroy_ssh_key(self, key_id):
        """
        Delete an existing SSH key.

        :param key_id: SSH key id (required)
        :type key_id: ``str``
        """
        res = self.connection.request('/ssh_keys/%s/destroy/' % (key_id))
        return res.status == httplib.OK

    def _to_node(self, data):
        """Convert an API droplet dict into a :class:`Node`."""
        extra_keys = ['backups_active', 'region_id']
        if 'status' in data:
            state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
        else:
            state = NodeState.UNKNOWN

        if 'ip_address' in data and data['ip_address'] is not None:
            public_ips = [data['ip_address']]
        else:
            public_ips = []

        # Only carry over the whitelisted provider-specific attributes.
        extra = {}
        for key in extra_keys:
            if key in data:
                extra[key] = data[key]

        node = Node(id=data['id'], name=data['name'], state=state,
                    public_ips=public_ips, private_ips=None, extra=extra,
                    driver=self)
        return node

    def _to_image(self, data):
        """Convert an API image dict into a :class:`NodeImage`."""
        extra = {'distribution': data['distribution']}
        return NodeImage(id=data['id'], name=data['name'], extra=extra,
                         driver=self)

    def _to_location(self, data):
        """Convert an API region dict into a :class:`NodeLocation`."""
        return NodeLocation(id=data['id'], name=data['name'], country=None,
                            driver=self)

    def _to_size(self, data):
        """
        Convert an API size dict into a :class:`NodeSize`.

        The RAM amount is parsed out of the human readable size name
        (e.g. '512MB', '1GB') and normalised to megabytes.
        """
        ram = data['name'].lower()

        if 'mb' in ram:
            ram = int(ram.replace('mb', ''))
        elif 'gb' in ram:
            ram = int(ram.replace('gb', '')) * 1024

        return NodeSize(id=data['id'], name=data['name'], ram=ram, disk=0,
                        bandwidth=0, price=0, driver=self)

    def _to_ssh_key(self, data):
        """Convert an API ssh key dict into an :class:`SSHKey`."""
        return SSHKey(id=data['id'], name=data['name'],
                      pub_key=data.get('ssh_pub_key', None))
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/dreamhost.py b/awx/lib/site-packages/libcloud/compute/drivers/dreamhost.py
new file mode 100644
index 0000000000..cba445c39f
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/dreamhost.py
@@ -0,0 +1,242 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+DreamHost Driver
+"""
+
+import copy
+
+from libcloud.common.base import ConnectionKey, JsonResponse
+from libcloud.common.types import InvalidCredsError
+from libcloud.compute.base import Node, NodeDriver, NodeSize
+from libcloud.compute.base import NodeImage
+from libcloud.compute.types import Provider, NodeState
+
+# DreamHost Private Servers can be resized on the fly, but Libcloud doesn't
+# currently support extensions to its interface, so we'll put some basic sizes
+# in for node creation.
+
+# RAM figures are in megabytes. DreamHost does not report disk or
+# bandwidth for PS instances, so those fields are left as None.
+DH_PS_SIZES = {
+    'minimum': {
+        'id': 'minimum',
+        'name': 'Minimum DH PS size',
+        'ram': 300,
+        'disk': None,
+        'bandwidth': None
+    },
+    'maximum': {
+        'id': 'maximum',
+        'name': 'Maximum DH PS size',
+        'ram': 4000,
+        'disk': None,
+        'bandwidth': None
+    },
+    'default': {
+        'id': 'default',
+        'name': 'Default DH PS size',
+        'ram': 2300,
+        'disk': None,
+        'bandwidth': None
+    },
+    'low': {
+        'id': 'low',
+        'name': 'DH PS with 1GB RAM',
+        'ram': 1000,
+        'disk': None,
+        'bandwidth': None
+    },
+    'high': {
+        'id': 'high',
+        'name': 'DH PS with 3GB RAM',
+        'ram': 3000,
+        'disk': None,
+        'bandwidth': None
+    },
+}
+
+
+class DreamhostAPIException(Exception):
+ def __str__(self):
+ return self.args[0]
+
+ def __repr__(self):
+ return "" % (self.args[0])
+
+
+class DreamhostResponse(JsonResponse):
+    """
+    Response class for DreamHost PS
+    """
+
+    def parse_body(self):
+        """
+        Return the ``data`` portion of a successful API response.
+
+        On a non-success result, ``_api_parse_error`` itself raises
+        before the surrounding ``raise Exception(...)`` can even build
+        its argument, so the more specific exception (InvalidCredsError
+        or DreamhostAPIException) is what actually propagates.
+        """
+        resp = super(DreamhostResponse, self).parse_body()
+        if resp['result'] != 'success':
+            raise Exception(self._api_parse_error(resp))
+        return resp['data']
+
+    def parse_error(self):
+        # NOTE(review): raises a bare Exception with no message; the
+        # original error body is discarded here.
+        raise Exception
+
+    def _api_parse_error(self, response):
+        """
+        Translate an API error payload into an exception.
+
+        Always raises; never returns a value.
+
+        :raises InvalidCredsError: when the API reports an invalid key.
+        :raises DreamhostAPIException: for every other error payload.
+        """
+        if 'data' in response:
+            if response['data'] == 'invalid_api_key':
+                raise InvalidCredsError(
+                    "Oops! You've entered an invalid API key")
+            else:
+                raise DreamhostAPIException(response['data'])
+        else:
+            raise DreamhostAPIException("Unknown problem: %s" % (self.body))
+
+
+class DreamhostConnection(ConnectionKey):
+    """
+    Connection class to connect to DreamHost's API servers
+    """
+
+    # Single fixed API endpoint; responses are requested as JSON so
+    # DreamhostResponse can parse them.
+    host = 'api.dreamhost.com'
+    responseCls = DreamhostResponse
+    format = 'json'
+
+    def add_default_params(self, params):
+        """
+        Add key and format parameters to the request. Eventually should add
+        unique_id to prevent re-execution of a single request.
+        """
+        # self.key is the account API key given to the driver.
+        params['key'] = self.key
+        params['format'] = self.format
+        # params['unique_id'] = generate_unique_id()
+        return params
+
+
+class DreamhostNodeDriver(NodeDriver):
+    """
+    Node Driver for DreamHost PS
+    """
+    type = Provider.DREAMHOST
+    api_name = 'dreamhost'
+    name = "Dreamhost"
+    website = 'http://dreamhost.com/'
+    connectionCls = DreamhostConnection
+
+    # Hardcoded size table (DH_PS_SIZES above); DreamHost offers no API
+    # call to enumerate sizes.
+    _sizes = DH_PS_SIZES
+
+    def create_node(self, **kwargs):
+        """Create a new Dreamhost node
+
+        @inherits: :class:`NodeDriver.create_node`
+
+        :keyword ex_movedata: Copy all your existing users to this new PS
+        :type ex_movedata: ``str``
+        """
+        size = kwargs['size'].ram
+        params = {
+            'cmd': 'dreamhost_ps-add_ps',
+            # NOTE(review): the docstring advertises 'ex_movedata' but the
+            # code reads 'movedata', so a caller passing ex_movedata is
+            # silently ignored -- confirm which keyword is intended.
+            'movedata': kwargs.get('movedata', 'no'),
+            'type': kwargs['image'].name,
+            'size': size
+        }
+        data = self.connection.request('/', params).object
+        # The API returns only the new PS name ('added_web'); the node
+        # is still provisioning, hence PENDING and no IPs yet.
+        return Node(
+            id=data['added_web'],
+            name=data['added_web'],
+            state=NodeState.PENDING,
+            public_ips=[],
+            private_ips=[],
+            driver=self.connection.driver,
+            extra={
+                'type': kwargs['image'].name
+            }
+        )
+
+    def destroy_node(self, node):
+        # Returns True on success; API-level errors are reported as a
+        # False return instead of an exception.
+        params = {
+            'cmd': 'dreamhost_ps-remove_ps',
+            'ps': node.id
+        }
+        try:
+            return self.connection.request('/', params).success()
+        except DreamhostAPIException:
+            return False
+
+    def reboot_node(self, node):
+        # Same error-to-False convention as destroy_node.
+        params = {
+            'cmd': 'dreamhost_ps-reboot',
+            'ps': node.id
+        }
+        try:
+            return self.connection.request('/', params).success()
+        except DreamhostAPIException:
+            return False
+
+    def list_nodes(self, **kwargs):
+        data = self.connection.request(
+            '/', {'cmd': 'dreamhost_ps-list_ps'}).object
+        return [self._to_node(n) for n in data]
+
+    def list_images(self, **kwargs):
+        data = self.connection.request(
+            '/', {'cmd': 'dreamhost_ps-list_images'}).object
+        images = []
+        for img in data:
+            images.append(NodeImage(
+                id=img['image'],
+                name=img['image'],
+                driver=self.connection.driver
+            ))
+        return images
+
+    def list_sizes(self, **kwargs):
+        # Build NodeSize objects from the hardcoded table, merging in the
+        # price from libcloud's pricing data. deepcopy keeps the shared
+        # DH_PS_SIZES dict pristine.
+        sizes = []
+        for key, values in self._sizes.items():
+            attributes = copy.deepcopy(values)
+            attributes.update({'price': self._get_size_price(size_id=key)})
+            sizes.append(NodeSize(driver=self.connection.driver, **attributes))
+
+        return sizes
+
+    def list_locations(self, **kwargs):
+        raise NotImplementedError(
+            'You cannot select a location for '
+            'DreamHost Private Servers at this time.')
+
+    def _resize_node(self, node, size):
+        """
+        Resize a PS to ``size`` megabytes of RAM.
+
+        Returns ``False`` for sizes outside DreamHost's supported
+        300-4000 MB range (the min/max entries in DH_PS_SIZES) or when
+        the API rejects the request.
+        """
+        if (size < 300 or size > 4000):
+            return False
+
+        params = {
+            'cmd': 'dreamhost_ps-set_size',
+            'ps': node.id,
+            'size': size
+        }
+        try:
+            return self.connection.request('/', params).success()
+        except DreamhostAPIException:
+            return False
+
+    def _to_node(self, data):
+        """
+        Convert the data from a DreamhostResponse object into a Node
+        """
+        # The API does not report run state, so state is always UNKNOWN.
+        return Node(
+            id=data['ps'],
+            name=data['ps'],
+            state=NodeState.UNKNOWN,
+            public_ips=[data['ip']],
+            private_ips=[],
+            driver=self.connection.driver,
+            extra={
+                'current_size': data['memory_mb'],
+                'account_id': data['account_id'],
+                'type': data['type']})
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/dummy.py b/awx/lib/site-packages/libcloud/compute/drivers/dummy.py
new file mode 100644
index 0000000000..982433525b
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/dummy.py
@@ -0,0 +1,349 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Dummy Driver
+
+@note: This driver is out of date
+"""
+import uuid
+import socket
+import struct
+
+from libcloud.common.base import ConnectionKey
+from libcloud.compute.base import NodeImage, NodeSize, Node
+from libcloud.compute.base import NodeDriver, NodeLocation
+from libcloud.compute.base import KeyPair
+from libcloud.compute.types import Provider, NodeState
+
+
+class DummyConnection(ConnectionKey):
+    """
+    Dummy connection class
+    """
+
+    def connect(self, host=None, port=None):
+        # Intentionally a no-op: the dummy driver never opens a network
+        # connection.
+        pass
+
+
+class DummyNodeDriver(NodeDriver):
+    """
+    Dummy node driver
+
+    This is a fake driver which appears to always create or destroy
+    nodes successfully.
+
+    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+    >>> driver = DummyNodeDriver(0)
+    >>> node=driver.create_node()
+    >>> node.public_ips[0]
+    '127.0.0.3'
+    >>> node.name
+    'dummy-3'
+
+    If the credentials you give convert to an integer then the next
+    node to be created will be one higher.
+
+    Each time you create a node you will get a different IP address.
+
+    >>> driver = DummyNodeDriver(22)
+    >>> node=driver.create_node()
+    >>> node.name
+    'dummy-23'
+
+    """
+
+    name = "Dummy Node Provider"
+    website = 'http://example.com'
+    type = Provider.DUMMY
+
+    def __init__(self, creds):
+        """
+        :param creds: Credentials
+        :type creds: ``str``
+
+        :rtype: ``None``
+        """
+        self.creds = creds
+        # NOTE(review): only ValueError is caught, so a non-string,
+        # non-numeric creds value (e.g. None) would raise TypeError
+        # here -- confirm callers always pass a str or int.
+        try:
+            num = int(creds)
+        except ValueError:
+            num = None
+        # Numeric creds pre-populate that many nodes; 0 (and any
+        # non-numeric value) falls through to the two default nodes,
+        # because `if num:` is False for both 0 and None.
+        if num:
+            self.nl = []
+            startip = _ip_to_int('127.0.0.1')
+            for i in range(num):
+                ip = _int_to_ip(startip + i)
+                self.nl.append(
+                    Node(id=i,
+                         name='dummy-%d' % (i),
+                         state=NodeState.RUNNING,
+                         public_ips=[ip],
+                         private_ips=[],
+                         driver=self,
+                         extra={'foo': 'bar'})
+                )
+        else:
+            self.nl = [
+                Node(id=1,
+                     name='dummy-1',
+                     state=NodeState.RUNNING,
+                     public_ips=['127.0.0.1'],
+                     private_ips=[],
+                     driver=self,
+                     extra={'foo': 'bar'}),
+                Node(id=2,
+                     name='dummy-2',
+                     state=NodeState.RUNNING,
+                     public_ips=['127.0.0.1'],
+                     private_ips=[],
+                     driver=self,
+                     extra={'foo': 'bar'}),
+            ]
+        self.connection = DummyConnection(self.creds)
+
+    def get_uuid(self, unique_field=None):
+        """
+        Return a random UUID rendered as a string.
+
+        :param unique_field: Unique field (unused by this dummy
+                             implementation)
+        :type unique_field: ``bool``
+        :rtype: ``str``
+        """
+        return str(uuid.uuid4())
+
+    def list_nodes(self):
+        """
+        List the nodes known to a particular driver;
+        There are two default nodes created at the beginning
+
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> node_list=driver.list_nodes()
+        >>> sorted([node.name for node in node_list ])
+        ['dummy-1', 'dummy-2']
+
+        each item in the list returned is a node object from which you
+        can carry out any node actions you wish
+
+        >>> node_list[0].reboot()
+        True
+
+        As more nodes are added, list_nodes will return them
+
+        >>> node=driver.create_node()
+        >>> node.size.id
+        's1'
+        >>> node.image.id
+        'i2'
+        >>> sorted([n.name for n in driver.list_nodes()])
+        ['dummy-1', 'dummy-2', 'dummy-3']
+
+        @inherits: :class:`NodeDriver.list_nodes`
+        """
+        # Returns the live internal list, not a copy.
+        return self.nl
+
+    def reboot_node(self, node):
+        """
+        Sets the node state to rebooting; in this dummy driver always
+        returns True as if the reboot had been successful.
+
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> node=driver.create_node()
+        >>> from libcloud.compute.types import NodeState
+        >>> node.state == NodeState.RUNNING
+        True
+        >>> node.state == NodeState.REBOOTING
+        False
+        >>> driver.reboot_node(node)
+        True
+        >>> node.state == NodeState.REBOOTING
+        True
+
+        Please note, dummy nodes never recover from the reboot.
+
+        @inherits: :class:`NodeDriver.reboot_node`
+        """
+
+        node.state = NodeState.REBOOTING
+        return True
+
+    def destroy_node(self, node):
+        """
+        Sets the node state to terminated and removes it from the node list
+
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> from libcloud.compute.types import NodeState
+        >>> node = [node for node in driver.list_nodes() if
+        ...         node.name == 'dummy-1'][0]
+        >>> node.state == NodeState.RUNNING
+        True
+        >>> driver.destroy_node(node)
+        True
+        >>> node.state == NodeState.RUNNING
+        False
+        >>> [n for n in driver.list_nodes() if n.name == 'dummy-1']
+        []
+
+        @inherits: :class:`NodeDriver.destroy_node`
+        """
+
+        node.state = NodeState.TERMINATED
+        self.nl.remove(node)
+        return True
+
+    def list_images(self, location=None):
+        """
+        Returns a list of images as a cloud provider might have
+
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> sorted([image.name for image in driver.list_images()])
+        ['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10']
+
+        @inherits: :class:`NodeDriver.list_images`
+        """
+        return [
+            NodeImage(id=1, name="Ubuntu 9.10", driver=self),
+            NodeImage(id=2, name="Ubuntu 9.04", driver=self),
+            NodeImage(id=3, name="Slackware 4", driver=self),
+        ]
+
+    def list_sizes(self, location=None):
+        """
+        Returns a list of node sizes as a cloud provider might have
+
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> sorted([size.ram for size in driver.list_sizes()])
+        [128, 512, 4096, 8192]
+
+        @inherits: :class:`NodeDriver.list_images`
+        """
+
+        return [
+            NodeSize(id=1,
+                     name="Small",
+                     ram=128,
+                     disk=4,
+                     bandwidth=500,
+                     price=4,
+                     driver=self),
+            NodeSize(id=2,
+                     name="Medium",
+                     ram=512,
+                     disk=16,
+                     bandwidth=1500,
+                     price=8,
+                     driver=self),
+            NodeSize(id=3,
+                     name="Big",
+                     ram=4096,
+                     disk=32,
+                     bandwidth=2500,
+                     price=32,
+                     driver=self),
+            NodeSize(id=4,
+                     name="XXL Big",
+                     ram=4096 * 2,
+                     disk=32 * 4,
+                     bandwidth=2500 * 3,
+                     price=32 * 2,
+                     driver=self),
+        ]
+
+    def list_locations(self):
+        """
+        Returns a list of locations of nodes
+
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> sorted([loc.name + " in " + loc.country for loc in
+        ...         driver.list_locations()])
+        ['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"]
+
+        @inherits: :class:`NodeDriver.list_locations`
+        """
+        return [
+            NodeLocation(id=1,
+                         name="Paul's Room",
+                         country='US',
+                         driver=self),
+            NodeLocation(id=2,
+                         name="London Loft",
+                         country='GB',
+                         driver=self),
+            NodeLocation(id=3,
+                         name="Island Datacenter",
+                         country='FJ',
+                         driver=self),
+        ]
+
+    def create_node(self, **kwargs):
+        """
+        Creates a dummy node; the node id is equal to the number of
+        nodes in the node list
+
+        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
+        >>> driver = DummyNodeDriver(0)
+        >>> sorted([node.name for node in driver.list_nodes()])
+        ['dummy-1', 'dummy-2']
+        >>> nodeA = driver.create_node()
+        >>> sorted([node.name for node in driver.list_nodes()])
+        ['dummy-1', 'dummy-2', 'dummy-3']
+        >>> driver.create_node().name
+        'dummy-4'
+        >>> driver.destroy_node(nodeA)
+        True
+        >>> sorted([node.name for node in driver.list_nodes()])
+        ['dummy-1', 'dummy-2', 'dummy-4']
+
+        @inherits: :class:`NodeDriver.create_node`
+        """
+        # Next node number; also used as the last IP octet, so ids can
+        # repeat after deletions (see the doctest above: dummy-4 twice
+        # would collide only if nodes were removed first).
+        l = len(self.nl) + 1
+        n = Node(id=l,
+                 name='dummy-%d' % l,
+                 state=NodeState.RUNNING,
+                 public_ips=['127.0.0.%d' % l],
+                 private_ips=[],
+                 driver=self,
+                 size=NodeSize(id='s1', name='foo', ram=2048,
+                               disk=160, bandwidth=None, price=0.0,
+                               driver=self),
+                 image=NodeImage(id='i2', name='image', driver=self),
+                 extra={'foo': 'bar'})
+        self.nl.append(n)
+        return n
+
+    def import_key_pair_from_string(self, name, key_material):
+        """
+        Import a key pair from the given public-key string.
+
+        The returned KeyPair carries placeholder fingerprint and
+        private-key values, consistent with the rest of this fake
+        driver.
+        """
+        key_pair = KeyPair(name=name,
+                           public_key=key_material,
+                           fingerprint='fingerprint',
+                           private_key='private_key',
+                           driver=self)
+        return key_pair
+
+
+def _ip_to_int(ip):
+    # Dotted-quad string -> integer in host byte order. inet_aton yields
+    # network-order bytes; unpacking with the native 'I' format and then
+    # applying htonl produces the same value on both little- and
+    # big-endian hosts.
+    return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0])
+
+
+def _int_to_ip(ip):
+    # Inverse of _ip_to_int: host-order integer -> dotted-quad string.
+    return socket.inet_ntoa(struct.pack('I', socket.ntohl(ip)))
+
+if __name__ == "__main__":
+    # Running this module directly executes the doctests embedded in the
+    # DummyNodeDriver docstrings above.
+    import doctest
+
+    doctest.testmod()
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/ec2.py b/awx/lib/site-packages/libcloud/compute/drivers/ec2.py
new file mode 100644
index 0000000000..c17f072a8c
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/ec2.py
@@ -0,0 +1,5770 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Amazon EC2, Eucalyptus, Nimbus and Outscale drivers.
+"""
+
+import re
+import sys
+import base64
+import copy
+import warnings
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.utils.py3 import b, basestring, ensure_string
+
+from libcloud.utils.xml import fixxpath, findtext, findattr, findall
+from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint
+from libcloud.utils.publickey import get_pubkey_comment
+from libcloud.utils.iso8601 import parse_date
+from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection
+from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
+ LibcloudError)
+from libcloud.compute.providers import Provider
+from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
+from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot
+from libcloud.compute.base import KeyPair
+from libcloud.compute.types import NodeState, KeyPairDoesNotExistError
+
+__all__ = [
+ 'API_VERSION',
+ 'NAMESPACE',
+ 'INSTANCE_TYPES',
+ 'OUTSCALE_INSTANCE_TYPES',
+ 'OUTSCALE_SAS_REGION_DETAILS',
+ 'OUTSCALE_INC_REGION_DETAILS',
+ 'DEFAULT_EUCA_API_VERSION',
+ 'EUCA_NAMESPACE',
+
+ 'EC2NodeDriver',
+ 'BaseEC2NodeDriver',
+
+ 'NimbusNodeDriver',
+ 'EucNodeDriver',
+
+ 'OutscaleSASNodeDriver',
+ 'OutscaleINCNodeDriver',
+
+ 'EC2NodeLocation',
+ 'EC2ReservedNode',
+ 'EC2SecurityGroup',
+ 'EC2Network',
+ 'EC2NetworkSubnet',
+ 'EC2NetworkInterface',
+ 'EC2RouteTable',
+ 'EC2Route',
+ 'EC2SubnetAssociation',
+ 'ExEC2AvailabilityZone',
+
+ 'IdempotentParamError'
+]
+
+API_VERSION = '2013-10-15'
+NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION)
+
+# Eucalyptus Constants
+DEFAULT_EUCA_API_VERSION = '3.3.0'
+EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (DEFAULT_EUCA_API_VERSION)
+
+"""
+Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
+From http://aws.amazon.com/ec2/instance-types/
+"""
+INSTANCE_TYPES = {
+ 't1.micro': {
+ 'id': 't1.micro',
+ 'name': 'Micro Instance',
+ 'ram': 613,
+ 'disk': 15,
+ 'bandwidth': None
+ },
+ 'm1.small': {
+ 'id': 'm1.small',
+ 'name': 'Small Instance',
+ 'ram': 1740,
+ 'disk': 160,
+ 'bandwidth': None
+ },
+ 'm1.medium': {
+ 'id': 'm1.medium',
+ 'name': 'Medium Instance',
+ 'ram': 3700,
+ 'disk': 410,
+ 'bandwidth': None
+ },
+ 'm1.large': {
+ 'id': 'm1.large',
+ 'name': 'Large Instance',
+ 'ram': 7680,
+ 'disk': 850,
+ 'bandwidth': None
+ },
+ 'm1.xlarge': {
+ 'id': 'm1.xlarge',
+ 'name': 'Extra Large Instance',
+ 'ram': 15360,
+ 'disk': 1690,
+ 'bandwidth': None
+ },
+ 'c1.medium': {
+ 'id': 'c1.medium',
+ 'name': 'High-CPU Medium Instance',
+ 'ram': 1740,
+ 'disk': 350,
+ 'bandwidth': None
+ },
+ 'c1.xlarge': {
+ 'id': 'c1.xlarge',
+ 'name': 'High-CPU Extra Large Instance',
+ 'ram': 7680,
+ 'disk': 1690,
+ 'bandwidth': None
+ },
+ 'm2.xlarge': {
+ 'id': 'm2.xlarge',
+ 'name': 'High-Memory Extra Large Instance',
+ 'ram': 17510,
+ 'disk': 420,
+ 'bandwidth': None
+ },
+ 'm2.2xlarge': {
+ 'id': 'm2.2xlarge',
+ 'name': 'High-Memory Double Extra Large Instance',
+ 'ram': 35021,
+ 'disk': 850,
+ 'bandwidth': None
+ },
+ 'm2.4xlarge': {
+ 'id': 'm2.4xlarge',
+ 'name': 'High-Memory Quadruple Extra Large Instance',
+ 'ram': 70042,
+ 'disk': 1690,
+ 'bandwidth': None
+ },
+ 'm3.medium': {
+ 'id': 'm3.medium',
+ 'name': 'Medium Instance',
+ 'ram': 3840,
+ 'disk': 4000,
+ 'bandwidth': None
+ },
+ 'm3.large': {
+ 'id': 'm3.large',
+ 'name': 'Large Instance',
+ 'ram': 7168,
+ 'disk': 32000,
+ 'bandwidth': None
+ },
+ 'm3.xlarge': {
+ 'id': 'm3.xlarge',
+ 'name': 'Extra Large Instance',
+ 'ram': 15360,
+ 'disk': 80000,
+ 'bandwidth': None
+ },
+ 'm3.2xlarge': {
+ 'id': 'm3.2xlarge',
+ 'name': 'Double Extra Large Instance',
+ 'ram': 30720,
+ 'disk': 160000,
+ 'bandwidth': None
+ },
+ 'cg1.4xlarge': {
+ 'id': 'cg1.4xlarge',
+ 'name': 'Cluster GPU Quadruple Extra Large Instance',
+ 'ram': 22528,
+ 'disk': 1690,
+ 'bandwidth': None
+ },
+ 'g2.2xlarge': {
+ 'id': 'g2.2xlarge',
+ 'name': 'Cluster GPU G2 Double Extra Large Instance',
+ 'ram': 15000,
+ 'disk': 60,
+ 'bandwidth': None,
+ },
+ 'cc1.4xlarge': {
+ 'id': 'cc1.4xlarge',
+ 'name': 'Cluster Compute Quadruple Extra Large Instance',
+ 'ram': 23552,
+ 'disk': 1690,
+ 'bandwidth': None
+ },
+ 'cc2.8xlarge': {
+ 'id': 'cc2.8xlarge',
+ 'name': 'Cluster Compute Eight Extra Large Instance',
+ 'ram': 63488,
+ 'disk': 3370,
+ 'bandwidth': None
+ },
+ # c3 instances have 2 SSDs of the specified disk size
+ 'c3.large': {
+ 'id': 'c3.large',
+ 'name': 'Compute Optimized Large Instance',
+ 'ram': 3750,
+ 'disk': 16,
+ 'bandwidth': None
+ },
+ 'c3.xlarge': {
+ 'id': 'c3.xlarge',
+ 'name': 'Compute Optimized Extra Large Instance',
+ 'ram': 7000,
+ 'disk': 40,
+ 'bandwidth': None
+ },
+ 'c3.2xlarge': {
+ 'id': 'c3.2xlarge',
+ 'name': 'Compute Optimized Double Extra Large Instance',
+ 'ram': 15000,
+ 'disk': 80,
+ 'bandwidth': None
+ },
+ 'c3.4xlarge': {
+ 'id': 'c3.4xlarge',
+ 'name': 'Compute Optimized Quadruple Extra Large Instance',
+ 'ram': 30000,
+ 'disk': 160,
+ 'bandwidth': None
+ },
+ 'c3.8xlarge': {
+ 'id': 'c3.8xlarge',
+ 'name': 'Compute Optimized Eight Extra Large Instance',
+ 'ram': 60000,
+ 'disk': 320,
+ 'bandwidth': None
+ },
+ 'cr1.8xlarge': {
+ 'id': 'cr1.8xlarge',
+ 'name': 'High Memory Cluster Eight Extra Large',
+ 'ram': 244000,
+ 'disk': 240,
+ 'bandwidth': None
+ },
+ 'hs1.4xlarge': {
+ 'id': 'hs1.4xlarge',
+ 'name': 'High Storage Quadruple Extra Large Instance',
+ 'ram': 61952,
+ 'disk': 2048,
+ 'bandwidth': None
+ },
+ 'hs1.8xlarge': {
+ 'id': 'hs1.8xlarge',
+ 'name': 'High Storage Eight Extra Large Instance',
+ 'ram': 119808,
+ 'disk': 48000,
+ 'bandwidth': None
+ },
+ # i2 instances have up to eight SSD drives
+ 'i2.xlarge': {
+ 'id': 'i2.xlarge',
+ 'name': 'High Storage Optimized Extra Large Instance',
+ 'ram': 31232,
+ 'disk': 800,
+ 'bandwidth': None
+ },
+ 'i2.2xlarge': {
+ 'id': 'i2.2xlarge',
+ 'name': 'High Storage Optimized Double Extra Large Instance',
+ 'ram': 62464,
+ 'disk': 1600,
+ 'bandwidth': None
+ },
+ 'i2.4xlarge': {
+ 'id': 'i2.4xlarge',
+ 'name': 'High Storage Optimized Quadruple Large Instance',
+ 'ram': 124928,
+ 'disk': 3200,
+ 'bandwidth': None
+ },
+ 'i2.8xlarge': {
+ 'id': 'i2.8xlarge',
+ 'name': 'High Storage Optimized Eight Extra Large Instance',
+ 'ram': 249856,
+ 'disk': 6400,
+ 'bandwidth': None
+ },
+ # 1x SSD
+ 'r3.large': {
+ 'id': 'r3.large',
+ 'name': 'Memory Optimized Large instance',
+ 'ram': 15000,
+ 'disk': 32,
+ 'bandwidth': None
+ },
+ 'r3.xlarge': {
+ 'id': 'r3.xlarge',
+ 'name': 'Memory Optimized Extra Large instance',
+ 'ram': 30500,
+ 'disk': 80,
+ 'bandwidth': None
+ },
+ 'r3.2xlarge': {
+ 'id': 'r3.2xlarge',
+ 'name': 'Memory Optimized Double Extra Large instance',
+ 'ram': 61000,
+ 'disk': 160,
+ 'bandwidth': None
+ },
+ 'r3.4xlarge': {
+ 'id': 'r3.4xlarge',
+ 'name': 'Memory Optimized Quadruple Extra Large instance',
+ 'ram': 122000,
+ 'disk': 320,
+ 'bandwidth': None
+ },
+ 'r3.8xlarge': {
+ 'id': 'r3.8xlarge',
+ 'name': 'Memory Optimized Eight Extra Large instance',
+ 'ram': 244000,
+ 'disk': 320, # x2
+ 'bandwidth': None
+ }
+}
+
+REGION_DETAILS = {
+ # US East (Northern Virginia) Region
+ 'us-east-1': {
+ 'endpoint': 'ec2.us-east-1.amazonaws.com',
+ 'api_name': 'ec2_us_east',
+ 'country': 'USA',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'c1.xlarge',
+ 'cc2.8xlarge',
+ 'c3.large',
+ 'c3.xlarge',
+ 'c3.2xlarge',
+ 'c3.4xlarge',
+ 'c3.8xlarge',
+ 'cg1.4xlarge',
+ 'g2.2xlarge',
+ 'cr1.8xlarge',
+ 'hs1.8xlarge',
+ 'i2.xlarge',
+ 'i2.2xlarge',
+ 'i2.4xlarge',
+ 'i2.8xlarge',
+ 'r3.large',
+ 'r3.xlarge',
+ 'r3.2xlarge',
+ 'r3.4xlarge',
+ 'r3.8xlarge'
+ ]
+ },
+ # US West (Northern California) Region
+ 'us-west-1': {
+ 'endpoint': 'ec2.us-west-1.amazonaws.com',
+ 'api_name': 'ec2_us_west',
+ 'country': 'USA',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'c1.xlarge',
+ 'g2.2xlarge',
+ 'c3.large',
+ 'c3.xlarge',
+ 'c3.2xlarge',
+ 'c3.4xlarge',
+ 'c3.8xlarge',
+ 'i2.xlarge',
+ 'i2.2xlarge',
+ 'i2.4xlarge',
+ 'i2.8xlarge',
+ 'r3.large',
+ 'r3.xlarge',
+ 'r3.2xlarge',
+ 'r3.4xlarge',
+ 'r3.8xlarge'
+ ]
+ },
+ # US West (Oregon) Region
+ 'us-west-2': {
+ 'endpoint': 'ec2.us-west-2.amazonaws.com',
+ 'api_name': 'ec2_us_west_oregon',
+ 'country': 'US',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'c1.xlarge',
+ 'g2.2xlarge',
+ 'c3.large',
+ 'c3.xlarge',
+ 'c3.2xlarge',
+ 'c3.4xlarge',
+ 'c3.8xlarge',
+ 'hs1.8xlarge',
+ 'cc2.8xlarge',
+ 'i2.xlarge',
+ 'i2.2xlarge',
+ 'i2.4xlarge',
+ 'i2.8xlarge',
+ 'r3.large',
+ 'r3.xlarge',
+ 'r3.2xlarge',
+ 'r3.4xlarge',
+ 'r3.8xlarge'
+ ]
+ },
+ # EU (Ireland) Region
+ 'eu-west-1': {
+ 'endpoint': 'ec2.eu-west-1.amazonaws.com',
+ 'api_name': 'ec2_eu_west',
+ 'country': 'Ireland',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'c1.xlarge',
+ 'g2.2xlarge',
+ 'c3.large',
+ 'c3.xlarge',
+ 'c3.2xlarge',
+ 'c3.4xlarge',
+ 'c3.8xlarge',
+ 'hs1.8xlarge',
+ 'cc2.8xlarge',
+ 'i2.xlarge',
+ 'i2.2xlarge',
+ 'i2.4xlarge',
+ 'i2.8xlarge',
+ 'r3.large',
+ 'r3.xlarge',
+ 'r3.2xlarge',
+ 'r3.4xlarge',
+ 'r3.8xlarge'
+ ]
+ },
+ # Asia Pacific (Singapore) Region
+ 'ap-southeast-1': {
+ 'endpoint': 'ec2.ap-southeast-1.amazonaws.com',
+ 'api_name': 'ec2_ap_southeast',
+ 'country': 'Singapore',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'c1.xlarge',
+ 'c3.large',
+ 'c3.xlarge',
+ 'c3.2xlarge',
+ 'c3.4xlarge',
+ 'c3.8xlarge',
+ 'hs1.8xlarge',
+ 'i2.xlarge',
+ 'i2.2xlarge',
+ 'i2.4xlarge',
+ 'i2.8xlarge',
+ ]
+ },
+ # Asia Pacific (Tokyo) Region
+ 'ap-northeast-1': {
+ 'endpoint': 'ec2.ap-northeast-1.amazonaws.com',
+ 'api_name': 'ec2_ap_northeast',
+ 'country': 'Japan',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'g2.2xlarge',
+ 'c1.xlarge',
+ 'c3.large',
+ 'c3.xlarge',
+ 'c3.2xlarge',
+ 'c3.4xlarge',
+ 'c3.8xlarge',
+ 'hs1.8xlarge',
+ 'i2.xlarge',
+ 'i2.2xlarge',
+ 'i2.4xlarge',
+ 'i2.8xlarge',
+ 'r3.large',
+ 'r3.xlarge',
+ 'r3.2xlarge',
+ 'r3.4xlarge',
+ 'r3.8xlarge'
+ ]
+ },
+ # South America (Sao Paulo) Region
+ 'sa-east-1': {
+ 'endpoint': 'ec2.sa-east-1.amazonaws.com',
+ 'api_name': 'ec2_sa_east',
+ 'country': 'Brazil',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'c1.xlarge'
+ ]
+ },
+ # Asia Pacific (Sydney) Region
+ 'ap-southeast-2': {
+ 'endpoint': 'ec2.ap-southeast-2.amazonaws.com',
+ 'api_name': 'ec2_ap_southeast_2',
+ 'country': 'Australia',
+ 'instance_types': [
+ 't1.micro',
+ 'm1.small',
+ 'm1.medium',
+ 'm1.large',
+ 'm1.xlarge',
+ 'm2.xlarge',
+ 'm2.2xlarge',
+ 'm2.4xlarge',
+ 'm3.medium',
+ 'm3.large',
+ 'm3.xlarge',
+ 'm3.2xlarge',
+ 'c1.medium',
+ 'c1.xlarge',
+ 'c3.large',
+ 'c3.xlarge',
+ 'c3.2xlarge',
+ 'c3.4xlarge',
+ 'c3.8xlarge',
+ 'hs1.8xlarge',
+ 'i2.xlarge',
+ 'i2.2xlarge',
+ 'i2.4xlarge',
+ 'i2.8xlarge',
+ 'r3.large',
+ 'r3.xlarge',
+ 'r3.2xlarge',
+ 'r3.4xlarge',
+ 'r3.8xlarge'
+ ]
+ },
+ 'nimbus': {
+ # Nimbus clouds have 3 EC2-style instance types but their particular
+ # RAM allocations are configured by the admin
+ 'country': 'custom',
+ 'instance_types': [
+ 'm1.small',
+ 'm1.large',
+ 'm1.xlarge'
+ ]
+ }
+}
+
+
+"""
+Sizes must be hardcoded because Outscale doesn't provide an API to fetch them.
+Outscale cloud instances share some names with EC2 but have differents
+specifications so declare them in another constant.
+"""
+OUTSCALE_INSTANCE_TYPES = {
+ 't1.micro': {
+ 'id': 't1.micro',
+ 'name': 'Micro Instance',
+ 'ram': 615,
+ 'disk': 0,
+ 'bandwidth': None
+ },
+ 'm1.small': {
+ 'id': 'm1.small',
+ 'name': 'Standard Small Instance',
+ 'ram': 1740,
+ 'disk': 150,
+ 'bandwidth': None
+ },
+ 'm1.medium': {
+ 'id': 'm1.medium',
+ 'name': 'Standard Medium Instance',
+ 'ram': 3840,
+ 'disk': 420,
+ 'bandwidth': None
+ },
+ 'm1.large': {
+ 'id': 'm1.large',
+ 'name': 'Standard Large Instance',
+ 'ram': 7680,
+ 'disk': 840,
+ 'bandwidth': None
+ },
+ 'm1.xlarge': {
+ 'id': 'm1.xlarge',
+ 'name': 'Standard Extra Large Instance',
+ 'ram': 15360,
+ 'disk': 1680,
+ 'bandwidth': None
+ },
+ 'c1.medium': {
+ 'id': 'c1.medium',
+ 'name': 'Compute Optimized Medium Instance',
+ 'ram': 1740,
+ 'disk': 340,
+ 'bandwidth': None
+ },
+ 'c1.xlarge': {
+ 'id': 'c1.xlarge',
+ 'name': 'Compute Optimized Extra Large Instance',
+ 'ram': 7168,
+ 'disk': 1680,
+ 'bandwidth': None
+ },
+ 'c3.large': {
+ 'id': 'c3.large',
+ 'name': 'Compute Optimized Large Instance',
+ 'ram': 3840,
+ 'disk': 32,
+ 'bandwidth': None
+ },
+ 'c3.xlarge': {
+ 'id': 'c3.xlarge',
+ 'name': 'Compute Optimized Extra Large Instance',
+ 'ram': 7168,
+ 'disk': 80,
+ 'bandwidth': None
+ },
+ 'c3.2xlarge': {
+ 'id': 'c3.2xlarge',
+ 'name': 'Compute Optimized Double Extra Large Instance',
+ 'ram': 15359,
+ 'disk': 160,
+ 'bandwidth': None
+ },
+ 'c3.4xlarge': {
+ 'id': 'c3.4xlarge',
+ 'name': 'Compute Optimized Quadruple Extra Large Instance',
+ 'ram': 30720,
+ 'disk': 320,
+ 'bandwidth': None
+ },
+ 'c3.8xlarge': {
+ 'id': 'c3.8xlarge',
+ 'name': 'Compute Optimized Eight Extra Large Instance',
+ 'ram': 61440,
+ 'disk': 640,
+ 'bandwidth': None
+ },
+ 'm2.xlarge': {
+ 'id': 'm2.xlarge',
+ 'name': 'High Memory Extra Large Instance',
+ 'ram': 17510,
+ 'disk': 420,
+ 'bandwidth': None
+ },
+ 'm2.2xlarge': {
+ 'id': 'm2.2xlarge',
+ 'name': 'High Memory Double Extra Large Instance',
+ 'ram': 35020,
+ 'disk': 840,
+ 'bandwidth': None
+ },
+ 'm2.4xlarge': {
+ 'id': 'm2.4xlarge',
+ 'name': 'High Memory Quadruple Extra Large Instance',
+ 'ram': 70042,
+ 'disk': 1680,
+ 'bandwidth': None
+ },
+ 'nv1.small': {
+ 'id': 'nv1.small',
+ 'name': 'GPU Small Instance',
+ 'ram': 1739,
+ 'disk': 150,
+ 'bandwidth': None
+ },
+ 'nv1.medium': {
+ 'id': 'nv1.medium',
+ 'name': 'GPU Medium Instance',
+ 'ram': 3839,
+ 'disk': 420,
+ 'bandwidth': None
+ },
+ 'nv1.large': {
+ 'id': 'nv1.large',
+ 'name': 'GPU Large Instance',
+ 'ram': 7679,
+ 'disk': 840,
+ 'bandwidth': None
+ },
+ 'nv1.xlarge': {
+ 'id': 'nv1.xlarge',
+ 'name': 'GPU Extra Large Instance',
+ 'ram': 15358,
+ 'disk': 1680,
+ 'bandwidth': None
+ },
+ 'g2.2xlarge': {
+ 'id': 'g2.2xlarge',
+ 'name': 'GPU Double Extra Large Instance',
+ 'ram': 15360,
+ 'disk': 60,
+ 'bandwidth': None
+ },
+ 'cc1.4xlarge': {
+ 'id': 'cc1.4xlarge',
+ 'name': 'Cluster Compute Quadruple Extra Large Instance',
+ 'ram': 24576,
+ 'disk': 1680,
+ 'bandwidth': None
+ },
+ 'cc2.8xlarge': {
+ 'id': 'cc2.8xlarge',
+ 'name': 'Cluster Compute Eight Extra Large Instance',
+ 'ram': 65536,
+ 'disk': 3360,
+ 'bandwidth': None
+ },
+ 'hi1.xlarge': {
+ 'id': 'hi1.xlarge',
+ 'name': 'High Storage Extra Large Instance',
+ 'ram': 15361,
+ 'disk': 1680,
+ 'bandwidth': None
+ },
+ 'm3.xlarge': {
+ 'id': 'm3.xlarge',
+ 'name': 'High Storage Optimized Extra Large Instance',
+ 'ram': 15357,
+ 'disk': 0,
+ 'bandwidth': None
+ },
+ 'm3.2xlarge': {
+ 'id': 'm3.2xlarge',
+ 'name': 'High Storage Optimized Double Extra Large Instance',
+ 'ram': 30720,
+ 'disk': 0,
+ 'bandwidth': None
+ },
+ 'm3s.xlarge': {
+ 'id': 'm3s.xlarge',
+ 'name': 'High Storage Optimized Extra Large Instance',
+ 'ram': 15359,
+ 'disk': 0,
+ 'bandwidth': None
+ },
+ 'm3s.2xlarge': {
+ 'id': 'm3s.2xlarge',
+ 'name': 'High Storage Optimized Double Extra Large Instance',
+ 'ram': 30719,
+ 'disk': 0,
+ 'bandwidth': None
+ },
+ 'cr1.8xlarge': {
+ 'id': 'cr1.8xlarge',
+ 'name': 'Memory Optimized Eight Extra Large Instance',
+ 'ram': 249855,
+ 'disk': 240,
+ 'bandwidth': None
+ },
+ 'os1.2xlarge': {
+ 'id': 'os1.2xlarge',
+ 'name': 'Memory Optimized, High Storage, Passthrough NIC Double Extra '
+ 'Large Instance',
+ 'ram': 65536,
+ 'disk': 60,
+ 'bandwidth': None
+ },
+ 'os1.4xlarge': {
+ 'id': 'os1.4xlarge',
+ 'name': 'Memory Optimized, High Storage, Passthrough NIC Quadruple Ext'
+ 'ra Large Instance',
+ 'ram': 131072,
+ 'disk': 120,
+ 'bandwidth': None
+ },
+ 'os1.8xlarge': {
+ 'id': 'os1.8xlarge',
+ 'name': 'Memory Optimized, High Storage, Passthrough NIC Eight Extra L'
+ 'arge Instance',
+ 'ram': 249856,
+ 'disk': 500,
+ 'bandwidth': None
+ },
+ 'oc1.4xlarge': {
+ 'id': 'oc1.4xlarge',
+ 'name': 'Outscale Quadruple Extra Large Instance',
+ 'ram': 24575,
+ 'disk': 1680,
+ 'bandwidth': None
+ },
+ 'oc2.8xlarge': {
+ 'id': 'oc2.8xlarge',
+ 'name': 'Outscale Eight Extra Large Instance',
+ 'ram': 65535,
+ 'disk': 3360,
+ 'bandwidth': None
+ }
+}
+
+
+"""
+The function manipulating Outscale cloud regions will be overriden because
+Outscale instances types are in a separate dict so also declare Outscale cloud
+regions in some other constants.
+"""
# Instance types available in every Outscale SAS region below.  Each region
# entry receives its own copy of this list so per-region mutation stays safe.
_OUTSCALE_SAS_SHARED_INSTANCE_TYPES = [
    't1.micro',
    'm1.small',
    'm1.medium',
    'm1.large',
    'm1.xlarge',
    'c1.medium',
    'c1.xlarge',
    'm2.xlarge',
    'm2.2xlarge',
    'm2.4xlarge',
    'nv1.small',
    'nv1.medium',
    'nv1.large',
    'nv1.xlarge',
    'cc1.4xlarge',
    'cc2.8xlarge',
    'm3.xlarge',
    'm3.2xlarge',
    'cr1.8xlarge',
    'os1.8xlarge'
]

OUTSCALE_SAS_REGION_DETAILS = {
    'eu-west-3': {
        'endpoint': 'api-ppd.outscale.com',
        'api_name': 'osc_sas_eu_west_3',
        'country': 'FRANCE',
        'instance_types': list(_OUTSCALE_SAS_SHARED_INSTANCE_TYPES)
    },
    'eu-west-1': {
        'endpoint': 'api.eu-west-1.outscale.com',
        'api_name': 'osc_sas_eu_west_1',
        'country': 'FRANCE',
        'instance_types': list(_OUTSCALE_SAS_SHARED_INSTANCE_TYPES)
    },
    'us-east-1': {
        'endpoint': 'api.us-east-1.outscale.com',
        'api_name': 'osc_sas_us_east_1',
        'country': 'USA',
        'instance_types': list(_OUTSCALE_SAS_SHARED_INSTANCE_TYPES)
    }
}
+
+
# Instance types available in every Outscale INC region below.  Each region
# entry receives its own copy of this list so per-region mutation stays safe.
_OUTSCALE_INC_SHARED_INSTANCE_TYPES = [
    't1.micro',
    'm1.small',
    'm1.medium',
    'm1.large',
    'm1.xlarge',
    'c1.medium',
    'c1.xlarge',
    'm2.xlarge',
    'm2.2xlarge',
    'm2.4xlarge',
    'nv1.small',
    'nv1.medium',
    'nv1.large',
    'nv1.xlarge',
    'cc1.4xlarge',
    'cc2.8xlarge',
    'm3.xlarge',
    'm3.2xlarge',
    'cr1.8xlarge',
    'os1.8xlarge'
]

OUTSCALE_INC_REGION_DETAILS = {
    'eu-west-1': {
        'endpoint': 'api.eu-west-1.outscale.com',
        'api_name': 'osc_inc_eu_west_1',
        'country': 'FRANCE',
        'instance_types': list(_OUTSCALE_INC_SHARED_INSTANCE_TYPES)
    },
    'eu-west-3': {
        'endpoint': 'api-ppd.outscale.com',
        'api_name': 'osc_inc_eu_west_3',
        'country': 'FRANCE',
        'instance_types': list(_OUTSCALE_INC_SHARED_INSTANCE_TYPES)
    },
    'us-east-1': {
        'endpoint': 'api.us-east-1.outscale.com',
        'api_name': 'osc_inc_us_east_1',
        'country': 'USA',
        'instance_types': list(_OUTSCALE_INC_SHARED_INSTANCE_TYPES)
    }
}
+
+
+"""
+Define the extra dictionary for specific resources
+"""
def _extra_attr(xpath, transform_func=str):
    """
    Build one extra-attribute descriptor: the XPath at which the value is
    found in the API response and the callable used to convert it.
    """
    return {'xpath': xpath, 'transform_func': transform_func}


RESOURCE_EXTRA_ATTRIBUTES_MAP = {
    'ebs_volume': {
        'snapshot_id': _extra_attr('ebs/snapshotId'),
        'volume_id': _extra_attr('ebs/volumeId'),
        'volume_size': _extra_attr('ebs/volumeSize', int),
        'delete': _extra_attr('ebs/deleteOnTermination'),
        'volume_type': _extra_attr('ebs/volumeType'),
        'iops': _extra_attr('ebs/iops', int)
    },
    'elastic_ip': {
        'allocation_id': _extra_attr('allocationId'),
        'association_id': _extra_attr('associationId'),
        'interface_id': _extra_attr('networkInterfaceId'),
        'owner_id': _extra_attr('networkInterfaceOwnerId'),
        'private_ip': _extra_attr('privateIp')
    },
    'image': {
        'state': _extra_attr('imageState'),
        'owner_id': _extra_attr('imageOwnerId'),
        'owner_alias': _extra_attr('imageOwnerAlias'),
        'is_public': _extra_attr('isPublic'),
        'architecture': _extra_attr('architecture'),
        'image_type': _extra_attr('imageType'),
        'image_location': _extra_attr('imageLocation'),
        'platform': _extra_attr('platform'),
        'description': _extra_attr('description'),
        'root_device_type': _extra_attr('rootDeviceType'),
        'virtualization_type': _extra_attr('virtualizationType'),
        'hypervisor': _extra_attr('hypervisor'),
        'kernel_id': _extra_attr('kernelId'),
        'ramdisk_id': _extra_attr('ramdiskId')
    },
    'network': {
        'state': _extra_attr('state'),
        'dhcp_options_id': _extra_attr('dhcpOptionsId'),
        'instance_tenancy': _extra_attr('instanceTenancy'),
        'is_default': _extra_attr('isDefault')
    },
    'network_interface': {
        'subnet_id': _extra_attr('subnetId'),
        'vpc_id': _extra_attr('vpcId'),
        'zone': _extra_attr('availabilityZone'),
        'description': _extra_attr('description'),
        'owner_id': _extra_attr('ownerId'),
        'mac_address': _extra_attr('macAddress'),
        'private_dns_name': _extra_attr('privateIpAddressesSet/privateDnsName'),
        'source_dest_check': _extra_attr('sourceDestCheck')
    },
    'network_interface_attachment': {
        'attachment_id': _extra_attr('attachment/attachmentId'),
        'instance_id': _extra_attr('attachment/instanceId'),
        'owner_id': _extra_attr('attachment/instanceOwnerId'),
        'device_index': _extra_attr('attachment/deviceIndex', int),
        'status': _extra_attr('attachment/status'),
        'attach_time': _extra_attr('attachment/attachTime', parse_date),
        'delete': _extra_attr('attachment/deleteOnTermination')
    },
    'node': {
        'availability': _extra_attr('placement/availabilityZone'),
        'architecture': _extra_attr('architecture'),
        'client_token': _extra_attr('clientToken'),
        'dns_name': _extra_attr('dnsName'),
        'hypervisor': _extra_attr('hypervisor'),
        'iam_profile': _extra_attr('iamInstanceProfile/id'),
        'image_id': _extra_attr('imageId'),
        'instance_id': _extra_attr('instanceId'),
        'instance_lifecycle': _extra_attr('instanceLifecycle'),
        'instance_tenancy': _extra_attr('placement/tenancy'),
        'instance_type': _extra_attr('instanceType'),
        'key_name': _extra_attr('keyName'),
        'launch_index': _extra_attr('amiLaunchIndex', int),
        'launch_time': _extra_attr('launchTime'),
        'kernel_id': _extra_attr('kernelId'),
        'monitoring': _extra_attr('monitoring/state'),
        'platform': _extra_attr('platform'),
        'private_dns': _extra_attr('privateDnsName'),
        'ramdisk_id': _extra_attr('ramdiskId'),
        'root_device_type': _extra_attr('rootDeviceType'),
        'root_device_name': _extra_attr('rootDeviceName'),
        'reason': _extra_attr('reason'),
        'source_dest_check': _extra_attr('sourceDestCheck'),
        'status': _extra_attr('instanceState/name'),
        'subnet_id': _extra_attr('subnetId'),
        'virtualization_type': _extra_attr('virtualizationType'),
        'ebs_optimized': _extra_attr('ebsOptimized'),
        'vpc_id': _extra_attr('vpcId')
    },
    'reserved_node': {
        'instance_type': _extra_attr('instanceType'),
        'availability': _extra_attr('availabilityZone'),
        'start': _extra_attr('start'),
        'duration': _extra_attr('duration', int),
        'usage_price': _extra_attr('usagePrice', float),
        'fixed_price': _extra_attr('fixedPrice', float),
        'instance_count': _extra_attr('instanceCount', int),
        'description': _extra_attr('productDescription'),
        'instance_tenancy': _extra_attr('instanceTenancy'),
        'currency_code': _extra_attr('currencyCode'),
        'offering_type': _extra_attr('offeringType')
    },
    'security_group': {
        'vpc_id': _extra_attr('vpcId'),
        'description': _extra_attr('groupDescription'),
        'owner_id': _extra_attr('ownerId')
    },
    'snapshot': {
        'volume_id': _extra_attr('volumeId'),
        'state': _extra_attr('status'),
        'description': _extra_attr('description'),
        'progress': _extra_attr('progress'),
        'start_time': _extra_attr('startTime', parse_date)
    },
    'subnet': {
        'cidr_block': _extra_attr('cidrBlock'),
        'available_ips': _extra_attr('availableIpAddressCount', int),
        'zone': _extra_attr('availabilityZone'),
        'vpc_id': _extra_attr('vpcId')
    },
    'volume': {
        'device': _extra_attr('attachmentSet/item/device'),
        'iops': _extra_attr('iops', int),
        'zone': _extra_attr('availabilityZone'),
        'create_time': _extra_attr('createTime', parse_date),
        'state': _extra_attr('status'),
        'attach_time': _extra_attr('attachmentSet/item/attachTime', parse_date),
        'attachment_status': _extra_attr('attachmentSet/item/status'),
        'instance_id': _extra_attr('attachmentSet/item/instanceId'),
        'delete': _extra_attr('attachmentSet/item/deleteOnTermination')
    },
    'route_table': {
        'vpc_id': _extra_attr('vpcId')
    }
}
+
# Every known EC2 region except the special 'nimbus' entry.
VALID_EC2_REGIONS = [region for region in REGION_DETAILS if region != 'nimbus']
+
+
class EC2NodeLocation(NodeLocation):
    """
    NodeLocation subclass that also carries the EC2 availability zone the
    location represents.
    """

    def __init__(self, id, name, country, driver, availability_zone):
        super(EC2NodeLocation, self).__init__(id, name, country, driver)
        self.availability_zone = availability_zone

    def __repr__(self):
        # Fixed: the format string was missing ((('') % five_values) raises
        # TypeError); restore a descriptive representation.
        return (('<EC2NodeLocation: id=%s, name=%s, country=%s, '
                 'availability_zone=%s driver=%s>')
                % (self.id, self.name, self.country,
                   self.availability_zone, self.driver.name))
+
+
class EC2Response(AWSBaseResponse):
    """
    EC2 specific response parsing and error handling.
    """

    def parse_error(self):
        """
        Parse an EC2 error response body into a newline-joined message,
        raising a specific exception for credential, idempotency and
        missing-key-pair errors.
        """
        err_list = []
        # Eucalyptus can return a bare 403 with this plain-text body when the
        # credentials are wrong -- there is no XML to parse in that case.
        msg = "Failure: 403 Forbidden"
        if self.status == 403 and self.body[:len(msg)] == msg:
            raise InvalidCredsError(msg)

        try:
            body = ET.XML(self.body)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            raise MalformedResponseError("Failed to parse XML",
                                         body=self.body, driver=EC2NodeDriver)

        # All of these error codes indicate bad or unusable credentials.
        invalid_creds_codes = frozenset([
            'InvalidClientTokenId', 'SignatureDoesNotMatch',
            'AuthFailure', 'OptInRequired'])

        for err in body.findall('Errors/Error'):
            # list(err) replaces the deprecated Element.getchildren().
            code, message = list(err)
            err_list.append('%s: %s' % (code.text, message.text))

            if code.text in invalid_creds_codes:
                raise InvalidCredsError(err_list[-1])
            if code.text == 'IdempotentParameterMismatch':
                raise IdempotentParamError(err_list[-1])
            if code.text == 'InvalidKeyPair.NotFound':
                # TODO: Use connection context instead
                # Pull the quoted key pair name out of the message text.
                match = re.match(r'.*\'(.+?)\'.*', message.text)
                name = match.groups()[0] if match else None
                raise KeyPairDoesNotExistError(name=name,
                                               driver=self.connection.driver)
        return '\n'.join(err_list)
+
+
class EC2Connection(SignedAWSConnection):
    """
    A single signed connection to an EC2 endpoint.

    Defaults to the us-east-1 endpoint; presumably region-specific drivers
    substitute their own host -- confirm against the driver constructors.
    """

    responseCls = EC2Response
    version = API_VERSION
    host = REGION_DETAILS['us-east-1']['endpoint']
+
+
class ExEC2AvailabilityZone(object):
    """
    Extension class which stores information about an EC2 availability zone.

    Note: This class is EC2 specific.
    """

    def __init__(self, name, zone_state, region_name):
        self.name = name
        self.zone_state = zone_state
        self.region_name = region_name

    def __repr__(self):
        # Fixed: the format string was missing ((('') % three_values) raises
        # TypeError); restore a descriptive representation.
        return (('<ExEC2AvailabilityZone: name=%s, zone_state=%s, '
                 'region_name=%s>')
                % (self.name, self.zone_state, self.region_name))
+
+
class EC2ReservedNode(Node):
    """
    Class which stores information about EC2 reserved instances/nodes
    Inherits from Node and passes in None for name and private/public IPs

    Note: This class is EC2 specific.
    """

    def __init__(self, id, state, driver, size=None, image=None, extra=None):
        # size and image are accepted for signature compatibility but are not
        # forwarded; reserved instances carry no name or IP addresses.
        super(EC2ReservedNode, self).__init__(id=id, name=None, state=state,
                                              public_ips=None,
                                              private_ips=None,
                                              driver=driver, extra=extra)

    def __repr__(self):
        # Fixed: formatting an empty string with an argument raises
        # TypeError; restore a descriptive representation.
        return (('<EC2ReservedNode: id=%s>') % (self.id))
+
+
class EC2SecurityGroup(object):
    """
    Represents information about a Security group

    Note: This class is EC2 specific.
    """

    def __init__(self, id, name, ingress_rules, egress_rules, extra=None):
        self.id = id
        self.name = name
        self.ingress_rules = ingress_rules
        self.egress_rules = egress_rules
        self.extra = extra or {}

    def __repr__(self):
        # Fixed: the previous body interpolated attributes this class never
        # defines (ip/domain/instance_id -- apparently copied from an elastic
        # IP class), so repr() raised. Use the attributes actually stored.
        return (('<EC2SecurityGroup: id=%s, name=%s>')
                % (self.id, self.name))
+
+
class VPCInternetGateway(object):
    """
    Class which stores information about VPC Internet Gateways.

    Note: This class is VPC specific.
    """

    def __init__(self, id, name, vpc_id, state, driver, extra=None):
        # NOTE(review): 'driver' is accepted but not stored; presumably
        # intentional for signature parity with other resource classes --
        # confirm against upstream libcloud before storing it.
        self.id = id
        self.name = name
        self.vpc_id = vpc_id
        self.state = state
        self.extra = extra or {}

    def __repr__(self):
        # Fixed: formatting an empty string with an argument raises
        # TypeError; restore a descriptive representation.
        return (('<VPCInternetGateway: id=%s>') % (self.id))
+
+
class EC2RouteTable(object):
    """
    Class which stores information about VPC Route Tables.

    Note: This class is VPC specific.
    """

    def __init__(self, id, name, routes, subnet_associations,
                 propagating_gateway_ids, extra=None):
        """
        :param id: The ID of the route table.
        :type id: ``str``

        :param name: The name of the route table.
        :type name: ``str``

        :param routes: A list of routes in the route table.
        :type routes: ``list`` of :class:`EC2Route`

        :param subnet_associations: A list of associations between the
                                    route table and one or more subnets.
        :type subnet_associations: ``list`` of
                                   :class:`EC2SubnetAssociation`

        :param propagating_gateway_ids: The list of IDs of any virtual
                                        private gateways propagating the
                                        routes.
        :type propagating_gateway_ids: ``list``

        :param extra: Optional provider-specific attributes.
        :type extra: ``dict``
        """
        self.id = id
        self.name = name
        self.routes = routes
        self.subnet_associations = subnet_associations
        self.propagating_gateway_ids = propagating_gateway_ids
        self.extra = extra or {}

    def __repr__(self):
        # Fixed: formatting an empty string with an argument raises
        # TypeError; restore a descriptive representation.
        return (('<EC2RouteTable: id=%s>') % (self.id))
+
+
class EC2Route(object):
    """
    Class which stores information about a Route.

    Note: This class is VPC specific.
    """

    def __init__(self, cidr, gateway_id, instance_id, owner_id,
                 interface_id, state, origin, vpc_peering_connection_id):
        """
        :param cidr: The CIDR block used for the destination match.
        :type cidr: ``str``

        :param gateway_id: The ID of a gateway attached to the VPC.
        :type gateway_id: ``str``

        :param instance_id: The ID of a NAT instance in the VPC.
        :type instance_id: ``str``

        :param owner_id: The AWS account ID of the owner of the instance.
        :type owner_id: ``str``

        :param interface_id: The ID of the network interface.
        :type interface_id: ``str``

        :param state: The state of the route (active | blackhole).
        :type state: ``str``

        :param origin: Describes how the route was created.
        :type origin: ``str``

        :param vpc_peering_connection_id: The ID of the VPC
                                          peering connection.
        :type vpc_peering_connection_id: ``str``
        """
        self.cidr = cidr
        self.gateway_id = gateway_id
        self.instance_id = instance_id
        self.owner_id = owner_id
        self.interface_id = interface_id
        self.state = state
        self.origin = origin
        self.vpc_peering_connection_id = vpc_peering_connection_id

    def __repr__(self):
        # Fixed: formatting an empty string with an argument raises
        # TypeError; restore a descriptive representation.
        return (('<EC2Route: cidr=%s>') % (self.cidr))
+
+
class EC2SubnetAssociation(object):
    """
    Class which stores information about Route Table associated with
    a given Subnet in a VPC

    Note: This class is VPC specific.
    """

    def __init__(self, id, route_table_id, subnet_id, main=False):
        """
        :param id: The ID of the subnet association in the VPC.
        :type id: ``str``

        :param route_table_id: The ID of a route table in the VPC.
        :type route_table_id: ``str``

        :param subnet_id: The ID of a subnet in the VPC.
        :type subnet_id: ``str``

        :param main: If true, means this is a main VPC route table.
        :type main: ``bool``
        """
        self.id = id
        self.route_table_id = route_table_id
        self.subnet_id = subnet_id
        self.main = main

    def __repr__(self):
        # Fixed: formatting an empty string with an argument raises
        # TypeError; restore a descriptive representation.
        return (('<EC2SubnetAssociation: id=%s>') % (self.id))
+
+
class BaseEC2NodeDriver(NodeDriver):
    """
    Base Amazon EC2 node driver.

    Used for main EC2 and other derivate driver classes to inherit from it.
    """

    connectionCls = EC2Connection
    # Nodes are created with an SSH key pair for authentication.
    features = {'create_node': ['ssh_key']}
    # All EC2 query API requests are made against the endpoint root.
    path = '/'

    # Maps EC2 instance state names to libcloud NodeState values.  States
    # not listed here (e.g. 'stopping', 'stopped') are handled by whatever
    # fallback the lookup site applies -- presumably NodeState.UNKNOWN;
    # confirm in the node-parsing helper.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED
    }
+
+ def list_nodes(self, ex_node_ids=None, ex_filters=None):
+ """
+ List all nodes
+
+ Ex_node_ids parameter is used to filter the list of
+ nodes that should be returned. Only the nodes
+ with the corresponding node ids will be returned.
+
+ :param ex_node_ids: List of ``node.id``
+ :type ex_node_ids: ``list`` of ``str``
+
+ :param ex_filters: The filters so that the response includes
+ information for only certain nodes.
+ :type ex_filters: ``dict``
+
+ :rtype: ``list`` of :class:`Node`
+ """
+
+ params = {'Action': 'DescribeInstances'}
+
+ if ex_node_ids:
+ params.update(self._pathlist('InstanceId', ex_node_ids))
+
+ if ex_filters:
+ params.update(self._build_filters(ex_filters))
+
+ elem = self.connection.request(self.path, params=params).object
+
+ nodes = []
+ for rs in findall(element=elem, xpath='reservationSet/item',
+ namespace=NAMESPACE):
+ nodes += self._to_nodes(rs, 'instancesSet/item')
+
+ nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes)
+
+ for node in nodes:
+ ips = nodes_elastic_ips_mappings[node.id]
+ node.public_ips.extend(ips)
+
+ return nodes
+
+ def list_sizes(self, location=None):
+ available_types = REGION_DETAILS[self.region_name]['instance_types']
+ sizes = []
+
+ for instance_type in available_types:
+ attributes = INSTANCE_TYPES[instance_type]
+ attributes = copy.deepcopy(attributes)
+ price = self._get_size_price(size_id=instance_type)
+ attributes.update({'price': price})
+ sizes.append(NodeSize(driver=self, **attributes))
+ return sizes
+
+ def list_images(self, location=None, ex_image_ids=None, ex_owner=None,
+ ex_executableby=None):
+ """
+ List all images
+ @inherits: :class:`NodeDriver.list_images`
+
+ Ex_image_ids parameter is used to filter the list of
+ images that should be returned. Only the images
+ with the corresponding image ids will be returned.
+
+ Ex_owner parameter is used to filter the list of
+ images that should be returned. Only the images
+ with the corresponding owner will be returned.
+ Valid values: amazon|aws-marketplace|self|all|aws id
+
+ Ex_executableby parameter describes images for which
+ the specified user has explicit launch permissions.
+ The user can be an AWS account ID, self to return
+ images for which the sender of the request has
+ explicit launch permissions, or all to return
+ images with public launch permissions.
+ Valid values: all|self|aws id
+
+ :param ex_image_ids: List of ``NodeImage.id``
+ :type ex_image_ids: ``list`` of ``str``
+
+ :param ex_owner: Owner name
+ :type ex_owner: ``str``
+
+ :param ex_executableby: Executable by
+ :type ex_executableby: ``str``
+
+ :rtype: ``list`` of :class:`NodeImage`
+ """
+ params = {'Action': 'DescribeImages'}
+
+ if ex_owner:
+ params.update({'Owner.1': ex_owner})
+
+ if ex_executableby:
+ params.update({'ExecutableBy.1': ex_executableby})
+
+ if ex_image_ids:
+ for index, image_id in enumerate(ex_image_ids):
+ index += 1
+ params.update({'ImageId.%s' % (index): image_id})
+
+ images = self._to_images(
+ self.connection.request(self.path, params=params).object
+ )
+ return images
+
+ def get_image(self, image_id):
+ """
+ Get an image based on a image_id
+
+ :param image_id: Image identifier
+ :type image_id: ``str``
+
+ :return: A NodeImage object
+ :rtype: :class:`NodeImage`
+
+ """
+ images = self.list_images(ex_image_ids=[image_id])
+ image = images[0]
+
+ return image
+
+ def list_locations(self):
+ locations = []
+ for index, availability_zone in \
+ enumerate(self.ex_list_availability_zones()):
+ locations.append(EC2NodeLocation(
+ index, availability_zone.name, self.country, self,
+ availability_zone)
+ )
+ return locations
+
+ def list_volumes(self, node=None):
+ params = {
+ 'Action': 'DescribeVolumes',
+ }
+ if node:
+ filters = {'attachment.instance-id': node.id}
+ params.update(self._build_filters(filters))
+
+ response = self.connection.request(self.path, params=params).object
+ volumes = [self._to_volume(el) for el in response.findall(
+ fixxpath(xpath='volumeSet/item', namespace=NAMESPACE))
+ ]
+ return volumes
+
    def create_node(self, **kwargs):
        """
        Create a new EC2 node.

        Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_keyname: The name of the key pair
        :type ex_keyname: ``str``

        :keyword ex_userdata: User data
        :type ex_userdata: ``str``

        :keyword ex_security_groups: A list of names of security groups to
                                     assign to the node.
        :type ex_security_groups: ``list``

        :keyword ex_metadata: Key/Value metadata to associate with a node
        :type ex_metadata: ``dict``

        :keyword ex_mincount: Minimum number of instances to launch
        :type ex_mincount: ``int``

        :keyword ex_maxcount: Maximum number of instances to launch
        :type ex_maxcount: ``int``

        :keyword ex_clienttoken: Unique identifier to ensure idempotency
        :type ex_clienttoken: ``str``

        :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device
                                         mappings.
        :type ex_blockdevicemappings: ``list`` of ``dict``

        :keyword ex_iamprofile: Name or ARN of IAM profile
        :type ex_iamprofile: ``str``

        :keyword ex_ebs_optimized: EBS-Optimized if True
        :type ex_ebs_optimized: ``bool``

        :keyword ex_subnet: The subnet to launch the instance into.
        :type ex_subnet: :class:`.EC2Subnet`
        """
        image = kwargs["image"]
        size = kwargs["size"]
        params = {
            'Action': 'RunInstances',
            'ImageId': image.id,
            # Min/MaxCount both default to a single instance.
            'MinCount': str(kwargs.get('ex_mincount', '1')),
            'MaxCount': str(kwargs.get('ex_maxcount', '1')),
            'InstanceType': size.id
        }

        # The two security-group keywords are mutually exclusive.
        if 'ex_security_groups' in kwargs and 'ex_securitygroup' in kwargs:
            raise ValueError('You can only supply ex_security_groups or'
                             ' ex_securitygroup')

        # ex_securitygroup is here for backward compatibility
        ex_security_groups = kwargs.get('ex_security_groups', None)
        ex_securitygroup = kwargs.get('ex_securitygroup', None)
        security_groups = ex_security_groups or ex_securitygroup

        if security_groups:
            # A bare string/group is wrapped so both forms are accepted.
            if not isinstance(security_groups, (tuple, list)):
                security_groups = [security_groups]

            # EC2 expects 1-based, numbered SecurityGroup.N parameters.
            for sig in range(len(security_groups)):
                params['SecurityGroup.%d' % (sig + 1,)] =\
                    security_groups[sig]

        if 'location' in kwargs:
            availability_zone = getattr(kwargs['location'],
                                        'availability_zone', None)
            if availability_zone:
                # The requested zone must belong to this driver's region.
                if availability_zone.region_name != self.region_name:
                    raise AttributeError('Invalid availability zone: %s'
                                         % (availability_zone.name))
                params['Placement.AvailabilityZone'] = availability_zone.name

        # 'auth' and 'ex_keyname' both set KeyName, so only one is allowed.
        if 'auth' in kwargs and 'ex_keyname' in kwargs:
            raise AttributeError('Cannot specify auth and ex_keyname together')

        if 'auth' in kwargs:
            auth = self._get_and_check_auth(kwargs['auth'])
            key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey)
            params['KeyName'] = key['keyName']

        if 'ex_keyname' in kwargs:
            params['KeyName'] = kwargs['ex_keyname']

        if 'ex_userdata' in kwargs:
            # The EC2 API requires user data to be base64-encoded.
            params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\
                .decode('utf-8')

        if 'ex_clienttoken' in kwargs:
            params['ClientToken'] = kwargs['ex_clienttoken']

        if 'ex_blockdevicemappings' in kwargs:
            params.update(self._get_block_device_mapping_params(
                kwargs['ex_blockdevicemappings']))

        if 'ex_iamprofile' in kwargs:
            # NOTE(review): 'basestring' is presumably supplied by a
            # Python 2/3 compatibility import at the top of the file --
            # confirm.
            if not isinstance(kwargs['ex_iamprofile'], basestring):
                raise AttributeError('ex_iamprofile not string')

            # Full ARNs and bare profile names use different parameters.
            if kwargs['ex_iamprofile'].startswith('arn:aws:iam:'):
                params['IamInstanceProfile.Arn'] = kwargs['ex_iamprofile']
            else:
                params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile']

        if 'ex_ebs_optimized' in kwargs:
            params['EbsOptimized'] = kwargs['ex_ebs_optimized']

        if 'ex_subnet' in kwargs:
            params['SubnetId'] = kwargs['ex_subnet'].id

        # NOTE(review): 'object' shadows the builtin of the same name.
        object = self.connection.request(self.path, params=params).object
        nodes = self._to_nodes(object, 'instancesSet/item')

        for node in nodes:
            tags = {'Name': kwargs['name']}
            if 'ex_metadata' in kwargs:
                tags.update(kwargs['ex_metadata'])

            # Best effort: a tagging failure does not fail node creation,
            # but the node is then left without its name/tags set locally.
            try:
                self.ex_create_tags(resource=node, tags=tags)
            except Exception:
                continue

            node.name = kwargs['name']
            node.extra.update({'tags': tags})

        # A single launched instance is returned bare, not in a list.
        if len(nodes) == 1:
            return nodes[0]
        else:
            return nodes
+
+ def reboot_node(self, node):
+ params = {'Action': 'RebootInstances'}
+ params.update(self._pathlist('InstanceId', [node.id]))
+ res = self.connection.request(self.path, params=params).object
+ return self._get_boolean(res)
+
+ def destroy_node(self, node):
+ params = {'Action': 'TerminateInstances'}
+ params.update(self._pathlist('InstanceId', [node.id]))
+ res = self.connection.request(self.path, params=params).object
+ return self._get_terminate_boolean(res)
+
    def create_volume(self, size, name, location=None, snapshot=None,
                      ex_volume_type='standard', ex_iops=None):
        """
        Create a new EBS volume and tag it with the given name.

        :param size: Size of the volume.
        :type size: ``int``

        :param name: Value used for the volume's 'Name' tag.
        :type name: ``str``

        :param location: Datacenter in which to create a volume in.
        :type location: :class:`.ExEC2AvailabilityZone`

        :param snapshot: Accepted for interface compatibility but not used
                         by this implementation.

        :param ex_volume_type: Type of volume to create.
        :type ex_volume_type: ``str``

        :param ex_iops: The number of I/O operations per second (IOPS)
                        that the volume supports. Only used if
                        ex_volume_type is io1.
        :type ex_iops: ``int``
        """
        # NOTE(review): AWS volume types are 'standard', 'io1' and 'gp2';
        # 'g2' is an instance family, so 'g2' here looks like a typo for
        # 'gp2' -- confirm against upstream before changing.
        valid_volume_types = ['standard', 'io1', 'g2']

        params = {
            'Action': 'CreateVolume',
            'Size': str(size)}

        if ex_volume_type and ex_volume_type not in valid_volume_types:
            raise ValueError('Invalid volume type specified: %s' %
                             (ex_volume_type))

        if location is not None:
            params['AvailabilityZone'] = location.availability_zone.name

        if ex_volume_type:
            params['VolumeType'] = ex_volume_type

        if ex_volume_type == 'io1' and ex_iops:
            params['Iops'] = ex_iops

        volume = self._to_volume(
            self.connection.request(self.path, params=params).object,
            name=name)

        # Only record the 'Name' tag locally when the tagging call succeeds.
        if self.ex_create_tags(volume, {'Name': name}):
            volume.extra['tags']['Name'] = name

        return volume
+
+ def attach_volume(self, node, volume, device):
+ params = {
+ 'Action': 'AttachVolume',
+ 'VolumeId': volume.id,
+ 'InstanceId': node.id,
+ 'Device': device}
+
+ self.connection.request(self.path, params=params)
+ return True
+
+ def detach_volume(self, volume):
+ params = {
+ 'Action': 'DetachVolume',
+ 'VolumeId': volume.id}
+
+ self.connection.request(self.path, params=params)
+ return True
+
+ def destroy_volume(self, volume):
+ params = {
+ 'Action': 'DeleteVolume',
+ 'VolumeId': volume.id}
+ response = self.connection.request(self.path, params=params).object
+ return self._get_boolean(response)
+
+ def create_volume_snapshot(self, volume, name=None):
+ """
+ Create snapshot from volume
+
+ :param volume: Instance of ``StorageVolume``
+ :type volume: ``StorageVolume``
+
+ :param name: Name of snapshot
+ :type name: ``str``
+
+ :rtype: :class:`VolumeSnapshot`
+ """
+ params = {
+ 'Action': 'CreateSnapshot',
+ 'VolumeId': volume.id,
+ }
+
+ if name:
+ params.update({
+ 'Description': name,
+ })
+ response = self.connection.request(self.path, params=params).object
+ snapshot = self._to_snapshot(response, name)
+
+ if name and self.ex_create_tags(snapshot, {'Name': name}):
+ snapshot.extra['tags']['Name'] = name
+
+ return snapshot
+
+ def list_volume_snapshots(self, snapshot):
+ return self.list_snapshots(snapshot)
+
+ def list_snapshots(self, snapshot=None, owner=None):
+ """
+ Describe all snapshots.
+
+ :param snapshot: If provided, only return snapshot information for the
+ provided snapshot.
+
+ :param owner: Owner for snapshot: self|amazon|ID
+ :type owner: ``str``
+
+ :rtype: ``list`` of :class:`VolumeSnapshot`
+ """
+ params = {
+ 'Action': 'DescribeSnapshots',
+ }
+ if snapshot:
+ params.update({
+ 'SnapshotId.1': snapshot.id,
+ })
+ if owner:
+ params.update({
+ 'Owner.1': owner,
+ })
+ response = self.connection.request(self.path, params=params).object
+ snapshots = self._to_snapshots(response)
+ return snapshots
+
+ def destroy_volume_snapshot(self, snapshot):
+ params = {
+ 'Action': 'DeleteSnapshot',
+ 'SnapshotId': snapshot.id
+ }
+ response = self.connection.request(self.path, params=params).object
+ return self._get_boolean(response)
+
+ # Key pair management methods
+
+ def list_key_pairs(self):
+ params = {
+ 'Action': 'DescribeKeyPairs'
+ }
+
+ response = self.connection.request(self.path, params=params)
+ elems = findall(element=response.object, xpath='keySet/item',
+ namespace=NAMESPACE)
+
+ key_pairs = self._to_key_pairs(elems=elems)
+ return key_pairs
+
+ def get_key_pair(self, name):
+ params = {
+ 'Action': 'DescribeKeyPairs',
+ 'KeyName': name
+ }
+
+ response = self.connection.request(self.path, params=params)
+ elems = findall(element=response.object, xpath='keySet/item',
+ namespace=NAMESPACE)
+
+ key_pair = self._to_key_pairs(elems=elems)[0]
+ return key_pair
+
+ def create_key_pair(self, name):
+ params = {
+ 'Action': 'CreateKeyPair',
+ 'KeyName': name
+ }
+
+ response = self.connection.request(self.path, params=params)
+ elem = response.object
+ key_pair = self._to_key_pair(elem=elem)
+ return key_pair
+
+ def import_key_pair_from_string(self, name, key_material):
+ base64key = ensure_string(base64.b64encode(b(key_material)))
+
+ params = {
+ 'Action': 'ImportKeyPair',
+ 'KeyName': name,
+ 'PublicKeyMaterial': base64key
+ }
+
+ response = self.connection.request(self.path, params=params)
+ elem = response.object
+ key_pair = self._to_key_pair(elem=elem)
+ return key_pair
+
+ def delete_key_pair(self, key_pair):
+ params = {
+ 'Action': 'DeleteKeyPair',
+ 'KeyName': key_pair.name
+ }
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def copy_image(self, image, source_region, name=None, description=None):
+ """
+ Copy an Amazon Machine Image from the specified source region
+ to the current region.
+
+ @inherits: :class:`NodeDriver.copy_image`
+
+ :param source_region: The region where the image resides
+ :type source_region: ``str``
+
+ :param image: Instance of class NodeImage
+ :type image: :class:`NodeImage`
+
+ :param name: The name of the new image
+ :type name: ``str``
+
+ :param description: The description of the new image
+ :type description: ``str``
+
+ :return: Instance of class ``NodeImage``
+ :rtype: :class:`NodeImage`
+ """
+ params = {'Action': 'CopyImage',
+ 'SourceRegion': source_region,
+ 'SourceImageId': image.id}
+
+ if name is not None:
+ params['Name'] = name
+
+ if description is not None:
+ params['Description'] = description
+
+ image = self._to_image(
+ self.connection.request(self.path, params=params).object)
+
+ return image
+
+ def create_image(self, node, name, description=None, reboot=False,
+ block_device_mapping=None):
+ """
+ Create an Amazon Machine Image based off of an EBS-backed instance.
+
+ @inherits: :class:`NodeDriver.create_image`
+
+ :param node: Instance of ``Node``
+ :type node: :class: `Node`
+
+ :param name: The name for the new image
+ :type name: ``str``
+
+ :param block_device_mapping: A dictionary of the disk layout
+ An example of this dict is included
+ below.
+ :type block_device_mapping: ``list`` of ``dict``
+
+ :param reboot: Whether or not to shutdown the instance before
+ creation. Amazon calls this NoReboot and
+ sets it to false by default to ensure a
+ clean image.
+ :type reboot: ``bool``
+
+ :param description: An optional description for the new image
+ :type description: ``str``
+
+ An example block device mapping dictionary is included:
+
+ mapping = [{'VirtualName': None,
+ 'Ebs': {'VolumeSize': 10,
+ 'VolumeType': 'standard',
+ 'DeleteOnTermination': 'true'},
+ 'DeviceName': '/dev/sda1'}]
+
+ :return: Instance of class ``NodeImage``
+ :rtype: :class:`NodeImage`
+ """
+ params = {'Action': 'CreateImage',
+ 'InstanceId': node.id,
+ 'Name': name,
+ 'NoReboot': not reboot}
+
+ if description is not None:
+ params['Description'] = description
+
+ if block_device_mapping is not None:
+ params.update(self._get_block_device_mapping_params(
+ block_device_mapping))
+
+ image = self._to_image(
+ self.connection.request(self.path, params=params).object)
+
+ return image
+
+ def delete_image(self, image):
+ """
+ Deletes an image at Amazon given a NodeImage object
+
+ @inherits: :class:`NodeDriver.delete_image`
+
+ :param image: Instance of ``NodeImage``
+ :type image: :class: `NodeImage`
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeregisterImage',
+ 'ImageId': image.id}
+
+ response = self.connection.request(self.path, params=params).object
+ return self._get_boolean(response)
+
+ def ex_register_image(self, name, description=None, architecture=None,
+ image_location=None, root_device_name=None,
+ block_device_mapping=None, kernel_id=None,
+ ramdisk_id=None):
+ """
+ Registers an Amazon Machine Image based off of an EBS-backed instance.
+ Can also be used to create images from snapshots. More information
+ can be found at http://goo.gl/hqZq0a.
+
+ :param name: The name for the AMI being registered
+ :type name: ``str``
+
+ :param description: The description of the AMI (optional)
+ :type description: ``str``
+
+ :param architecture: The architecture of the AMI (i386/x86_64)
+ (optional)
+ :type architecture: ``str``
+
+ :param image_location: The location of the AMI within Amazon S3
+ Required if registering an instance
+ store-backed AMI
+ :type image_location: ``str``
+
+ :param root_device_name: The device name for the root device
+ Required if registering a EBS-backed AMI
+ :type root_device_name: ``str``
+
+ :param block_device_mapping: A dictionary of the disk layout
+ (optional)
+ :type block_device_mapping: ``dict``
+
+ :param kernel_id: Kernel id for AMI (optional)
+ :type kernel_id: ``str``
+
+ :param ramdisk_id: RAM disk for AMI (optional)
+ :type ramdisk_id: ``str``
+
+ :rtype: :class:`NodeImage`
+ """
+
+ params = {'Action': 'RegisterImage',
+ 'Name': name}
+
+ if description is not None:
+ params['Description'] = description
+
+ if architecture is not None:
+ params['Architecture'] = architecture
+
+ if image_location is not None:
+ params['ImageLocation'] = image_location
+
+ if root_device_name is not None:
+ params['RootDeviceName'] = root_device_name
+
+ if block_device_mapping is not None:
+ params.update(self._get_block_device_mapping_params(
+ block_device_mapping))
+
+ if kernel_id is not None:
+ params['KernelId'] = kernel_id
+
+ if ramdisk_id is not None:
+ params['RamDiskId'] = ramdisk_id
+
+ image = self._to_image(
+ self.connection.request(self.path, params=params).object
+ )
+ return image
+
+ def ex_list_networks(self, network_ids=None, filters=None):
+ """
+ Return a list of :class:`EC2Network` objects for the
+ current region.
+
+ :param network_ids: Return only networks matching the provided
+ network IDs. If not specified, a list of all
+ the networks in the corresponding region
+ is returned.
+ :type network_ids: ``list``
+
+ :param filters: The filters so that the response includes
+ information for only certain networks.
+ :type filters: ``dict``
+
+ :rtype: ``list`` of :class:`EC2Network`
+ """
+ params = {'Action': 'DescribeVpcs'}
+
+ if network_ids:
+ params.update(self._pathlist('VpcId', network_ids))
+
+ if filters:
+ params.update(self._build_filters(filters))
+
+ return self._to_networks(
+ self.connection.request(self.path, params=params).object
+ )
+
+ def ex_create_network(self, cidr_block, name=None,
+ instance_tenancy='default'):
+ """
+ Create a network/VPC
+
+ :param cidr_block: The CIDR block assigned to the network
+ :type cidr_block: ``str``
+
+ :param name: An optional name for the network
+ :type name: ``str``
+
+ :param instance_tenancy: The allowed tenancy of instances launched
+ into the VPC.
+ Valid values: default/dedicated
+ :type instance_tenancy: ``str``
+
+ :return: Dictionary of network properties
+ :rtype: ``dict``
+ """
+ params = {'Action': 'CreateVpc',
+ 'CidrBlock': cidr_block,
+ 'InstanceTenancy': instance_tenancy}
+
+ response = self.connection.request(self.path, params=params).object
+ element = response.findall(fixxpath(xpath='vpc',
+ namespace=NAMESPACE))[0]
+
+ network = self._to_network(element, name)
+
+ if name and self.ex_create_tags(network, {'Name': name}):
+ network.extra['tags']['Name'] = name
+
+ return network
+
+ def ex_delete_network(self, vpc):
+ """
+ Deletes a network/VPC.
+
+ :param vpc: VPC to delete.
+ :type vpc: :class:`.EC2Network`
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeleteVpc', 'VpcId': vpc.id}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_list_subnets(self, subnet_ids=None, filters=None):
+ """
+ Return a list of :class:`EC2NetworkSubnet` objects for the
+ current region.
+
+ :param subnet_ids: Return only subnets matching the provided
+ subnet IDs. If not specified, a list of all
+ the subnets in the corresponding region
+ is returned.
+ :type subnet_ids: ``list``
+
+ :param filters: The filters so that the response includes
+ information for only certain subnets.
+ :type filters: ``dict``
+
+ :rtype: ``list`` of :class:`EC2NetworkSubnet`
+ """
+ params = {'Action': 'DescribeSubnets'}
+
+ if subnet_ids:
+ params.update(self._pathlist('SubnetId', subnet_ids))
+
+ if filters:
+ params.update(self._build_filters(filters))
+
+ return self._to_subnets(
+ self.connection.request(self.path, params=params).object
+ )
+
+ def ex_create_subnet(self, vpc_id, cidr_block,
+ availability_zone, name=None):
+ """
+ Create a network subnet within a VPC
+
+ :param vpc_id: The ID of the VPC that the subnet should be
+ associated with
+ :type vpc_id: ``str``
+
+ :param cidr_block: The CIDR block assigned to the subnet
+ :type cidr_block: ``str``
+
+ :param availability_zone: The availability zone where the subnet
+ should reside
+ :type availability_zone: ``str``
+
+ :param name: An optional name for the network
+ :type name: ``str``
+
+ :rtype: :class: `EC2NetworkSubnet`
+ """
+ params = {'Action': 'CreateSubnet',
+ 'VpcId': vpc_id,
+ 'CidrBlock': cidr_block,
+ 'AvailabilityZone': availability_zone}
+
+ response = self.connection.request(self.path, params=params).object
+ element = response.findall(fixxpath(xpath='subnet',
+ namespace=NAMESPACE))[0]
+
+ subnet = self._to_subnet(element, name)
+
+ if name and self.ex_create_tags(subnet, {'Name': name}):
+ subnet.extra['tags']['Name'] = name
+
+ return subnet
+
+ def ex_delete_subnet(self, subnet):
+ """
+ Deletes a VPC subnet.
+
+ :param subnet: The subnet to delete
+ :type subnet: :class:`.EC2NetworkSubnet`
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeleteSubnet', 'SubnetId': subnet.id}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_list_security_groups(self):
+ """
+ List existing Security Groups.
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :rtype: ``list`` of ``str``
+ """
+ params = {'Action': 'DescribeSecurityGroups'}
+ response = self.connection.request(self.path, params=params).object
+
+ groups = []
+ for group in findall(element=response, xpath='securityGroupInfo/item',
+ namespace=NAMESPACE):
+ name = findtext(element=group, xpath='groupName',
+ namespace=NAMESPACE)
+ groups.append(name)
+
+ return groups
+
+ def ex_get_security_groups(self, group_ids=None,
+ group_names=None, filters=None):
+ """
+ Return a list of :class:`EC2SecurityGroup` objects for the
+ current region.
+
+ :param group_ids: Return only groups matching the provided
+ group IDs.
+ :type group_ids: ``list``
+
+ :param group_names: Return only groups matching the provided
+ group names.
+ :type group_ids: ``list``
+
+ :param filters: The filters so that the response includes
+ information for only specific security groups.
+ :type filters: ``dict``
+
+ :rtype: ``list`` of :class:`EC2SecurityGroup`
+ """
+
+ params = {'Action': 'DescribeSecurityGroups'}
+
+ if group_ids:
+ params.update(self._pathlist('GroupId', group_ids))
+
+ if group_names:
+ for name_idx, group_name in enumerate(group_names):
+ name_idx += 1 # We want 1-based indexes
+ name_key = 'GroupName.%s' % (name_idx)
+ params[name_key] = group_name
+
+ if filters:
+ params.update(self._build_filters(filters))
+
+ response = self.connection.request(self.path, params=params)
+ return self._to_security_groups(response.object)
+
+ def ex_create_security_group(self, name, description, vpc_id=None):
+ """
+ Creates a new Security Group in EC2-Classic or a targeted VPC.
+
+ :param name: The name of the security group to Create.
+ This must be unique.
+ :type name: ``str``
+
+ :param description: Human readable description of a Security
+ Group.
+ :type description: ``str``
+
+ :param vpc_id: Optional identifier for VPC networks
+ :type vpc_id: ``str``
+
+ :rtype: ``dict``
+ """
+ params = {'Action': 'CreateSecurityGroup',
+ 'GroupName': name,
+ 'GroupDescription': description}
+
+ if vpc_id is not None:
+ params['VpcId'] = vpc_id
+
+ response = self.connection.request(self.path, params=params).object
+ group_id = findattr(element=response, xpath='groupId',
+ namespace=NAMESPACE)
+ return {
+ 'group_id': group_id
+ }
+
+ def ex_delete_security_group_by_id(self, group_id):
+ """
+ Deletes a new Security Group using the group id.
+
+ :param group_id: The ID of the security group
+ :type group_id: ``str``
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeleteSecurityGroup', 'GroupId': group_id}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_delete_security_group_by_name(self, group_name):
+ """
+ Deletes a new Security Group using the group name.
+
+ :param group_name: The name of the security group
+ :type group_name: ``str``
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeleteSecurityGroup', 'GroupName': group_name}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_delete_security_group(self, name):
+ """
+ Wrapper method which calls ex_delete_security_group_by_name.
+
+ :param name: The name of the security group
+ :type name: ``str``
+
+ :rtype: ``bool``
+ """
+ return self.ex_delete_security_group_by_name(name)
+
+ def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip,
+ protocol='tcp'):
+ """
+ Edit a Security Group to allow specific traffic.
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :param name: The name of the security group to edit
+ :type name: ``str``
+
+ :param from_port: The beginning of the port range to open
+ :type from_port: ``str``
+
+ :param to_port: The end of the port range to open
+ :type to_port: ``str``
+
+ :param cidr_ip: The ip to allow traffic for.
+ :type cidr_ip: ``str``
+
+ :param protocol: tcp/udp/icmp
+ :type protocol: ``str``
+
+ :rtype: ``bool``
+ """
+
+ params = {'Action': 'AuthorizeSecurityGroupIngress',
+ 'GroupName': name,
+ 'IpProtocol': protocol,
+ 'FromPort': str(from_port),
+ 'ToPort': str(to_port),
+ 'CidrIp': cidr_ip}
+ try:
+ res = self.connection.request(
+ self.path, params=params.copy()).object
+ return self._get_boolean(res)
+ except Exception:
+ e = sys.exc_info()[1]
+ if e.args[0].find('InvalidPermission.Duplicate') == -1:
+ raise e
+
+ def ex_authorize_security_group_ingress(self, id, from_port, to_port,
+ cidr_ips=None, group_pairs=None,
+ protocol='tcp'):
+ """
+ Edit a Security Group to allow specific ingress traffic using
+ CIDR blocks or either a group ID, group name or user ID (account).
+
+ :param id: The id of the security group to edit
+ :type id: ``str``
+
+ :param from_port: The beginning of the port range to open
+ :type from_port: ``int``
+
+ :param to_port: The end of the port range to open
+ :type to_port: ``int``
+
+ :param cidr_ips: The list of ip ranges to allow traffic for.
+ :type cidr_ips: ``list``
+
+ :param group_pairs: Source user/group pairs to allow traffic for.
+ More info can be found at http://goo.gl/stBHJF
+
+ EC2 Classic Example: To allow access from any system
+ associated with the default group on account 1234567890
+
+ [{'group_name': 'default', 'user_id': '1234567890'}]
+
+ VPC Example: Allow access from any system associated with
+ security group sg-47ad482e on your own account
+
+ [{'group_id': ' sg-47ad482e'}]
+ :type group_pairs: ``list`` of ``dict``
+
+ :param protocol: tcp/udp/icmp
+ :type protocol: ``str``
+
+ :rtype: ``bool``
+ """
+
+ params = self._get_common_security_group_params(id,
+ protocol,
+ from_port,
+ to_port,
+ cidr_ips,
+ group_pairs)
+
+ params["Action"] = 'AuthorizeSecurityGroupIngress'
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_authorize_security_group_egress(self, id, from_port, to_port,
+ cidr_ips, group_pairs=None,
+ protocol='tcp'):
+ """
+ Edit a Security Group to allow specific egress traffic using
+ CIDR blocks or either a group ID, group name or user ID (account).
+ This call is not supported for EC2 classic and only works for VPC
+ groups.
+
+ :param id: The id of the security group to edit
+ :type id: ``str``
+
+ :param from_port: The beginning of the port range to open
+ :type from_port: ``int``
+
+ :param to_port: The end of the port range to open
+ :type to_port: ``int``
+
+ :param cidr_ips: The list of ip ranges to allow traffic for.
+ :type cidr_ips: ``list``
+
+ :param group_pairs: Source user/group pairs to allow traffic for.
+ More info can be found at http://goo.gl/stBHJF
+
+ EC2 Classic Example: To allow access from any system
+ associated with the default group on account 1234567890
+
+ [{'group_name': 'default', 'user_id': '1234567890'}]
+
+ VPC Example: Allow access from any system associated with
+ security group sg-47ad482e on your own account
+
+ [{'group_id': ' sg-47ad482e'}]
+ :type group_pairs: ``list`` of ``dict``
+
+ :param protocol: tcp/udp/icmp
+ :type protocol: ``str``
+
+ :rtype: ``bool``
+ """
+
+ params = self._get_common_security_group_params(id,
+ protocol,
+ from_port,
+ to_port,
+ cidr_ips,
+ group_pairs)
+
+ params["Action"] = 'AuthorizeSecurityGroupEgress'
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_revoke_security_group_ingress(self, id, from_port, to_port,
+ cidr_ips=None, group_pairs=None,
+ protocol='tcp'):
+ """
+ Edit a Security Group to revoke specific ingress traffic using
+ CIDR blocks or either a group ID, group name or user ID (account).
+
+ :param id: The id of the security group to edit
+ :type id: ``str``
+
+ :param from_port: The beginning of the port range to open
+ :type from_port: ``int``
+
+ :param to_port: The end of the port range to open
+ :type to_port: ``int``
+
+ :param cidr_ips: The list of ip ranges to allow traffic for.
+ :type cidr_ips: ``list``
+
+ :param group_pairs: Source user/group pairs to allow traffic for.
+ More info can be found at http://goo.gl/stBHJF
+
+ EC2 Classic Example: To allow access from any system
+ associated with the default group on account 1234567890
+
+ [{'group_name': 'default', 'user_id': '1234567890'}]
+
+ VPC Example: Allow access from any system associated with
+ security group sg-47ad482e on your own account
+
+ [{'group_id': ' sg-47ad482e'}]
+ :type group_pairs: ``list`` of ``dict``
+
+ :param protocol: tcp/udp/icmp
+ :type protocol: ``str``
+
+ :rtype: ``bool``
+ """
+
+ params = self._get_common_security_group_params(id,
+ protocol,
+ from_port,
+ to_port,
+ cidr_ips,
+ group_pairs)
+
+ params["Action"] = 'RevokeSecurityGroupIngress'
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_revoke_security_group_egress(self, id, from_port, to_port,
+ cidr_ips=None, group_pairs=None,
+ protocol='tcp'):
+ """
+ Edit a Security Group to revoke specific egress traffic using
+ CIDR blocks or either a group ID, group name or user ID (account).
+ This call is not supported for EC2 classic and only works for
+ VPC groups.
+
+ :param id: The id of the security group to edit
+ :type id: ``str``
+
+ :param from_port: The beginning of the port range to open
+ :type from_port: ``int``
+
+ :param to_port: The end of the port range to open
+ :type to_port: ``int``
+
+ :param cidr_ips: The list of ip ranges to allow traffic for.
+ :type cidr_ips: ``list``
+
+ :param group_pairs: Source user/group pairs to allow traffic for.
+ More info can be found at http://goo.gl/stBHJF
+
+ EC2 Classic Example: To allow access from any system
+ associated with the default group on account 1234567890
+
+ [{'group_name': 'default', 'user_id': '1234567890'}]
+
+ VPC Example: Allow access from any system associated with
+ security group sg-47ad482e on your own account
+
+ [{'group_id': ' sg-47ad482e'}]
+ :type group_pairs: ``list`` of ``dict``
+
+ :param protocol: tcp/udp/icmp
+ :type protocol: ``str``
+
+ :rtype: ``bool``
+ """
+
+ params = self._get_common_security_group_params(id,
+ protocol,
+ from_port,
+ to_port,
+ cidr_ips,
+ group_pairs)
+
+ params['Action'] = 'RevokeSecurityGroupEgress'
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_authorize_security_group_permissive(self, name):
+ """
+ Edit a Security Group to allow all traffic.
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :param name: The name of the security group to edit
+ :type name: ``str``
+
+ :rtype: ``list`` of ``str``
+ """
+
+ results = []
+ params = {'Action': 'AuthorizeSecurityGroupIngress',
+ 'GroupName': name,
+ 'IpProtocol': 'tcp',
+ 'FromPort': '0',
+ 'ToPort': '65535',
+ 'CidrIp': '0.0.0.0/0'}
+ try:
+ results.append(
+ self.connection.request(self.path, params=params.copy()).object
+ )
+ except Exception:
+ e = sys.exc_info()[1]
+ if e.args[0].find("InvalidPermission.Duplicate") == -1:
+ raise e
+ params['IpProtocol'] = 'udp'
+
+ try:
+ results.append(
+ self.connection.request(self.path, params=params.copy()).object
+ )
+ except Exception:
+ e = sys.exc_info()[1]
+ if e.args[0].find("InvalidPermission.Duplicate") == -1:
+ raise e
+
+ params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
+
+ try:
+ results.append(
+ self.connection.request(self.path, params=params.copy()).object
+ )
+ except Exception:
+ e = sys.exc_info()[1]
+
+ if e.args[0].find("InvalidPermission.Duplicate") == -1:
+ raise e
+ return results
+
+ def ex_list_availability_zones(self, only_available=True):
+ """
+ Return a list of :class:`ExEC2AvailabilityZone` objects for the
+ current region.
+
+ Note: This is an extension method and is only available for EC2
+ driver.
+
+ :keyword only_available: If true, return only availability zones
+ with state 'available'
+ :type only_available: ``str``
+
+ :rtype: ``list`` of :class:`ExEC2AvailabilityZone`
+ """
+ params = {'Action': 'DescribeAvailabilityZones'}
+
+ filters = {'region-name': self.region_name}
+ if only_available:
+ filters['state'] = 'available'
+
+ params.update(self._build_filters(filters))
+
+ result = self.connection.request(self.path,
+ params=params.copy()).object
+
+ availability_zones = []
+ for element in findall(element=result,
+ xpath='availabilityZoneInfo/item',
+ namespace=NAMESPACE):
+ name = findtext(element=element, xpath='zoneName',
+ namespace=NAMESPACE)
+ zone_state = findtext(element=element, xpath='zoneState',
+ namespace=NAMESPACE)
+ region_name = findtext(element=element, xpath='regionName',
+ namespace=NAMESPACE)
+
+ availability_zone = ExEC2AvailabilityZone(
+ name=name,
+ zone_state=zone_state,
+ region_name=region_name
+ )
+ availability_zones.append(availability_zone)
+
+ return availability_zones
+
+ def ex_describe_tags(self, resource):
+ """
+ Return a dictionary of tags for a resource (Node or StorageVolume).
+
+ :param resource: resource which should be used
+ :type resource: :class:`Node` or :class:`StorageVolume`
+
+ :return: dict Node tags
+ :rtype: ``dict``
+ """
+ params = {'Action': 'DescribeTags'}
+
+ filters = {
+ 'resource-id': resource.id,
+ 'resource-type': 'instance'
+ }
+
+ params.update(self._build_filters(filters))
+
+ result = self.connection.request(self.path, params=params).object
+
+ return self._get_resource_tags(result)
+
+ def ex_create_tags(self, resource, tags):
+ """
+ Create tags for a resource (Node or StorageVolume).
+
+ :param resource: Resource to be tagged
+ :type resource: :class:`Node` or :class:`StorageVolume`
+
+ :param tags: A dictionary or other mapping of strings to strings,
+ associating tag names with tag values.
+ :type tags: ``dict``
+
+ :rtype: ``bool``
+ """
+ if not tags:
+ return
+
+ params = {'Action': 'CreateTags',
+ 'ResourceId.0': resource.id}
+ for i, key in enumerate(tags):
+ params['Tag.%d.Key' % i] = key
+ params['Tag.%d.Value' % i] = tags[key]
+
+ res = self.connection.request(self.path,
+ params=params.copy()).object
+
+ return self._get_boolean(res)
+
+ def ex_delete_tags(self, resource, tags):
+ """
+ Delete tags from a resource.
+
+ :param resource: Resource to be tagged
+ :type resource: :class:`Node` or :class:`StorageVolume`
+
+ :param tags: A dictionary or other mapping of strings to strings,
+ specifying the tag names and tag values to be deleted.
+ :type tags: ``dict``
+
+ :rtype: ``bool``
+ """
+ if not tags:
+ return
+
+ params = {'Action': 'DeleteTags',
+ 'ResourceId.0': resource.id}
+ for i, key in enumerate(tags):
+ params['Tag.%d.Key' % i] = key
+ params['Tag.%d.Value' % i] = tags[key]
+
+ res = self.connection.request(self.path,
+ params=params.copy()).object
+
+ return self._get_boolean(res)
+
+ def ex_get_metadata_for_node(self, node):
+ """
+ Return the metadata associated with the node.
+
+ :param node: Node instance
+ :type node: :class:`Node`
+
+ :return: A dictionary or other mapping of strings to strings,
+ associating tag names with tag values.
+ :rtype tags: ``dict``
+ """
+ return node.extra['tags']
+
+ def ex_allocate_address(self, domain='standard'):
+ """
+ Allocate a new Elastic IP address for EC2 classic or VPC
+
+ :param domain: The domain to allocate the new address in
+ (standard/vpc)
+ :type domain: ``str``
+
+ :return: Instance of ElasticIP
+ :rtype: :class:`ElasticIP`
+ """
+ params = {'Action': 'AllocateAddress'}
+
+ if domain == 'vpc':
+ params['Domain'] = domain
+
+ response = self.connection.request(self.path, params=params).object
+
+ return self._to_address(response, only_associated=False)
+
+ def ex_release_address(self, elastic_ip, domain=None):
+ """
+ Release an Elastic IP address using the IP (EC2-Classic) or
+ using the allocation ID (VPC)
+
+ :param elastic_ip: Elastic IP instance
+ :type elastic_ip: :class:`ElasticIP`
+
+ :param domain: The domain where the IP resides (vpc only)
+ :type domain: ``str``
+
+ :return: True on success, False otherwise.
+ :rtype: ``bool``
+ """
+ params = {'Action': 'ReleaseAddress'}
+
+ if domain is not None and domain != 'vpc':
+ raise AttributeError('Domain can only be set to vpc')
+
+ if domain is None:
+ params['PublicIp'] = elastic_ip.ip
+ else:
+ params['AllocationId'] = elastic_ip.extra['allocation_id']
+
+ response = self.connection.request(self.path, params=params).object
+ return self._get_boolean(response)
+
+ def ex_describe_all_addresses(self, only_associated=False):
+ """
+ Return all the Elastic IP addresses for this account
+ optionally, return only addresses associated with nodes
+
+ :param only_associated: If true, return only those addresses
+ that are associated with an instance.
+ :type only_associated: ``bool``
+
+ :return: List of ElasticIP instances.
+ :rtype: ``list`` of :class:`ElasticIP`
+ """
+ params = {'Action': 'DescribeAddresses'}
+
+ response = self.connection.request(self.path, params=params).object
+
+ # We will send our only_associated boolean over to
+ # shape how the return data is sent back
+ return self._to_addresses(response, only_associated)
+
+ def ex_associate_address_with_node(self, node, elastic_ip, domain=None):
+ """
+ Associate an Elastic IP address with a particular node.
+
+ :param node: Node instance
+ :type node: :class:`Node`
+
+ :param elastic_ip: Elastic IP instance
+ :type elastic_ip: :class:`ElasticIP`
+
+ :param domain: The domain where the IP resides (vpc only)
+ :type domain: ``str``
+
+ :return: A string representation of the association ID which is
+ required for VPC disassociation. EC2/standard
+ addresses return None
+ :rtype: ``None`` or ``str``
+ """
+ params = {'Action': 'AssociateAddress', 'InstanceId': node.id}
+
+ if domain is not None and domain != 'vpc':
+ raise AttributeError('Domain can only be set to vpc')
+
+ if domain is None:
+ params.update({'PublicIp': elastic_ip.ip})
+ else:
+ params.update({'AllocationId': elastic_ip.extra['allocation_id']})
+
+ response = self.connection.request(self.path, params=params).object
+ association_id = findtext(element=response,
+ xpath='associationId',
+ namespace=NAMESPACE)
+ return association_id
+
+ def ex_associate_addresses(self, node, elastic_ip, domain=None):
+ """
+ Note: This method has been deprecated in favor of
+ the ex_associate_address_with_node method.
+ """
+
+ return self.ex_associate_address_with_node(node=node,
+ elastic_ip=elastic_ip,
+ domain=domain)
+
+ def ex_disassociate_address(self, elastic_ip, domain=None):
+ """
+ Disassociate an Elastic IP address using the IP (EC2-Classic)
+ or the association ID (VPC)
+
+ :param elastic_ip: ElasticIP instance
+ :type elastic_ip: :class:`ElasticIP`
+
+ :param domain: The domain where the IP resides (vpc only)
+ :type domain: ``str``
+
+ :return: True on success, False otherwise.
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DisassociateAddress'}
+
+ if domain is not None and domain != 'vpc':
+ raise AttributeError('Domain can only be set to vpc')
+
+ if domain is None:
+ params['PublicIp'] = elastic_ip.ip
+
+ else:
+ params['AssociationId'] = elastic_ip.extra['association_id']
+
+ res = self.connection.request(self.path, params=params).object
+ return self._get_boolean(res)
+
+ def ex_describe_addresses(self, nodes):
+ """
+ Return Elastic IP addresses for all the nodes in the provided list.
+
+ :param nodes: List of :class:`Node` instances
+ :type nodes: ``list`` of :class:`Node`
+
+ :return: Dictionary where a key is a node ID and the value is a
+ list with the Elastic IP addresses associated with
+ this node.
+ :rtype: ``dict``
+ """
+ if not nodes:
+ return {}
+
+ params = {'Action': 'DescribeAddresses'}
+
+ if len(nodes) == 1:
+ self._add_instance_filter(params, nodes[0])
+
+ result = self.connection.request(self.path, params=params).object
+
+ node_instance_ids = [node.id for node in nodes]
+ nodes_elastic_ip_mappings = {}
+
+ # We will set only_associated to True so that we only get back
+ # IPs which are associated with instances
+ only_associated = True
+
+ for node_id in node_instance_ids:
+ nodes_elastic_ip_mappings.setdefault(node_id, [])
+ for addr in self._to_addresses(result,
+ only_associated):
+
+ instance_id = addr.instance_id
+
+ if node_id == instance_id:
+ nodes_elastic_ip_mappings[instance_id].append(
+ addr.ip)
+
+ return nodes_elastic_ip_mappings
+
+ def ex_describe_addresses_for_node(self, node):
+ """
+ Return a list of Elastic IP addresses associated with this node.
+
+ :param node: Node instance
+ :type node: :class:`Node`
+
+ :return: list Elastic IP addresses attached to this node.
+ :rtype: ``list`` of ``str``
+ """
+ node_elastic_ips = self.ex_describe_addresses([node])
+ return node_elastic_ips[node.id]
+
+ # Network interface management methods
+
+ def ex_list_network_interfaces(self):
+ """
+ Return all network interfaces
+
+ :return: List of EC2NetworkInterface instances
+ :rtype: ``list`` of :class `EC2NetworkInterface`
+ """
+ params = {'Action': 'DescribeNetworkInterfaces'}
+
+ return self._to_interfaces(
+ self.connection.request(self.path, params=params).object
+ )
+
    def ex_create_network_interface(self, subnet, name=None,
                                    description=None,
                                    private_ip_address=None):
        """
        Create a network interface within a VPC subnet.

        :param subnet: EC2NetworkSubnet instance
        :type subnet: :class:`EC2NetworkSubnet`

        :param name: Optional name of the interface
        :type name: ``str``

        :param description: Optional description of the network interface
        :type description: ``str``

        :param private_ip_address: Optional address to assign as the
                                   primary private IP address of the
                                   interface. If one is not provided then
                                   Amazon will automatically auto-assign
                                   an available IP. EC2 allows assignment
                                   of multiple IPs, but this will be
                                   the primary.
        :type private_ip_address: ``str``

        :return: EC2NetworkInterface instance
        :rtype: :class `EC2NetworkInterface`
        """
        params = {'Action': 'CreateNetworkInterface',
                  'SubnetId': subnet.id}

        if description:
            params['Description'] = description

        if private_ip_address:
            params['PrivateIpAddress'] = private_ip_address

        response = self.connection.request(self.path, params=params).object

        # The new interface is nested under a <networkInterface> element.
        element = response.findall(fixxpath(xpath='networkInterface',
                                            namespace=NAMESPACE))[0]

        interface = self._to_interface(element, name)

        # EC2 has no native interface name; it is stored as a 'Name' tag.
        # Only record the name locally once tagging actually succeeded.
        if name and self.ex_create_tags(interface, {'Name': name}):
            interface.extra['tags']['Name'] = name

        return interface
+
+ def ex_delete_network_interface(self, network_interface):
+ """
+ Deletes a network interface.
+
+ :param network_interface: EC2NetworkInterface instance
+ :type network_interface: :class:`EC2NetworkInterface`
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeleteNetworkInterface',
+ 'NetworkInterfaceId': network_interface.id}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
    def ex_attach_network_interface_to_node(self, network_interface,
                                            node, device_index):
        """
        Attach a network interface to an instance.

        :param network_interface: EC2NetworkInterface instance
        :type network_interface: :class:`EC2NetworkInterface`

        :param node: Node instance
        :type node: :class:`Node`

        :param device_index: The interface device index
        :type device_index: ``int``

        :return: String representation of the attachment id.
                 This is required to detach the interface.
        :rtype: ``str``
        """
        params = {'Action': 'AttachNetworkInterface',
                  'NetworkInterfaceId': network_interface.id,
                  'InstanceId': node.id,
                  'DeviceIndex': device_index}

        response = self.connection.request(self.path, params=params).object
        # Keep the attachment id; ex_detach_network_interface needs it later.
        attachment_id = findattr(element=response, xpath='attachmentId',
                                 namespace=NAMESPACE)

        return attachment_id
+
    def ex_detach_network_interface(self, attachment_id, force=False):
        """
        Detach a network interface from an instance.

        :param attachment_id: The attachment ID associated with the
                              interface
        :type attachment_id: ``str``

        :param force: Forces the detachment.
        :type force: ``bool``

        :return: ``True`` on successful detachment, ``False`` otherwise.
        :rtype: ``bool``
        """
        params = {'Action': 'DetachNetworkInterface',
                  'AttachmentId': attachment_id}

        if force:
            # NOTE(review): Force is sent as a Python bool; presumably the
            # connection layer serializes it into a form EC2 accepts -- confirm.
            params['Force'] = True

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)
+
+ def ex_modify_instance_attribute(self, node, attributes):
+ """
+ Modify node attributes.
+ A list of valid attributes can be found at http://goo.gl/gxcj8
+
+ :param node: Node instance
+ :type node: :class:`Node`
+
+ :param attributes: Dictionary with node attributes
+ :type attributes: ``dict``
+
+ :return: True on success, False otherwise.
+ :rtype: ``bool``
+ """
+ attributes = attributes or {}
+ attributes.update({'InstanceId': node.id})
+
+ params = {'Action': 'ModifyInstanceAttribute'}
+ params.update(attributes)
+
+ res = self.connection.request(self.path,
+ params=params.copy()).object
+
+ return self._get_boolean(res)
+
+ def ex_modify_image_attribute(self, image, attributes):
+ """
+ Modify image attributes.
+
+ :param image: NodeImage instance
+ :type image: :class:`NodeImage`
+
+ :param attributes: Dictionary with node attributes
+ :type attributes: ``dict``
+
+ :return: True on success, False otherwise.
+ :rtype: ``bool``
+ """
+ attributes = attributes or {}
+ attributes.update({'ImageId': image.id})
+
+ params = {'Action': 'ModifyImageAttribute'}
+ params.update(attributes)
+
+ res = self.connection.request(self.path,
+ params=params.copy()).object
+
+ return self._get_boolean(res)
+
+ def ex_change_node_size(self, node, new_size):
+ """
+ Change the node size.
+ Note: Node must be turned of before changing the size.
+
+ :param node: Node instance
+ :type node: :class:`Node`
+
+ :param new_size: NodeSize intance
+ :type new_size: :class:`NodeSize`
+
+ :return: True on success, False otherwise.
+ :rtype: ``bool``
+ """
+ if 'instancetype' in node.extra:
+ current_instance_type = node.extra['instancetype']
+
+ if current_instance_type == new_size.id:
+ raise ValueError('New instance size is the same as' +
+ 'the current one')
+
+ attributes = {'InstanceType.Value': new_size.id}
+ return self.ex_modify_instance_attribute(node, attributes)
+
+ def ex_start_node(self, node):
+ """
+ Start the node by passing in the node object, does not work with
+ instance store backed instances
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'StartInstances'}
+ params.update(self._pathlist('InstanceId', [node.id]))
+ res = self.connection.request(self.path, params=params).object
+ return self._get_state_boolean(res)
+
+ def ex_stop_node(self, node):
+ """
+ Stop the node by passing in the node object, does not work with
+ instance store backed instances
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'StopInstances'}
+ params.update(self._pathlist('InstanceId', [node.id]))
+ res = self.connection.request(self.path, params=params).object
+ return self._get_state_boolean(res)
+
    def ex_get_console_output(self, node):
        """
        Get console output for the node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :return: Dictionary with the following keys:
                 - instance_id (``str``)
                 - timestamp (``datetime.datetime``) - ts of the last output
                 - output (``str``) - console output
        :rtype: ``dict``
        """
        params = {
            'Action': 'GetConsoleOutput',
            'InstanceId': node.id
        }

        response = self.connection.request(self.path, params=params).object

        timestamp = findattr(element=response,
                             xpath='timestamp',
                             namespace=NAMESPACE)

        # EC2 returns the console output base64-encoded.
        encoded_string = findattr(element=response,
                                  xpath='output',
                                  namespace=NAMESPACE)

        # Convert the raw timestamp string into a datetime object.
        timestamp = parse_date(timestamp)
        output = base64.b64decode(b(encoded_string)).decode('utf-8')

        return {'instance_id': node.id,
                'timestamp': timestamp,
                'output': output}
+
+ def ex_list_reserved_nodes(self):
+ """
+ List all reserved instances/nodes which can be purchased from Amazon
+ for one or three year terms. Reservations are made at a region level
+ and reduce the hourly charge for instances.
+
+ More information can be found at http://goo.gl/ulXCC7.
+
+ :rtype: ``list`` of :class:`.EC2ReservedNode`
+ """
+ params = {'Action': 'DescribeReservedInstances'}
+
+ response = self.connection.request(self.path, params=params).object
+
+ return self._to_reserved_nodes(response, 'reservedInstancesSet/item')
+
+ # Account specific methods
+
    def ex_get_limits(self):
        """
        Retrieve account resource limits.

        Queries DescribeAccountAttributes for the instance and elastic IP
        limits and returns them under a single 'resource' key.

        :rtype: ``dict``
        """
        attributes = ['max-instances', 'max-elastic-ips',
                      'vpc-max-elastic-ips']
        params = {}
        params['Action'] = 'DescribeAccountAttributes'

        # NOTE(review): AttributeName members are numbered from 0 here;
        # EC2 list parameters are conventionally 1-based -- confirm the
        # API accepts 0-based numbering.
        for index, attribute in enumerate(attributes):
            params['AttributeName.%s' % (index)] = attribute

        response = self.connection.request(self.path, params=params)
        data = response.object

        elems = data.findall(fixxpath(xpath='accountAttributeSet/item',
                                      namespace=NAMESPACE))

        result = {'resource': {}}

        # Each item carries an attribute name plus a single integer value.
        for elem in elems:
            name = findtext(element=elem, xpath='attributeName',
                            namespace=NAMESPACE)
            value = findtext(element=elem,
                             xpath='attributeValueSet/item/attributeValue',
                             namespace=NAMESPACE)

            result['resource'][name] = int(value)

        return result
+
+ # Deprecated extension methods
+
+ def ex_list_keypairs(self):
+ """
+ Lists all the keypair names and fingerprints.
+
+ :rtype: ``list`` of ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'list_key_pairs method')
+
+ key_pairs = self.list_key_pairs()
+
+ result = []
+
+ for key_pair in key_pairs:
+ item = {
+ 'keyName': key_pair.name,
+ 'keyFingerprint': key_pair.fingerprint,
+ }
+ result.append(item)
+
+ return result
+
+ def ex_describe_all_keypairs(self):
+ """
+ Return names for all the available key pairs.
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :rtype: ``list`` of ``str``
+ """
+ names = [key_pair.name for key_pair in self.list_key_pairs()]
+ return names
+
    def ex_describe_keypairs(self, name):
        """
        Here for backward compatibility.

        Plural-named alias for :meth:`ex_describe_keypair`; prefer calling
        that method directly.

        :param name: The name of the keypair to describe.
        :type name: ``str``

        :rtype: ``dict``
        """
        return self.ex_describe_keypair(name=name)
+
    def ex_describe_keypair(self, name):
        """
        Describes a keypair by name.

        @note: This is a non-standard extension API, and only works for EC2.

        :param name: The name of the keypair to describe.
        :type name: ``str``

        :rtype: ``dict``
        """

        params = {
            'Action': 'DescribeKeyPairs',
            'KeyName.1': name
        }

        response = self.connection.request(self.path, params=params).object
        # Only the first key in the result set is examined; the request
        # filters on a single key name so at most one item is expected.
        key_name = findattr(element=response, xpath='keySet/item/keyName',
                            namespace=NAMESPACE)
        fingerprint = findattr(element=response,
                               xpath='keySet/item/keyFingerprint',
                               namespace=NAMESPACE).strip()
        return {
            'keyName': key_name,
            'keyFingerprint': fingerprint
        }
+
+ def ex_create_keypair(self, name):
+ """
+ Creates a new keypair
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :param name: The name of the keypair to Create. This must be
+ unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
+ :type name: ``str``
+
+ :rtype: ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'create_key_pair method')
+
+ key_pair = self.create_key_pair(name=name)
+
+ result = {
+ 'keyMaterial': key_pair.private_key,
+ 'keyFingerprint': key_pair.fingerprint
+ }
+
+ return result
+
+ def ex_delete_keypair(self, keypair):
+ """
+ Delete a key pair by name.
+
+ @note: This is a non-standard extension API, and only works with EC2.
+
+ :param keypair: The name of the keypair to delete.
+ :type keypair: ``str``
+
+ :rtype: ``bool``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'delete_key_pair method')
+
+ keypair = KeyPair(name=keypair, public_key=None, fingerprint=None,
+ driver=self)
+
+ return self.delete_key_pair(keypair)
+
+ def ex_import_keypair_from_string(self, name, key_material):
+ """
+ imports a new public key where the public key is passed in as a string
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :param name: The name of the public key to import. This must be
+ unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
+ :type name: ``str``
+
+ :param key_material: The contents of a public key file.
+ :type key_material: ``str``
+
+ :rtype: ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'import_key_pair_from_string method')
+
+ key_pair = self.import_key_pair_from_string(name=name,
+ key_material=key_material)
+
+ result = {
+ 'keyName': key_pair.name,
+ 'keyFingerprint': key_pair.fingerprint
+ }
+ return result
+
+ def ex_import_keypair(self, name, keyfile):
+ """
+ imports a new public key where the public key is passed via a filename
+
+ @note: This is a non-standard extension API, and only works for EC2.
+
+ :param name: The name of the public key to import. This must be
+ unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
+ :type name: ``str``
+
+ :param keyfile: The filename with path of the public key to import.
+ :type keyfile: ``str``
+
+ :rtype: ``dict``
+ """
+ warnings.warn('This method has been deprecated in favor of '
+ 'import_key_pair_from_file method')
+
+ key_pair = self.import_key_pair_from_file(name=name,
+ key_file_path=keyfile)
+
+ result = {
+ 'keyName': key_pair.name,
+ 'keyFingerprint': key_pair.fingerprint
+ }
+ return result
+
+ def ex_find_or_import_keypair_by_key_material(self, pubkey):
+ """
+ Given a public key, look it up in the EC2 KeyPair database. If it
+ exists, return any information we have about it. Otherwise, create it.
+
+ Keys that are created are named based on their comment and fingerprint.
+
+ :rtype: ``dict``
+ """
+ key_fingerprint = get_pubkey_ssh2_fingerprint(pubkey)
+ key_comment = get_pubkey_comment(pubkey, default='unnamed')
+ key_name = '%s-%s' % (key_comment, key_fingerprint)
+
+ key_pairs = self.list_key_pairs()
+ key_pairs = [key_pair for key_pair in key_pairs if
+ key_pair.fingerprint == key_fingerprint]
+
+ if len(key_pairs) >= 1:
+ key_pair = key_pairs[0]
+ result = {
+ 'keyName': key_pair.name,
+ 'keyFingerprint': key_pair.fingerprint
+ }
+ else:
+ result = self.ex_import_keypair_from_string(key_name, pubkey)
+
+ return result
+
+ def ex_list_internet_gateways(self, gateway_ids=None, filters=None):
+ """
+ Describes available Internet gateways and whether or not they are
+ attached to a VPC. These are required for VPC nodes to communicate
+ over the Internet.
+
+ :param gateway_ids: Return only intenet gateways matching the
+ provided internet gateway IDs. If not
+ specified, a list of all the internet
+ gateways in the corresponding region is
+ returned.
+ :type gateway_ids: ``list``
+
+ :param filters: The filters so that the response includes
+ information for only certain gateways.
+ :type filters: ``dict``
+
+ :rtype: ``list`` of :class:`.VPCInternetGateway`
+ """
+ params = {'Action': 'DescribeInternetGateways'}
+
+ if gateway_ids:
+ params.update(self._pathlist('InternetGatewayId', gateway_ids))
+
+ if filters:
+ params.update(self._build_filters(filters))
+
+ response = self.connection.request(self.path, params=params).object
+
+ return self._to_internet_gateways(response, 'internetGatewaySet/item')
+
    def ex_create_internet_gateway(self, name=None):
        """
        Create a VPC Internet gateway.

        :param name: An optional name for the gateway, stored as a
                     "Name" tag on the created resource.
        :type name: ``str``

        :return: The newly created gateway.
        :rtype: :class:`.VPCInternetGateway`
        """
        params = {'Action': 'CreateInternetGateway'}

        resp = self.connection.request(self.path, params=params).object

        element = resp.findall(fixxpath(xpath='internetGateway',
                                        namespace=NAMESPACE))

        gateway = self._to_internet_gateway(element[0], name)

        # Only record the name locally once tagging actually succeeded.
        if name and self.ex_create_tags(gateway, {'Name': name}):
            gateway.extra['tags']['Name'] = name

        return gateway
+
+ def ex_delete_internet_gateway(self, gateway):
+ """
+ Delete a VPC Internet gateway
+
+ :param gateway: The gateway to delete
+ :type gateway: :class:`.VPCInternetGateway`
+
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeleteInternetGateway',
+ 'InternetGatewayId': gateway.id}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
    def ex_attach_internet_gateway(self, gateway, network):
        """
        Attach an Internet gateway to a VPC

        :param gateway: The gateway to attach
        :type gateway: :class:`.VPCInternetGateway`

        :param network: The VPC network to attach to
        :type network: :class:`.EC2Network`

        :rtype: ``bool``
        """
        params = {'Action': 'AttachInternetGateway',
                  'InternetGatewayId': gateway.id,
                  'VpcId': network.id}

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)
+
    def ex_detach_internet_gateway(self, gateway, network):
        """
        Detach an Internet gateway from a VPC

        :param gateway: The gateway to detach
        :type gateway: :class:`.VPCInternetGateway`

        :param network: The VPC network to detach from
        :type network: :class:`.EC2Network`

        :rtype: ``bool``
        """
        params = {'Action': 'DetachInternetGateway',
                  'InternetGatewayId': gateway.id,
                  'VpcId': network.id}

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)
+
+ def ex_list_route_tables(self, route_table_ids=None, filters=None):
+ """
+ Describes one or more of a VPC's route tables.
+ These are are used to determine where network traffic is directed.
+
+ :param route_table_ids: Return only route tables matching the
+ provided route table IDs. If not specified,
+ a list of all the route tables in the
+ corresponding region is returned.
+ :type route_table_ids: ``list``
+
+ :param filters: The filters so that the response includes
+ information for only certain route tables.
+ :type filters: ``dict``
+
+ :rtype: ``list`` of :class:`.EC2RouteTable`
+ """
+ params = {'Action': 'DescribeRouteTables'}
+
+ if route_table_ids:
+ params.update(self._pathlist('RouteTableId', route_table_ids))
+
+ if filters:
+ params.update(self._build_filters(filters))
+
+ response = self.connection.request(self.path, params=params)
+
+ return self._to_route_tables(response.object)
+
    def ex_create_route_table(self, network, name=None):
        """
        Create a route table within a VPC.

        :param network: The VPC that the route table should be created in.
        :type network: :class:`.EC2Network`

        :param name: An optional name for the table, stored as a
                     "Name" tag on the created resource.
        :type name: ``str``

        :rtype: :class: `.EC2RouteTable`
        """
        params = {'Action': 'CreateRouteTable',
                  'VpcId': network.id}

        response = self.connection.request(self.path, params=params).object
        element = response.findall(fixxpath(xpath='routeTable',
                                            namespace=NAMESPACE))[0]

        route_table = self._to_route_table(element, name=name)

        # Only record the name locally once tagging actually succeeded.
        if name and self.ex_create_tags(route_table, {'Name': name}):
            route_table.extra['tags']['Name'] = name

        return route_table
+
+ def ex_delete_route_table(self, route_table):
+ """
+ Deletes a VPC route table.
+
+ :param route_table: The route table to delete.
+ :type route_table: :class:`.EC2RouteTable`
+
+ :rtype: ``bool``
+ """
+
+ params = {'Action': 'DeleteRouteTable',
+ 'RouteTableId': route_table.id}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
+ def ex_associate_route_table(self, route_table, subnet):
+ """
+ Associates a route table with a subnet within a VPC.
+
+ Note: A route table can be associated with multiple subnets.
+
+ :param route_table: The route table to associate.
+ :type route_table: :class:`.EC2RouteTable`
+
+ :param subnet: The subnet to associate with.
+ :type subnet: :class:`.EC2Subnet`
+
+ :return: Route table association ID.
+ :rtype: ``str``
+ """
+
+ params = {'Action': 'AssociateRouteTable',
+ 'RouteTableId': route_table.id,
+ 'SubnetId': subnet.id}
+
+ result = self.connection.request(self.path, params=params).object
+ association_id = findtext(element=result,
+ xpath='associationId',
+ namespace=NAMESPACE)
+
+ return association_id
+
    def ex_dissociate_route_table(self, subnet_association):
        """
        Dissociates a subnet from a route table.

        Note: the underlying API action is spelled 'DisassociateRouteTable';
        the method name is kept as-is for backwards compatibility.

        :param subnet_association: The subnet association object or
                                   subnet association ID.
        :type subnet_association: :class:`.EC2SubnetAssociation` or
                                  ``str``

        :rtype: ``bool``
        """

        # Accept either the association object or its raw ID string.
        if isinstance(subnet_association, EC2SubnetAssociation):
            subnet_association_id = subnet_association.id
        else:
            subnet_association_id = subnet_association

        params = {'Action': 'DisassociateRouteTable',
                  'AssociationId': subnet_association_id}

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)
+
    def ex_replace_route_table_association(self, subnet_association,
                                           route_table):
        """
        Changes the route table associated with a given subnet in a VPC.

        Note: This method can be used to change which table is the main route
              table in the VPC (Specify the main route table's association ID
              and the route table to be the new main route table).

        :param subnet_association: The subnet association object or
                                   subnet association ID.
        :type subnet_association: :class:`.EC2SubnetAssociation` or
                                  ``str``

        :param route_table: The new route table to associate.
        :type route_table: :class:`.EC2RouteTable`

        :return: New route table association ID.
        :rtype: ``str``
        """

        # Accept either the association object or its raw ID string.
        if isinstance(subnet_association, EC2SubnetAssociation):
            subnet_association_id = subnet_association.id
        else:
            subnet_association_id = subnet_association

        params = {'Action': 'ReplaceRouteTableAssociation',
                  'AssociationId': subnet_association_id,
                  'RouteTableId': route_table.id}

        result = self.connection.request(self.path, params=params).object
        # The API hands back a fresh association id for the new mapping.
        new_association_id = findtext(element=result,
                                      xpath='newAssociationId',
                                      namespace=NAMESPACE)

        return new_association_id
+
    def ex_create_route(self, route_table, cidr,
                        internet_gateway=None, node=None,
                        network_interface=None, vpc_peering_connection=None):
        """
        Creates a route entry in the route table.

        :param route_table: The route table to create the route in.
        :type route_table: :class:`.EC2RouteTable`

        :param cidr: The CIDR block used for the destination match.
        :type cidr: ``str``

        :param internet_gateway: The internet gateway to route
                                 traffic through.
        :type internet_gateway: :class:`.VPCInternetGateway`

        :param node: The NAT instance to route traffic through.
        :type node: :class:`Node`

        :param network_interface: The network interface of the node
                                  to route traffic through.
        :type network_interface: :class:`.EC2NetworkInterface`

        :param vpc_peering_connection: The VPC peering connection.
        :type vpc_peering_connection: :class:`.VPCPeeringConnection`

        :rtype: ``bool``

        Note: You must specify one of the following: internet_gateway,
              node, network_interface, vpc_peering_connection.
        """

        params = {'Action': 'CreateRoute',
                  'RouteTableId': route_table.id,
                  'DestinationCidrBlock': cidr}

        # Each optional target maps onto its own request parameter; the
        # caller is expected to supply exactly one of them.
        if internet_gateway:
            params['GatewayId'] = internet_gateway.id

        if node:
            params['InstanceId'] = node.id

        if network_interface:
            params['NetworkInterfaceId'] = network_interface.id

        if vpc_peering_connection:
            params['VpcPeeringConnectionId'] = vpc_peering_connection.id

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)
+
+ def ex_delete_route(self, route_table, cidr):
+ """
+ Deletes a route entry from the route table.
+
+ :param route_table: The route table to delete the route from.
+ :type route_table: :class:`.EC2RouteTable`
+
+ :param cidr: The CIDR block used for the destination match.
+ :type cidr: ``str``
+
+ :rtype: ``bool``
+ """
+
+ params = {'Action': 'DeleteRoute',
+ 'RouteTableId': route_table.id,
+ 'DestinationCidrBlock': cidr}
+
+ res = self.connection.request(self.path, params=params).object
+
+ return self._get_boolean(res)
+
    def ex_replace_route(self, route_table, cidr,
                         internet_gateway=None, node=None,
                         network_interface=None, vpc_peering_connection=None):
        """
        Replaces an existing route entry within a route table in a VPC.

        :param route_table: The route table to replace the route in.
        :type route_table: :class:`.EC2RouteTable`

        :param cidr: The CIDR block used for the destination match.
        :type cidr: ``str``

        :param internet_gateway: The new internet gateway to route
                                 traffic through.
        :type internet_gateway: :class:`.VPCInternetGateway`

        :param node: The new NAT instance to route traffic through.
        :type node: :class:`Node`

        :param network_interface: The new network interface of the node
                                  to route traffic through.
        :type network_interface: :class:`.EC2NetworkInterface`

        :param vpc_peering_connection: The new VPC peering connection.
        :type vpc_peering_connection: :class:`.VPCPeeringConnection`

        :rtype: ``bool``

        Note: You must specify one of the following: internet_gateway,
              node, network_interface, vpc_peering_connection.
        """

        params = {'Action': 'ReplaceRoute',
                  'RouteTableId': route_table.id,
                  'DestinationCidrBlock': cidr}

        # Each optional target maps onto its own request parameter; the
        # caller is expected to supply exactly one of them.
        if internet_gateway:
            params['GatewayId'] = internet_gateway.id

        if node:
            params['InstanceId'] = node.id

        if network_interface:
            params['NetworkInterfaceId'] = network_interface.id

        if vpc_peering_connection:
            params['VpcPeeringConnectionId'] = vpc_peering_connection.id

        res = self.connection.request(self.path, params=params).object

        return self._get_boolean(res)
+
+ def _to_nodes(self, object, xpath):
+ return [self._to_node(el)
+ for el in object.findall(fixxpath(xpath=xpath,
+ namespace=NAMESPACE))]
+
    def _to_node(self, element):
        """
        Parse the XML element and return a Node object.

        :rtype: :class:`Node`
        """
        # Unknown state strings fall back to NodeState.UNKNOWN.
        try:
            state = self.NODE_STATE_MAP[findattr(element=element,
                                        xpath="instanceState/name",
                                        namespace=NAMESPACE)
                                        ]
        except KeyError:
            state = NodeState.UNKNOWN

        instance_id = findtext(element=element, xpath='instanceId',
                               namespace=NAMESPACE)
        public_ip = findtext(element=element, xpath='ipAddress',
                             namespace=NAMESPACE)
        public_ips = [public_ip] if public_ip else []
        private_ip = findtext(element=element, xpath='privateIpAddress',
                              namespace=NAMESPACE)
        private_ips = [private_ip] if private_ip else []
        product_codes = []
        for p in findall(element=element,
                         xpath="productCodesSet/item/productCode",
                         namespace=NAMESPACE):
            product_codes.append(p)

        # Get our tags
        tags = self._get_resource_tags(element)
        # Fall back to the instance id when no 'Name' tag is present.
        name = tags.get('Name', instance_id)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])

        # Add additional properties to our extra dictionary
        extra['block_device_mapping'] = self._to_device_mappings(element)
        extra['groups'] = self._get_security_groups(element)
        extra['network_interfaces'] = self._to_interfaces(element)
        extra['product_codes'] = product_codes
        extra['tags'] = tags

        return Node(id=instance_id, name=name, state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, extra=extra)
+
+ def _to_images(self, object):
+ return [self._to_image(el) for el in object.findall(
+ fixxpath(xpath='imagesSet/item', namespace=NAMESPACE))
+ ]
+
    def _to_image(self, element):
        """
        Parse the XML element and return a NodeImage object.

        :rtype: :class:`NodeImage`
        """

        # ``id`` intentionally mirrors the NodeImage kwarg (shadows builtin).
        id = findtext(element=element, xpath='imageId', namespace=NAMESPACE)
        name = findtext(element=element, xpath='name', namespace=NAMESPACE)

        # Build block device mapping
        block_device_mapping = self._to_device_mappings(element)

        # Get our tags
        tags = self._get_resource_tags(element)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['image'])

        # Add our tags and block device mapping
        extra['tags'] = tags
        extra['block_device_mapping'] = block_device_mapping

        return NodeImage(id=id, name=name, driver=self, extra=extra)
+
    def _to_volume(self, element, name=None):
        """
        Parse the XML element and return a StorageVolume object.

        :param name: An optional name for the volume. If not provided
                     then either tag with a key "Name" or volume ID
                     will be used (which ever is available first in that
                     order).
        :type name: ``str``

        :rtype: :class:`StorageVolume`
        """
        volId = findtext(element=element, xpath='volumeId',
                         namespace=NAMESPACE)
        # NOTE(review): assumes <size> is always present in the response;
        # a missing element would make int(size) below raise -- confirm.
        size = findtext(element=element, xpath='size', namespace=NAMESPACE)

        # Get our tags
        tags = self._get_resource_tags(element)

        # If name was not passed into the method then
        # fall back then use the volume id
        name = name if name else tags.get('Name', volId)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])

        extra['tags'] = tags

        return StorageVolume(id=volId,
                             name=name,
                             size=int(size),
                             driver=self,
                             extra=extra)
+
+ def _to_snapshots(self, response):
+ return [self._to_snapshot(el) for el in response.findall(
+ fixxpath(xpath='snapshotSet/item', namespace=NAMESPACE))
+ ]
+
    def _to_snapshot(self, element, name=None):
        """
        Parse the XML element and return a VolumeSnapshot object.

        :param name: An optional name for the snapshot. Falls back to the
                     "Name" tag or the snapshot id.
        :type name: ``str``

        :rtype: :class:`VolumeSnapshot`
        """
        snapId = findtext(element=element, xpath='snapshotId',
                          namespace=NAMESPACE)
        size = findtext(element=element, xpath='volumeSize',
                        namespace=NAMESPACE)

        # Get our tags
        tags = self._get_resource_tags(element)

        # If name was not passed into the method then
        # fall back then use the snapshot id
        name = name if name else tags.get('Name', snapId)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot'])

        # Add tags and name to the extra dict
        extra['tags'] = tags
        extra['name'] = name

        return VolumeSnapshot(snapId, size=int(size),
                              driver=self, extra=extra)
+
+ def _to_key_pairs(self, elems):
+ key_pairs = [self._to_key_pair(elem=elem) for elem in elems]
+ return key_pairs
+
    def _to_key_pair(self, elem):
        """
        Parse the XML element and return a KeyPair object.

        :rtype: :class:`KeyPair`
        """
        name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE)
        # NOTE(review): assumes keyFingerprint is always present; .strip()
        # on a missing value would raise -- confirm.
        fingerprint = findtext(element=elem, xpath='keyFingerprint',
                               namespace=NAMESPACE).strip()
        private_key = findtext(element=elem, xpath='keyMaterial',
                               namespace=NAMESPACE)

        key_pair = KeyPair(name=name,
                           public_key=None,
                           fingerprint=fingerprint,
                           private_key=private_key,
                           driver=self)
        return key_pair
+
+ def _to_security_groups(self, response):
+ return [self._to_security_group(el) for el in response.findall(
+ fixxpath(xpath='securityGroupInfo/item', namespace=NAMESPACE))
+ ]
+
    def _to_security_group(self, element):
        """
        Parse the XML element and return an EC2SecurityGroup object.

        :rtype: :class:`EC2SecurityGroup`
        """
        # security group id
        sg_id = findtext(element=element,
                         xpath='groupId',
                         namespace=NAMESPACE)

        # security group name
        name = findtext(element=element,
                        xpath='groupName',
                        namespace=NAMESPACE)

        # Get our tags
        tags = self._get_resource_tags(element)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['security_group'])

        # Add tags to the extra dict
        extra['tags'] = tags

        # Get ingress rules
        ingress_rules = self._to_security_group_rules(
            element, 'ipPermissions/item'
        )

        # Get egress rules
        egress_rules = self._to_security_group_rules(
            element, 'ipPermissionsEgress/item'
        )

        return EC2SecurityGroup(sg_id, name, ingress_rules,
                                egress_rules, extra=extra)
+
+ def _to_security_group_rules(self, element, xpath):
+ return [self._to_security_group_rule(el) for el in element.findall(
+ fixxpath(xpath=xpath, namespace=NAMESPACE))
+ ]
+
+ def _to_security_group_rule(self, element):
+ """
+ Parse the XML element and return a SecurityGroup object.
+
+ :rtype: :class:`EC2SecurityGroup`
+ """
+
+ rule = {}
+ rule['protocol'] = findtext(element=element,
+ xpath='ipProtocol',
+ namespace=NAMESPACE)
+
+ rule['from_port'] = findtext(element=element,
+ xpath='fromPort',
+ namespace=NAMESPACE)
+
+ rule['to_port'] = findtext(element=element,
+ xpath='toPort',
+ namespace=NAMESPACE)
+
+ # get security groups
+ elements = element.findall(fixxpath(
+ xpath='groups/item',
+ namespace=NAMESPACE
+ ))
+
+ rule['group_pairs'] = []
+
+ for element in elements:
+ item = {
+ 'user_id': findtext(
+ element=element,
+ xpath='userId',
+ namespace=NAMESPACE),
+ 'group_id': findtext(
+ element=element,
+ xpath='groupId',
+ namespace=NAMESPACE),
+ 'group_name': findtext(
+ element=element,
+ xpath='groupName',
+ namespace=NAMESPACE)
+ }
+ rule['group_pairs'].append(item)
+
+ # get ip ranges
+ elements = element.findall(fixxpath(
+ xpath='ipRanges/item',
+ namespace=NAMESPACE
+ ))
+
+ rule['cidr_ips'] = [
+ findtext(
+ element=element,
+ xpath='cidrIp',
+ namespace=NAMESPACE
+ ) for element in elements]
+
+ return rule
+
+ def _to_networks(self, response):
+ return [self._to_network(el) for el in response.findall(
+ fixxpath(xpath='vpcSet/item', namespace=NAMESPACE))
+ ]
+
    def _to_network(self, element, name=None):
        """
        Parse the XML element and return an EC2Network object.

        :param name: An optional name for the network. Falls back to the
                     "Name" tag or the VPC id.
        :type name: ``str``

        :rtype: :class:`EC2Network`
        """
        # Get the network id
        vpc_id = findtext(element=element,
                          xpath='vpcId',
                          namespace=NAMESPACE)

        # Get our tags
        tags = self._get_resource_tags(element)

        # Set our name if the Name key/value if available
        # If we don't get anything back then use the vpc_id
        name = name if name else tags.get('Name', vpc_id)

        cidr_block = findtext(element=element,
                              xpath='cidrBlock',
                              namespace=NAMESPACE)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['network'])

        # Add tags to the extra dict
        extra['tags'] = tags

        return EC2Network(vpc_id, name, cidr_block, extra=extra)
+
+ def _to_addresses(self, response, only_associated):
+ """
+ Builds a list of dictionaries containing elastic IP properties.
+
+ :param only_associated: If true, return only those addresses
+ that are associated with an instance.
+ If false, return all addresses.
+ :type only_associated: ``bool``
+
+ :rtype: ``list`` of :class:`ElasticIP`
+ """
+ addresses = []
+ for el in response.findall(fixxpath(xpath='addressesSet/item',
+ namespace=NAMESPACE)):
+ addr = self._to_address(el, only_associated)
+ if addr is not None:
+ addresses.append(addr)
+
+ return addresses
+
    def _to_address(self, element, only_associated):
        """
        Parse a single ``addressesSet/item`` element into an ElasticIP.

        :param only_associated: When True, an address with no attached
                                instance yields ``None`` instead of an
                                ElasticIP.
        :type only_associated: ``bool``

        :rtype: :class:`ElasticIP` or ``None``
        """
        instance_id = findtext(element=element, xpath='instanceId',
                               namespace=NAMESPACE)

        public_ip = findtext(element=element,
                             xpath='publicIp',
                             namespace=NAMESPACE)

        domain = findtext(element=element,
                          xpath='domain',
                          namespace=NAMESPACE)

        # Build our extra dict
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['elastic_ip'])

        # Return NoneType if only associated IPs are requested
        if only_associated and not instance_id:
            return None

        return ElasticIP(public_ip, domain, instance_id, extra=extra)
+
+ def _to_subnets(self, response):
+ return [self._to_subnet(el) for el in response.findall(
+ fixxpath(xpath='subnetSet/item', namespace=NAMESPACE))
+ ]
+
    def _to_subnet(self, element, name=None):
        """
        Parse the XML element and return an EC2NetworkSubnet object.

        :param name: An optional name for the subnet. If not provided,
                     the "Name" tag is used, falling back to the subnet ID.
        :type name: ``str``

        :rtype: :class:`EC2NetworkSubnet`
        """
        # Get the subnet ID
        subnet_id = findtext(element=element,
                             xpath='subnetId',
                             namespace=NAMESPACE)

        # Get our tags
        tags = self._get_resource_tags(element)

        # If we don't get anything back then use the subnet_id
        name = name if name else tags.get('Name', subnet_id)

        state = findtext(element=element,
                         xpath='state',
                         namespace=NAMESPACE)

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['subnet'])

        # Also include our tags
        extra['tags'] = tags

        return EC2NetworkSubnet(subnet_id, name, state, extra=extra)
+
+ def _to_interfaces(self, response):
+ return [self._to_interface(el) for el in response.findall(
+ fixxpath(xpath='networkInterfaceSet/item', namespace=NAMESPACE))
+ ]
+
    def _to_interface(self, element, name=None):
        """
        Parse the XML element and return a EC2NetworkInterface object.

        :param name: An optional name for the interface. If not provided
                     then either tag with a key "Name" or the interface ID
                     will be used (whichever is available first in that
                     order).
        :type name: ``str``

        :rtype: :class: `EC2NetworkInterface`
        """

        interface_id = findtext(element=element,
                                xpath='networkInterfaceId',
                                namespace=NAMESPACE)

        state = findtext(element=element,
                         xpath='status',
                         namespace=NAMESPACE)

        # Get tags
        tags = self._get_resource_tags(element)

        name = name if name else tags.get('Name', interface_id)

        # Build security groups
        groups = self._get_security_groups(element)

        # Build private IPs. Note that all values here are the raw strings
        # returned by the API -- 'primary' is the text 'true'/'false',
        # not a bool.
        priv_ips = []
        for item in findall(element=element,
                            xpath='privateIpAddressesSet/item',
                            namespace=NAMESPACE):

            priv_ips.append({'private_ip': findtext(element=item,
                                                    xpath='privateIpAddress',
                                                    namespace=NAMESPACE),
                             'private_dns': findtext(element=item,
                                                     xpath='privateDnsName',
                                                     namespace=NAMESPACE),
                             'primary': findtext(element=item,
                                                 xpath='primary',
                                                 namespace=NAMESPACE)})

        # Build our attachment dictionary which we will add into extra later
        attributes_map = \
            RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface_attachment']
        attachment = self._get_extra_dict(element, attributes_map)

        # Build our extra dict
        attributes_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface']
        extra = self._get_extra_dict(element, attributes_map)

        # Include our previously built items as well
        extra['tags'] = tags
        extra['attachment'] = attachment
        extra['private_ips'] = priv_ips
        extra['groups'] = groups

        return EC2NetworkInterface(interface_id, name, state, extra=extra)
+
+ def _to_reserved_nodes(self, object, xpath):
+ return [self._to_reserved_node(el)
+ for el in object.findall(fixxpath(xpath=xpath,
+ namespace=NAMESPACE))]
+
    def _to_reserved_node(self, element):
        """
        Build an EC2ReservedNode object using the reserved instance properties.
        Information on these properties can be found at http://goo.gl/ulXCC7.

        :rtype: :class:`EC2ReservedNode`
        """

        # Get our extra dictionary
        extra = self._get_extra_dict(
            element, RESOURCE_EXTRA_ATTRIBUTES_MAP['reserved_node'])

        # Resolve the reserved instance type against list_sizes(); when the
        # type is unknown the size is left as None.
        try:
            size = [size for size in self.list_sizes() if
                    size.id == extra['instance_type']][0]
        except IndexError:
            size = None

        return EC2ReservedNode(id=findtext(element=element,
                                           xpath='reservedInstancesId',
                                           namespace=NAMESPACE),
                               state=findattr(element=element,
                                              xpath='state',
                                              namespace=NAMESPACE),
                               driver=self,
                               size=size,
                               extra=extra)
+
+ def _to_device_mappings(self, object):
+ return [self._to_device_mapping(el) for el in object.findall(
+ fixxpath(xpath='blockDeviceMapping/item', namespace=NAMESPACE))
+ ]
+
    def _to_device_mapping(self, element):
        """
        Parse the XML element and return a dictionary of device properties.
        Additional information can be found at http://goo.gl/GjWYBf.

        @note: EBS volumes do not have a virtual name. Only ephemeral
               disks use this property.

        :rtype: ``dict``
        """
        mapping = {}

        mapping['device_name'] = findattr(element=element,
                                          xpath='deviceName',
                                          namespace=NAMESPACE)

        mapping['virtual_name'] = findattr(element=element,
                                           xpath='virtualName',
                                           namespace=NAMESPACE)

        # If virtual name does not exist then this is an EBS volume.
        # Build the EBS dictionary leveraging the _get_extra_dict method.
        if mapping['virtual_name'] is None:
            mapping['ebs'] = self._get_extra_dict(
                element, RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_volume'])

        return mapping
+
+ def _to_internet_gateways(self, object, xpath):
+ return [self._to_internet_gateway(el)
+ for el in object.findall(fixxpath(xpath=xpath,
+ namespace=NAMESPACE))]
+
+ def _to_internet_gateway(self, element, name=None):
+ id = findtext(element=element,
+ xpath='internetGatewayId',
+ namespace=NAMESPACE)
+
+ vpc_id = findtext(element=element,
+ xpath='attachmentSet/item/vpcId',
+ namespace=NAMESPACE)
+
+ state = findtext(element=element,
+ xpath='attachmentSet/item/state',
+ namespace=NAMESPACE)
+
+ # If there's no attachment state, let's
+ # set it to available
+ if not state:
+ state = 'available'
+
+ # Get our tags
+ tags = self._get_resource_tags(element)
+
+ # If name was not passed into the method then
+ # fall back then use the gateway id
+ name = name if name else tags.get('Name', id)
+
+ return VPCInternetGateway(id=id, name=name, vpc_id=vpc_id,
+ state=state, driver=self.connection.driver,
+ extra={'tags': tags})
+
+ def _to_route_tables(self, response):
+ return [self._to_route_table(el) for el in response.findall(
+ fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE))
+ ]
+
+ def _to_route_table(self, element, name=None):
+ # route table id
+ route_table_id = findtext(element=element,
+ xpath='routeTableId',
+ namespace=NAMESPACE)
+
+ # Get our tags
+ tags = self._get_resource_tags(element)
+
+ # Get our extra dictionary
+ extra = self._get_extra_dict(
+ element, RESOURCE_EXTRA_ATTRIBUTES_MAP['route_table'])
+
+ # Add tags to the extra dict
+ extra['tags'] = tags
+
+ # Get routes
+ routes = self._to_routes(element, 'routeSet/item')
+
+ # Get subnet associations
+ subnet_associations = self._to_subnet_associations(
+ element, 'associationSet/item')
+
+ # Get propagating routes virtual private gateways (VGW) IDs
+ propagating_gateway_ids = []
+ for el in element.findall(fixxpath(xpath='propagatingVgwSet/item',
+ namespace=NAMESPACE)):
+ propagating_gateway_ids.append(findtext(element=el,
+ xpath='gatewayId',
+ namespace=NAMESPACE))
+
+ name = name if name else tags.get('Name', id)
+
+ return EC2RouteTable(route_table_id, name, routes, subnet_associations,
+ propagating_gateway_ids, extra=extra)
+
+ def _to_routes(self, element, xpath):
+ return [self._to_route(el) for el in element.findall(
+ fixxpath(xpath=xpath, namespace=NAMESPACE))
+ ]
+
    def _to_route(self, element):
        """
        Parse the XML element and return a route object

        Each field is read with findtext(), so any element missing from
        the item yields ``None`` for the corresponding attribute.

        :rtype: :class: `EC2Route`
        """

        destination_cidr = findtext(element=element,
                                    xpath='destinationCidrBlock',
                                    namespace=NAMESPACE)

        gateway_id = findtext(element=element,
                              xpath='gatewayId',
                              namespace=NAMESPACE)

        instance_id = findtext(element=element,
                               xpath='instanceId',
                               namespace=NAMESPACE)

        owner_id = findtext(element=element,
                            xpath='instanceOwnerId',
                            namespace=NAMESPACE)

        interface_id = findtext(element=element,
                                xpath='networkInterfaceId',
                                namespace=NAMESPACE)

        state = findtext(element=element,
                         xpath='state',
                         namespace=NAMESPACE)

        origin = findtext(element=element,
                          xpath='origin',
                          namespace=NAMESPACE)

        vpc_peering_connection_id = findtext(element=element,
                                             xpath='vpcPeeringConnectionId',
                                             namespace=NAMESPACE)

        return EC2Route(destination_cidr, gateway_id, instance_id, owner_id,
                        interface_id, state, origin, vpc_peering_connection_id)
+
+ def _to_subnet_associations(self, element, xpath):
+ return [self._to_subnet_association(el) for el in element.findall(
+ fixxpath(xpath=xpath, namespace=NAMESPACE))
+ ]
+
+ def _to_subnet_association(self, element):
+ """
+ Parse the XML element and return a route table association object
+
+ :rtype: :class: `EC2SubnetAssociation`
+ """
+
+ association_id = findtext(element=element,
+ xpath='routeTableAssociationId',
+ namespace=NAMESPACE)
+
+ route_table_id = findtext(element=element,
+ xpath='routeTableId',
+ namespace=NAMESPACE)
+
+ subnet_id = findtext(element=element,
+ xpath='subnetId',
+ namespace=NAMESPACE)
+
+ main = findtext(element=element,
+ xpath='main',
+ namespace=NAMESPACE)
+
+ main = True if main else False
+
+ return EC2SubnetAssociation(association_id, route_table_id,
+ subnet_id, main)
+
+ def _pathlist(self, key, arr):
+ """
+ Converts a key and an array of values into AWS query param format.
+ """
+ params = {}
+ i = 0
+
+ for value in arr:
+ i += 1
+ params['%s.%s' % (key, i)] = value
+
+ return params
+
    def _get_boolean(self, element):
        """
        Return True when the namespaced <return> child of ``element``
        contains the literal string 'true'.
        """
        tag = '{%s}%s' % (NAMESPACE, 'return')
        return element.findtext(tag) == 'true'
+
+ def _get_terminate_boolean(self, element):
+ status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
+ return any([term_status == status
+ for term_status
+ in ('shutting-down', 'terminated')])
+
    def _add_instance_filter(self, params, node):
        """
        Add instance filter to the provided params dictionary.

        :param params: Query parameters to update in place.
        :type params: ``dict``

        :param node: Node whose ID populates the 'instance-id' filter.
        :type node: :class:`Node`

        :return: The updated params dictionary.
        :rtype: ``dict``
        """
        filters = {'instance-id': node.id}
        params.update(self._build_filters(filters))

        return params
+
    def _get_state_boolean(self, element):
        """
        Checks for the instances's state

        :return: True when the first instance's current state is
                 'stopping', 'pending' or 'starting'.
        :rtype: ``bool``
        """
        # NOTE(review): the [0] index raises IndexError when the response
        # contains no instancesSet/item elements.
        state = findall(element=element,
                        xpath='instancesSet/item/currentState/name',
                        namespace=NAMESPACE)[0].text

        return state in ('stopping', 'pending', 'starting')
+
+ def _get_extra_dict(self, element, mapping):
+ """
+ Extract attributes from the element based on rules provided in the
+ mapping dictionary.
+
+ :param element: Element to parse the values from.
+ :type element: xml.etree.ElementTree.Element.
+
+ :param mapping: Dictionary with the extra layout
+ :type node: :class:`Node`
+
+ :rtype: ``dict``
+ """
+ extra = {}
+ for attribute, values in mapping.items():
+ transform_func = values['transform_func']
+ value = findattr(element=element,
+ xpath=values['xpath'],
+ namespace=NAMESPACE)
+ if value is not None:
+ extra[attribute] = transform_func(value)
+ else:
+ extra[attribute] = None
+
+ return extra
+
+ def _get_resource_tags(self, element):
+ """
+ Parse tags from the provided element and return a dictionary with
+ key/value pairs.
+
+ :rtype: ``dict``
+ """
+ tags = {}
+
+ # Get our tag set by parsing the element
+ tag_set = findall(element=element,
+ xpath='tagSet/item',
+ namespace=NAMESPACE)
+
+ for tag in tag_set:
+ key = findtext(element=tag,
+ xpath='key',
+ namespace=NAMESPACE)
+
+ value = findtext(element=tag,
+ xpath='value',
+ namespace=NAMESPACE)
+
+ tags[key] = value
+
+ return tags
+
+ def _get_block_device_mapping_params(self, block_device_mapping):
+ """
+ Return a list of dictionaries with query parameters for
+ a valid block device mapping.
+
+ :param mapping: List of dictionaries with the drive layout
+ :type mapping: ``list`` or ``dict``
+
+ :return: Dictionary representation of the drive mapping
+ :rtype: ``dict``
+ """
+
+ if not isinstance(block_device_mapping, (list, tuple)):
+ raise AttributeError(
+ 'block_device_mapping not list or tuple')
+
+ params = {}
+
+ for idx, mapping in enumerate(block_device_mapping):
+ idx += 1 # We want 1-based indexes
+ if not isinstance(mapping, dict):
+ raise AttributeError(
+ 'mapping %s in block_device_mapping '
+ 'not a dict' % mapping)
+ for k, v in mapping.items():
+ if not isinstance(v, dict):
+ params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v)
+ else:
+ for key, value in v.items():
+ params['BlockDeviceMapping.%d.%s.%s'
+ % (idx, k, key)] = str(value)
+ return params
+
+ def _get_common_security_group_params(self, group_id, protocol,
+ from_port, to_port, cidr_ips,
+ group_pairs):
+ """
+ Return a dictionary with common query parameters which are used when
+ operating on security groups.
+
+ :rtype: ``dict``
+ """
+ params = {'GroupId': group_id,
+ 'IpPermissions.1.IpProtocol': protocol,
+ 'IpPermissions.1.FromPort': from_port,
+ 'IpPermissions.1.ToPort': to_port}
+
+ if cidr_ips is not None:
+ ip_ranges = {}
+ for index, cidr_ip in enumerate(cidr_ips):
+ index += 1
+
+ ip_ranges['IpPermissions.1.IpRanges.%s.CidrIp'
+ % (index)] = cidr_ip
+
+ params.update(ip_ranges)
+
+ if group_pairs is not None:
+ user_groups = {}
+ for index, group_pair in enumerate(group_pairs):
+ index += 1
+
+ if 'group_id' in group_pair.keys():
+ user_groups['IpPermissions.1.Groups.%s.GroupId'
+ % (index)] = group_pair['group_id']
+
+ if 'group_name' in group_pair.keys():
+ user_groups['IpPermissions.1.Groups.%s.GroupName'
+ % (index)] = group_pair['group_name']
+
+ if 'user_id' in group_pair.keys():
+ user_groups['IpPermissions.1.Groups.%s.UserId'
+ % (index)] = group_pair['user_id']
+
+ params.update(user_groups)
+
+ return params
+
    def _get_security_groups(self, element):
        """
        Parse security groups from the provided element and return a
        list of security groups with the id and name key/value pairs.

        :rtype: ``list`` of ``dict``
        """
        groups = []

        for item in findall(element=element,
                            xpath='groupSet/item',
                            namespace=NAMESPACE):
            groups.append({
                'group_id': findtext(element=item,
                                     xpath='groupId',
                                     namespace=NAMESPACE),
                'group_name': findtext(element=item,
                                       xpath='groupName',
                                       namespace=NAMESPACE)
            })

        return groups
+
+ def _build_filters(self, filters):
+ """
+ Return a dictionary with filter query parameters which are used when
+ listing networks, security groups, etc.
+
+ :param filters: Dict of filter names and filter values
+ :type filters: ``dict``
+
+ :rtype: ``dict``
+ """
+
+ filter_entries = {}
+
+ for filter_idx, filter_data in enumerate(filters.items()):
+ filter_idx += 1 # We want 1-based indexes
+ filter_name, filter_values = filter_data
+ filter_key = 'Filter.%s.Name' % (filter_idx)
+ filter_entries[filter_key] = filter_name
+
+ if isinstance(filter_values, list):
+ for value_idx, value in enumerate(filter_values):
+ value_idx += 1 # We want 1-based indexes
+ value_key = 'Filter.%s.Value.%s' % (filter_idx,
+ value_idx)
+ filter_entries[value_key] = value
+ else:
+ value_key = 'Filter.%s.Value.1' % (filter_idx)
+ filter_entries[value_key] = filter_values
+
+ return filter_entries
+
+
class EC2NodeDriver(BaseEC2NodeDriver):
    """
    Amazon EC2 node driver.
    """

    connectionCls = EC2Connection
    type = Provider.EC2
    name = 'Amazon EC2'
    website = 'http://aws.amazon.com/ec2/'
    path = '/'

    # Maps EC2 API instance state names to libcloud NodeState values.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED,
        'stopped': NodeState.STOPPED
    }

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', **kwargs):
        """
        :param region: EC2 region to connect to; must be a member of
                       ``VALID_EC2_REGIONS``.
        :type region: ``str``

        :raises ValueError: If ``region`` is not a valid EC2 region.
        """
        # Region-pinned subclasses (e.g. EC2EUNodeDriver) override the
        # caller-supplied region via the ``_region`` class attribute.
        if hasattr(self, '_region'):
            region = self._region

        if region not in VALID_EC2_REGIONS:
            raise ValueError('Invalid region: %s' % (region))

        details = REGION_DETAILS[region]
        self.region_name = region
        self.api_name = details['api_name']
        self.country = details['country']

        # NOTE(review): assigns to the *class* attribute, so the endpoint
        # is shared by every instance using this connection class.
        self.connectionCls.host = details['endpoint']

        super(EC2NodeDriver, self).__init__(key=key, secret=secret,
                                            secure=secure, host=host,
                                            port=port, **kwargs)
+
+
class IdempotentParamError(LibcloudError):
    """
    Request used the same client token as a previous,
    but non-identical request.
    """

    def __str__(self):
        # ``value`` is provided by the LibcloudError base class.
        return repr(self.value)
+
+
class EC2EUNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Western Europe Region.

    Pins ``_region`` so the endpoint is selected automatically.
    """
    name = 'Amazon EC2 (eu-west-1)'
    _region = 'eu-west-1'
+
+
class EC2USWestNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Western US Region.

    Pins ``_region`` so the endpoint is selected automatically.
    """
    name = 'Amazon EC2 (us-west-1)'
    _region = 'us-west-1'
+
+
class EC2USWestOregonNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the US West Oregon region.

    Pins ``_region`` so the endpoint is selected automatically.
    """
    name = 'Amazon EC2 (us-west-2)'
    _region = 'us-west-2'
+
+
class EC2APSENodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Southeast Asia Pacific Region.

    Pins ``_region`` so the endpoint is selected automatically.
    """
    name = 'Amazon EC2 (ap-southeast-1)'
    _region = 'ap-southeast-1'
+
+
class EC2APNENodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Northeast Asia Pacific Region.

    Pins ``_region`` so the endpoint is selected automatically.
    """
    name = 'Amazon EC2 (ap-northeast-1)'
    _region = 'ap-northeast-1'
+
+
class EC2SAEastNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the South America (Sao Paulo) Region.

    Pins ``_region`` so the endpoint is selected automatically.
    """
    name = 'Amazon EC2 (sa-east-1)'
    _region = 'sa-east-1'
+
+
class EC2APSESydneyNodeDriver(EC2NodeDriver):
    """
    Driver class for EC2 in the Southeast Asia Pacific (Sydney) Region.

    Pins ``_region`` so the endpoint is selected automatically.
    """
    name = 'Amazon EC2 (ap-southeast-2)'
    _region = 'ap-southeast-2'
+
+
class EucConnection(EC2Connection):
    """
    Connection class for Eucalyptus

    ``host`` defaults to None here and is expected to be supplied when
    the driver is constructed -- confirm against BaseEC2NodeDriver.
    """

    host = None
+
+
class EucNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Eucalyptus
    """

    name = 'Eucalyptus'
    website = 'http://www.eucalyptus.com/'
    api_name = 'ec2_us_east'
    region_name = 'us-east-1'
    connectionCls = EucConnection

    def __init__(self, key, secret=None, secure=True, host=None,
                 path=None, port=None, api_version=DEFAULT_EUCA_API_VERSION):
        """
        @inherits: :class:`EC2NodeDriver.__init__`

        :param path: The path on the host where the API can be reached
                     (defaults to '/services/Eucalyptus').
        :type path: ``str``

        :param api_version: The API version to extend support for
                            Eucalyptus proprietary API calls
        :type api_version: ``str``
        """
        super(EucNodeDriver, self).__init__(key, secret, secure, host, port)

        if path is None:
            path = '/services/Eucalyptus'

        self.path = path
        # Eucalyptus responses use an API-version-specific XML namespace.
        self.EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (api_version)

    def list_locations(self):
        """Not supported by the Eucalyptus driver."""
        raise NotImplementedError(
            'list_locations not implemented for this driver')

    def _to_sizes(self, response):
        # Build a NodeSize for every instanceTypeDetails/item element.
        return [self._to_size(el) for el in response.findall(
            fixxpath(xpath='instanceTypeDetails/item',
                     namespace=self.EUCA_NAMESPACE))]

    def _to_size(self, el):
        # Parse a single instance type element into a NodeSize.
        name = findtext(element=el,
                        xpath='name',
                        namespace=self.EUCA_NAMESPACE)
        cpu = findtext(element=el,
                       xpath='cpu',
                       namespace=self.EUCA_NAMESPACE)
        disk = findtext(element=el,
                        xpath='disk',
                        namespace=self.EUCA_NAMESPACE)
        memory = findtext(element=el,
                          xpath='memory',
                          namespace=self.EUCA_NAMESPACE)

        # NOTE(review): ``driver`` is given the class, not this instance;
        # confirm whether that is intentional before changing.
        return NodeSize(id=name,
                        name=name,
                        ram=int(memory),
                        disk=int(disk),
                        bandwidth=None,
                        price=None,
                        driver=EucNodeDriver,
                        extra={
                            'cpu': int(cpu)
                        })

    def list_sizes(self):
        """
        List available instance flavors/sizes

        :rtype: ``list`` of :class:`NodeSize`
        """
        params = {'Action': 'DescribeInstanceTypes'}
        response = self.connection.request(self.path, params=params).object

        return self._to_sizes(response)

    def _add_instance_filter(self, params, node):
        """
        Eucalyptus driver doesn't support filtering on instance id so this is a
        no-op.
        """
        pass
+
+
class NimbusConnection(EC2Connection):
    """
    Connection class for Nimbus

    ``host`` defaults to None here and is expected to be supplied when
    the driver is constructed -- confirm against BaseEC2NodeDriver.
    """

    host = None
+
+
class NimbusNodeDriver(BaseEC2NodeDriver):
    """
    Driver class for Nimbus
    """

    type = Provider.NIMBUS
    name = 'Nimbus'
    website = 'http://www.nimbusproject.org/'
    country = 'Private'
    api_name = 'nimbus'
    region_name = 'nimbus'
    friendly_name = 'Nimbus Private Cloud'
    connectionCls = NimbusConnection

    def ex_describe_addresses(self, nodes):
        """
        Nimbus doesn't support elastic IPs, so this is a pass-through.

        :param nodes: Nodes to build the (empty) mapping for.
        :type nodes: ``list`` of :class:`Node`

        :return: Dict mapping each node ID to an empty list.
        :rtype: ``dict``

        @inherits: :class:`EC2NodeDriver.ex_describe_addresses`
        """
        nodes_elastic_ip_mappings = {}
        for node in nodes:
            # empty list per node
            nodes_elastic_ip_mappings[node.id] = []
        return nodes_elastic_ip_mappings

    def ex_create_tags(self, resource, tags):
        """
        Nimbus doesn't support creating tags, so this is a pass-through.

        @inherits: :class:`EC2NodeDriver.ex_create_tags`
        """
        pass
+
+
class OutscaleConnection(EC2Connection):
    """
    Connection class for Outscale

    ``host`` defaults to None and is filled in from the region details
    by :class:`OutscaleNodeDriver.__init__`.
    """

    host = None
+
+
class OutscaleNodeDriver(BaseEC2NodeDriver):
    """
    Base Outscale FCU node driver.

    Outscale per provider driver classes inherit from it.
    """

    connectionCls = OutscaleConnection
    name = 'Outscale'
    website = 'http://www.outscale.com'
    path = '/'

    # Maps API instance state names to libcloud NodeState values.
    NODE_STATE_MAP = {
        'pending': NodeState.PENDING,
        'running': NodeState.RUNNING,
        'shutting-down': NodeState.UNKNOWN,
        'terminated': NodeState.TERMINATED,
        'stopped': NodeState.STOPPED
    }

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', region_details=None, **kwargs):
        """
        :param region: Region to connect to; must be a key of
                       ``region_details``. Subclasses may pin it via the
                       ``_region`` class attribute.
        :type region: ``str``

        :param region_details: Mapping of region name to endpoint details
                               ('api_name', 'country', 'endpoint').
        :type region_details: ``dict``

        :raises ValueError: If ``region_details`` is None or ``region``
                            is not one of its keys.
        """
        # Region-pinned subclasses override the caller-supplied region.
        if hasattr(self, '_region'):
            region = self._region

        if region_details is None:
            raise ValueError('Invalid region_details argument')

        if region not in region_details.keys():
            raise ValueError('Invalid region: %s' % (region))

        self.region_name = region
        self.region_details = region_details
        details = self.region_details[region]
        self.api_name = details['api_name']
        self.country = details['country']

        # NOTE(review): assigns to the *class* attribute, so the endpoint
        # is shared by every instance using this connection class.
        self.connectionCls.host = details['endpoint']

        self._not_implemented_msg =\
            'This method is not supported in the Outscale driver'

        # Deliberately invokes the parent of BaseEC2NodeDriver (skipping
        # BaseEC2NodeDriver.__init__ itself) -- presumably to bypass the
        # EC2-specific region handling; confirm before changing.
        super(BaseEC2NodeDriver, self).__init__(key=key, secret=secret,
                                                secure=secure, host=host,
                                                port=port, **kwargs)

    def create_node(self, **kwargs):
        """
        Create a new Outscale node. The ex_iamprofile keyword is not supported.

        @inherits: :class:`BaseEC2NodeDriver.create_node`

        :keyword ex_keyname: The name of the key pair
        :type ex_keyname: ``str``

        :keyword ex_userdata: User data
        :type ex_userdata: ``str``

        :keyword ex_security_groups: A list of names of security groups to
                                     assign to the node.
        :type ex_security_groups: ``list``

        :keyword ex_metadata: Key/Value metadata to associate with a node
        :type ex_metadata: ``dict``

        :keyword ex_mincount: Minimum number of instances to launch
        :type ex_mincount: ``int``

        :keyword ex_maxcount: Maximum number of instances to launch
        :type ex_maxcount: ``int``

        :keyword ex_clienttoken: Unique identifier to ensure idempotency
        :type ex_clienttoken: ``str``

        :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device
                                         mappings.
        :type ex_blockdevicemappings: ``list`` of ``dict``

        :keyword ex_ebs_optimized: EBS-Optimized if True
        :type ex_ebs_optimized: ``bool``

        :raises NotImplementedError: If ex_iamprofile is supplied.
        """
        if 'ex_iamprofile' in kwargs:
            raise NotImplementedError("ex_iamprofile not implemented")
        return super(OutscaleNodeDriver, self).create_node(**kwargs)

    def ex_create_network(self, cidr_block, name=None):
        """
        Create a network/VPC. Outscale does not support instance_tenancy.

        :param cidr_block: The CIDR block assigned to the network
        :type cidr_block: ``str``

        :param name: An optional name for the network
        :type name: ``str``

        :return: Dictionary of network properties
        :rtype: ``dict``
        """
        return super(OutscaleNodeDriver, self).ex_create_network(cidr_block,
                                                                 name=name)

    def ex_modify_instance_attribute(self, node, disable_api_termination=None,
                                     ebs_optimized=None, group_id=None,
                                     source_dest_check=None, user_data=None,
                                     instance_type=None):
        """
        Modify node attributes.
        Outscale supports the following attributes:
        'DisableApiTermination.Value', 'EbsOptimized', 'GroupId.n',
        'SourceDestCheck.Value', 'UserData.Value',
        'InstanceType.Value'

        :param node: Node instance
        :type node: :class:`Node`

        :param disable_api_termination: Termination protection flag
        :param ebs_optimized: EBS-Optimized flag
        :param group_id: Security group ID
        :param source_dest_check: Source/destination check flag
        :param user_data: User data value
        :param instance_type: New instance type

        :return: True on success, False otherwise.
        :rtype: ``bool``
        """
        # Only the attributes that were explicitly supplied are sent.
        attributes = {}

        if disable_api_termination is not None:
            attributes['DisableApiTermination.Value'] = disable_api_termination
        if ebs_optimized is not None:
            attributes['EbsOptimized'] = ebs_optimized
        if group_id is not None:
            attributes['GroupId.n'] = group_id
        if source_dest_check is not None:
            attributes['SourceDestCheck.Value'] = source_dest_check
        if user_data is not None:
            attributes['UserData.Value'] = user_data
        if instance_type is not None:
            attributes['InstanceType.Value'] = instance_type

        return super(OutscaleNodeDriver, self).ex_modify_instance_attribute(
            node, attributes)

    def ex_register_image(self, name, description=None, architecture=None,
                          root_device_name=None, block_device_mapping=None):
        """
        Registers a Machine Image based off of an EBS-backed instance.
        Can also be used to create images from snapshots.

        Outscale does not support image_location, kernel_id and ramdisk_id.

        :param name: The name for the AMI being registered
        :type name: ``str``

        :param description: The description of the AMI (optional)
        :type description: ``str``

        :param architecture: The architecture of the AMI (i386/x86_64)
                             (optional)
        :type architecture: ``str``

        :param root_device_name: The device name for the root device
                                 Required if registering a EBS-backed AMI
        :type root_device_name: ``str``

        :param block_device_mapping: A dictionary of the disk layout
                                     (optional)
        :type block_device_mapping: ``dict``

        :rtype: :class:`NodeImage`
        """
        return super(OutscaleNodeDriver, self).ex_register_image(
            name, description=description, architecture=architecture,
            root_device_name=root_device_name,
            block_device_mapping=block_device_mapping)

    def ex_copy_image(self, source_region, image, name=None, description=None):
        """
        Outscale does not support copying images.

        @inherits: :class:`EC2NodeDriver.ex_copy_image`
        """
        raise NotImplementedError(self._not_implemented_msg)

    def ex_get_limits(self):
        """
        Outscale does not support getting limits.

        @inherits: :class:`EC2NodeDriver.ex_get_limits`
        """
        raise NotImplementedError(self._not_implemented_msg)

    def ex_create_network_interface(self, subnet, name=None,
                                    description=None,
                                    private_ip_address=None):
        """
        Outscale does not support creating a network interface within a VPC.

        @inherits: :class:`EC2NodeDriver.ex_create_network_interface`
        """
        raise NotImplementedError(self._not_implemented_msg)

    def ex_delete_network_interface(self, network_interface):
        """
        Outscale does not support deleting a network interface within a VPC.

        @inherits: :class:`EC2NodeDriver.ex_delete_network_interface`
        """
        raise NotImplementedError(self._not_implemented_msg)

    def ex_attach_network_interface_to_node(self, network_interface,
                                            node, device_index):
        """
        Outscale does not support attaching a network interface.

        @inherits: :class:`EC2NodeDriver.ex_attach_network_interface_to_node`
        """
        raise NotImplementedError(self._not_implemented_msg)

    def ex_detach_network_interface(self, attachment_id, force=False):
        """
        Outscale does not support detaching a network interface

        @inherits: :class:`EC2NodeDriver.ex_detach_network_interface`
        """
        raise NotImplementedError(self._not_implemented_msg)

    def list_sizes(self, location=None):
        """
        List available instance flavors/sizes

        This overrides the EC2 default method in order to use Outscale
        information.

        :rtype: ``list`` of :class:`NodeSize`
        """
        available_types =\
            self.region_details[self.region_name]['instance_types']
        sizes = []

        for instance_type in available_types:
            attributes = OUTSCALE_INSTANCE_TYPES[instance_type]
            # deepcopy so the shared constant is never mutated.
            attributes = copy.deepcopy(attributes)
            price = self._get_size_price(size_id=instance_type)
            attributes.update({'price': price})
            sizes.append(NodeSize(driver=self, **attributes))
        return sizes
+
+
class OutscaleSASNodeDriver(OutscaleNodeDriver):
    """
    Outscale SAS node driver
    """
    name = 'Outscale SAS'
    type = Provider.OUTSCALE_SAS

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', region_details=None, **kwargs):
        # NOTE(review): the ``region_details`` parameter is accepted for
        # signature compatibility but ignored -- the SAS-specific region
        # map is always used.
        super(OutscaleSASNodeDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            region=region, region_details=OUTSCALE_SAS_REGION_DETAILS,
            **kwargs)
+
+
class OutscaleINCNodeDriver(OutscaleNodeDriver):
    """
    Outscale INC node driver
    """
    name = 'Outscale INC'
    type = Provider.OUTSCALE_INC

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us-east-1', region_details=None, **kwargs):
        # NOTE(review): the ``region_details`` parameter is accepted for
        # signature compatibility but ignored -- the INC-specific region
        # map is always used.
        super(OutscaleINCNodeDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            region=region, region_details=OUTSCALE_INC_REGION_DETAILS,
            **kwargs)
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/ecp.py b/awx/lib/site-packages/libcloud/compute/drivers/ecp.py
new file mode 100644
index 0000000000..79b57fa5b2
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/ecp.py
@@ -0,0 +1,385 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Enomaly ECP driver
+"""
+import time
+import base64
+import os
+import socket
+import binascii
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+
+# JSON is included in the standard library starting with Python 2.6. For 2.5
+# and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.common.base import Response, ConnectionUserAndKey
+from libcloud.compute.base import NodeDriver, NodeSize, NodeLocation
+from libcloud.compute.base import NodeImage, Node
+from libcloud.compute.types import Provider, NodeState, InvalidCredsError
+from libcloud.utils.networking import is_private_subnet
+
# Defaults
# API_HOST is empty: a concrete host must be supplied when the
# connection/driver is constructed.
API_HOST = ''
# NOTE(review): a (plain, secure) port pair -- presumably the connection
# base class selects the entry matching its ``secure`` flag; confirm
# against libcloud.common.base.
API_PORT = (80, 443)
+
+
class ECPResponse(Response):
    """
    HTTP response wrapper for the Enomaly ECP API.

    ECP reports application-level failures inside a JSON envelope with an
    ``errno`` field, so success is decided by both the HTTP status code
    and the decoded body.
    """

    def success(self):
        # Bad credentials are surfaced as an exception, not a failure flag.
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()

        if self.status not in (httplib.OK, httplib.CREATED):
            self.error = "HTTP Error Code: %s" % self.status
            return False

        try:
            j_body = json.loads(self.body)
        except ValueError:
            self.error = "JSON response cannot be decoded."
            return False

        if j_body['errno'] != 0:
            self.error = "ECP error: %s" % j_body['message']
            return False

        return True

    def parse_error(self):
        # ``self.error`` is populated by success() above.
        return self.error

    def parse_body(self):
        # Interpret the json responses - no error checking required
        return json.loads(self.body)

    def getheaders(self):
        return self.headers
+
+
class ECPConnection(ConnectionUserAndKey):
    """
    Connection class for the Enomaly ECP driver
    """

    responseCls = ECPResponse
    host = API_HOST
    port = API_PORT

    def add_default_headers(self, headers):
        """
        Attach an HTTP Basic ``Authorization`` header built from the
        driver's user_id/key credentials.

        :param headers: Header dictionary to mutate.
        :rtype: ``dict``
        """
        username = self.user_id
        password = self.key
        # base64.b64encode() replaces the deprecated base64.encodestring():
        # it inserts no line breaks and, with an explicit decode(), yields
        # text on both Python 2 and 3. encodestring() returned bytes on
        # Python 3, so the header rendered as "Basic b'...'" and broke
        # authentication. This matches ElasticStackBaseConnection.
        base64string = base64.b64encode(
            b('%s:%s' % (username, password))).decode('utf-8')
        headers['Authorization'] = "Basic %s" % base64string

        return headers

    def _encode_multipart_formdata(self, fields):
        """
        Encode ``fields`` as a multipart/form-data request body.

        Based on Wade Leftwich's function:
        http://code.activestate.com/recipes/146306/

        :param fields: Mapping of form field name -> value.
        :return: Tuple of (header dict, body string).
        """
        # Use a random boundary that does not appear in the field names;
        # the initial empty string is a substring of anything, so the loop
        # always generates at least one candidate.
        boundary = ''
        while boundary in ''.join(fields):
            boundary = binascii.hexlify(os.urandom(16)).decode('utf-8')
        lines = []
        for name in fields:
            lines.append('--' + boundary)
            lines.append('Content-Disposition: form-data; name="%s"' % name)
            lines.append('')
            lines.append(fields[name])
        lines.append('--' + boundary + '--')
        lines.append('')
        body = '\r\n'.join(lines)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        header = {'Content-Type': content_type}
        return header, body
+
+
class ECPNodeDriver(NodeDriver):
    """
    Enomaly ECP node driver
    """

    name = "Enomaly Elastic Computing Platform"
    website = 'http://www.enomaly.com/'
    type = Provider.ECP
    connectionCls = ECPConnection

    def list_nodes(self):
        """
        Returns a list of all running Nodes

        :rtype: ``list`` of :class:`Node`
        """

        # Make the call
        res = self.connection.request('/rest/hosting/vm/list').parse_body()

        # Put together a list of node objects; _to_node() returns None
        # for VMs that are not running, so those are skipped here.
        nodes = []
        for vm in res['vms']:
            node = self._to_node(vm)
            if node is not None:
                nodes.append(node)

        # And return it
        return nodes

    def _to_node(self, vm):
        """
        Turns a (json) dictionary into a Node object.
        This returns only running VMs.

        :param vm: Dictionary describing one VM as returned by the ECP API.
        :return: :class:`Node` for a running VM, ``None`` otherwise.
        """

        # Check state
        if not vm['state'] == "running":
            return None

        # IPs -- the loopback address is never useful to callers.
        iplist = [interface['ip'] for interface in vm['interfaces'] if
                  interface['ip'] != '127.0.0.1']

        public_ips = []
        private_ips = []
        for ip in iplist:
            # inet_aton() doubles as an IPv4 validity check; addresses
            # that do not parse are skipped silently.
            try:
                socket.inet_aton(ip)
            except socket.error:
                # not a valid ip
                continue
            if is_private_subnet(ip):
                private_ips.append(ip)
            else:
                public_ips.append(ip)

        # Create the node object
        n = Node(
            id=vm['uuid'],
            name=vm['name'],
            state=NodeState.RUNNING,
            public_ips=public_ips,
            private_ips=private_ips,
            driver=self,
        )

        return n

    def reboot_node(self, node):
        """
        Shuts down a VM and then starts it again.

        @inherits: :class:`NodeDriver.reboot_node`
        """

        # Turn the VM off
        # Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action': 'stop'})
        self.connection.request(
            '/rest/hosting/vm/%s' % node.id,
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()

        node.state = NodeState.REBOOTING
        # Wait for it to turn off and then continue (to turn it on again).
        # NOTE(review): this polls every 5 seconds with no upper bound, so
        # the call can block indefinitely if the VM never reaches 'off'.
        while node.state == NodeState.REBOOTING:
            # Check if it's off.
            response = self.connection.request(
                '/rest/hosting/vm/%s' % node.id
            ).parse_body()
            if response['vm']['state'] == 'off':
                # TERMINATED is only a loop-exit sentinel here; the state
                # is set back to RUNNING below once the VM is restarted.
                node.state = NodeState.TERMINATED
            else:
                time.sleep(5)

        # Turn the VM back on.
        # Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action': 'start'})
        self.connection.request(
            '/rest/hosting/vm/%s' % node.id,
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()

        node.state = NodeState.RUNNING
        return True

    def destroy_node(self, node):
        """
        Shuts down and deletes a VM.

        @inherits: :class:`NodeDriver.destroy_node`
        """

        # Shut down first
        # Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action': 'stop'})
        self.connection.request(
            '/rest/hosting/vm/%s' % node.id,
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()

        # Ensure there was no application level error
        node.state = NodeState.PENDING
        # Wait for the VM to turn off before continuing.
        # NOTE(review): unbounded 5-second poll; same caveat as in
        # reboot_node() above.
        while node.state == NodeState.PENDING:
            # Check if it's off.
            response = self.connection.request(
                '/rest/hosting/vm/%s' % node.id
            ).parse_body()
            if response['vm']['state'] == 'off':
                node.state = NodeState.TERMINATED
            else:
                time.sleep(5)

        # Delete the VM
        # Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action': 'delete'})
        self.connection.request(
            '/rest/hosting/vm/%s' % (node.id),
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()

        return True

    def list_images(self, location=None):
        """
        Returns a list of all package templates aka appliances aka images.

        :param location: Unused; ECP has no concept of image locations.

        @inherits: :class:`NodeDriver.list_images`
        """

        # Make the call
        response = self.connection.request(
            '/rest/hosting/ptemplate/list').parse_body()

        # Turn the response into an array of NodeImage objects
        images = []
        for ptemplate in response['packages']:
            images.append(NodeImage(
                id=ptemplate['uuid'],
                name='%s: %s' % (ptemplate['name'], ptemplate['description']),
                driver=self,)
            )

        return images

    def list_sizes(self, location=None):
        """
        Returns a list of all hardware templates

        :param location: Unused; ECP has no concept of size locations.

        @inherits: :class:`NodeDriver.list_sizes`
        """

        # Make the call
        response = self.connection.request(
            '/rest/hosting/htemplate/list').parse_body()

        # Turn the response into an array of NodeSize objects
        sizes = []
        for htemplate in response['templates']:
            sizes.append(NodeSize(
                id=htemplate['uuid'],
                name=htemplate['name'],
                ram=htemplate['memory'],
                disk=0,  # Disk is independent of hardware template.
                bandwidth=0,  # There is no way to keep track of bandwidth.
                price=0,  # The billing system is external.
                driver=self,)
            )

        return sizes

    def list_locations(self):
        """
        This feature does not exist in ECP. Returns hard coded dummy location.

        :rtype: ``list`` of :class:`NodeLocation`
        """
        return [NodeLocation(id=1,
                             name="Cloud",
                             country='',
                             driver=self),
                ]

    def create_node(self, **kwargs):
        """
        Creates a virtual machine.

        :keyword name: String with a name for this new node (required)
        :type name: ``str``

        :keyword size: The size of resources allocated to this node .
                           (required)
        :type size: :class:`NodeSize`

        :keyword image: OS Image to boot on node. (required)
        :type image: :class:`NodeImage`

        :rtype: :class:`Node`
        """

        # Find out what network to put the VM on.
        res = self.connection.request(
            '/rest/hosting/network/list').parse_body()

        # Use the first / default network because there is no way to specify
        # which one
        network = res['networks'][0]['uuid']

        # Prepare to make the VM
        data = {
            'name': str(kwargs['name']),
            'package': str(kwargs['image'].id),
            'hardware': str(kwargs['size'].id),
            'network_uuid': str(network),
            'disk': ''
        }

        # Black magic to make the POST requests work
        # (note: creation uses PUT, unlike the other actions above)
        d = self.connection._encode_multipart_formdata(data)
        response = self.connection.request(
            '/rest/hosting/vm/',
            method='PUT',
            headers=d[0],
            data=d[1]
        ).parse_body()

        # Create a node object and return it.
        n = Node(
            id=response['machine_id'],
            name=data['name'],
            state=NodeState.PENDING,
            public_ips=[],
            private_ips=[],
            driver=self,
        )

        return n
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/elastichosts.py b/awx/lib/site-packages/libcloud/compute/drivers/elastichosts.py
new file mode 100644
index 0000000000..736ac7f0b7
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/elastichosts.py
@@ -0,0 +1,236 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ElasticHosts Driver
+"""
+
+from libcloud.compute.types import Provider
+from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver
+
+
# API end-points
# Maps a short region key -> display name, country and API host.
API_ENDPOINTS = {
    'lon-p': {
        'name': 'London Peer 1',
        'country': 'United Kingdom',
        'host': 'api-lon-p.elastichosts.com'
    },
    'lon-b': {
        'name': 'London BlueSquare',
        'country': 'United Kingdom',
        'host': 'api-lon-b.elastichosts.com'
    },
    'sat-p': {
        'name': 'San Antonio Peer 1',
        'country': 'United States',
        'host': 'api-sat-p.elastichosts.com'
    },
    'lax-p': {
        'name': 'Los Angeles Peer 1',
        'country': 'United States',
        'host': 'api-lax-p.elastichosts.com'
    },
    'sjc-c': {
        'name': 'San Jose (Silicon Valley)',
        'country': 'United States',
        'host': 'api-sjc-c.elastichosts.com'
    },
    'tor-p': {
        'name': 'Toronto Peer 1',
        'country': 'Canada',
        'host': 'api-tor-p.elastichosts.com'
    },
    # NOTE(review): key is 'syd-y' but the host says 'api-syd-v' --
    # confirm which letter is correct against the provider docs.
    'syd-y': {
        'name': 'Sydney',
        'country': 'Australia',
        'host': 'api-syd-v.elastichosts.com'
    },
    # NOTE(review): keyed 'cn-1' / country China, but the name and host
    # point at Hong Kong ('api-hkg-e') -- verify the intended mapping.
    'cn-1': {
        'name': 'Hong Kong',
        'country': 'China',
        'host': 'api-hkg-e.elastichosts.com'
    }
}

# Default API end-point for the base connection class.
DEFAULT_REGION = 'sat-p'

# Retrieved from http://www.elastichosts.com/cloud-hosting/api
# Maps drive uuid -> metadata for the pre-installed system drive images;
# 'supports_deployment' marks drives usable with deploy_node().
STANDARD_DRIVES = {
    '38df0986-4d85-4b76-b502-3878ffc80161': {
        'uuid': '38df0986-4d85-4b76-b502-3878ffc80161',
        'description': 'CentOS Linux 5.5',
        'size_gunzipped': '3GB',
        'supports_deployment': True,
    },
    '980cf63c-f21e-4382-997b-6541d5809629': {
        'uuid': '980cf63c-f21e-4382-997b-6541d5809629',
        'description': 'Debian Linux 5.0',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    'aee5589a-88c3-43ef-bb0a-9cab6e64192d': {
        'uuid': 'aee5589a-88c3-43ef-bb0a-9cab6e64192d',
        'description': 'Ubuntu Linux 10.04',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    '62f512cd-82c7-498e-88d8-a09ac2ef20e7': {
        'uuid': '62f512cd-82c7-498e-88d8-a09ac2ef20e7',
        'description': 'Ubuntu Linux 12.04',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    'b9d0eb72-d273-43f1-98e3-0d4b87d372c0': {
        'uuid': 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0',
        'description': 'Windows Web Server 2008',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '30824e97-05a4-410c-946e-2ba5a92b07cb': {
        'uuid': '30824e97-05a4-410c-946e-2ba5a92b07cb',
        'description': 'Windows Web Server 2008 R2',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '9ecf810e-6ad1-40ef-b360-d606f0444671': {
        'uuid': '9ecf810e-6ad1-40ef-b360-d606f0444671',
        'description': 'Windows Web Server 2008 R2 + SQL Server',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '10a88d1c-6575-46e3-8d2c-7744065ea530': {
        'uuid': '10a88d1c-6575-46e3-8d2c-7744065ea530',
        'description': 'Windows Server 2008 Standard R2',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47': {
        'uuid': '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47',
        'description': 'Windows Server 2008 Standard R2 + SQL Server',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
}
+
+
class ElasticHostsException(Exception):
    """
    Exception class for the ElasticHosts driver.
    """

    def __str__(self):
        return self.args[0]

    def __repr__(self):
        # The format string had lost its literal text ("" % args raises
        # TypeError); restore the conventional "<ClassName '...'>" form.
        return "<ElasticHostsException '%s'>" % (self.args[0])
+
+
class ElasticHostsNodeDriver(ElasticStackBaseNodeDriver):
    """
    Node Driver class for ElasticHosts
    """
    type = Provider.ELASTICHOSTS
    api_name = 'elastichosts'
    name = 'ElasticHosts'
    website = 'http://www.elastichosts.com/'
    features = {"create_node": ["generates_password"]}
    _standard_drives = STANDARD_DRIVES

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """
        Instantiate the driver, resolving the effective region.

        A ``_region`` class attribute set by a subclass always takes
        precedence over the ``region`` argument.
        """
        region = getattr(self, '_region', region)

        if region not in API_ENDPOINTS:
            raise ValueError('Invalid region: %s' % (region))

        # Remember whether the caller pinned an explicit host so that
        # _ex_connection_class_kwargs() does not override it.
        self._host_argument_set = host is not None

        super(ElasticHostsNodeDriver, self).__init__(key=key, secret=secret,
                                                     secure=secure, host=host,
                                                     port=port, region=region,
                                                     **kwargs)

    def _ex_connection_class_kwargs(self):
        """
        Return the host value based on the user supplied region.
        """
        extra_kwargs = {}
        if not self._host_argument_set:
            extra_kwargs['host'] = API_ENDPOINTS[self.region]['host']
        return extra_kwargs
+
+
class ElasticHostsUK1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the London Peer 1 end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (lon-p)'
    _region = 'lon-p'
+
+
class ElasticHostsUK2NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the London Bluesquare end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (lon-b)'
    _region = 'lon-b'
+
+
class ElasticHostsUS1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the San Antonio Peer 1 end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (sat-p)'
    _region = 'sat-p'
+
+
class ElasticHostsUS2NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Los Angeles Peer 1 end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (lax-p)'
    _region = 'lax-p'
+
+
class ElasticHostsUS3NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the San Jose (Silicon Valley) end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (sjc-c)'
    _region = 'sjc-c'
+
+
class ElasticHostsCA1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Toronto Peer 1 end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (tor-p)'
    _region = 'tor-p'
+
+
class ElasticHostsAU1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Sydney end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (syd-y)'
    _region = 'syd-y'
+
+
class ElasticHostsCN1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Hong Kong end-point.

    Only pins ``_region``; all behaviour is inherited.
    """
    name = 'ElasticHosts (cn-1)'
    _region = 'cn-1'
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/elasticstack.py b/awx/lib/site-packages/libcloud/compute/drivers/elasticstack.py
new file mode 100644
index 0000000000..da3863a3ef
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/elasticstack.py
@@ -0,0 +1,488 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Base driver for the providers based on the ElasticStack platform -
+http://www.elasticstack.com.
+"""
+
+import re
+import time
+import base64
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.common.base import ConnectionUserAndKey, JsonResponse
+from libcloud.common.types import InvalidCredsError
+from libcloud.compute.types import NodeState
+from libcloud.compute.base import NodeDriver, NodeSize, Node
+from libcloud.compute.base import NodeImage
+from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment
+from libcloud.compute.deployment import MultiStepDeployment
+
+
# Maps ElasticStack server status strings -> libcloud NodeState values;
# unknown statuses fall back to NodeState.UNKNOWN in _to_node().
NODE_STATE_MAP = {
    'active': NodeState.RUNNING,
    'dead': NodeState.TERMINATED,
    'dumped': NodeState.TERMINATED,
}

# Default timeout (in seconds) for the drive imaging process
IMAGING_TIMEOUT = 10 * 60

# ElasticStack doesn't specify special instance types, so I just specified
# some plans based on the other provider offerings.
#
# Basically for CPU any value between 500Mhz and 20000Mhz should work,
# 256MB to 8192MB for ram and 1GB to 2TB for disk.
# (Units: cpu in MHz, memory in MB, disk in GB.)
INSTANCE_TYPES = {
    'small': {
        'id': 'small',
        'name': 'Small instance',
        'cpu': 2000,
        'memory': 1700,
        'disk': 160,
        'bandwidth': None,
    },
    'medium': {
        'id': 'medium',
        'name': 'Medium instance',
        'cpu': 3000,
        'memory': 4096,
        'disk': 500,
        'bandwidth': None,
    },
    'large': {
        'id': 'large',
        'name': 'Large instance',
        'cpu': 4000,
        'memory': 7680,
        'disk': 850,
        'bandwidth': None,
    },
    'extra-large': {
        'id': 'extra-large',
        'name': 'Extra Large instance',
        'cpu': 8000,
        'memory': 8192,
        'disk': 1690,
        'bandwidth': None,
    },
    'high-cpu-medium': {
        'id': 'high-cpu-medium',
        'name': 'High-CPU Medium instance',
        'cpu': 5000,
        'memory': 1700,
        'disk': 350,
        'bandwidth': None,
    },
    'high-cpu-extra-large': {
        'id': 'high-cpu-extra-large',
        'name': 'High-CPU Extra Large instance',
        'cpu': 20000,
        'memory': 7168,
        'disk': 1690,
        'bandwidth': None,
    },
}
+
+
class ElasticStackException(Exception):
    """
    Exception class for the ElasticStack-based drivers.
    """

    def __str__(self):
        return self.args[0]

    def __repr__(self):
        # The format string had lost its literal text ("" % args raises
        # TypeError); restore the conventional "<ClassName '...'>" form.
        return "<ElasticStackException '%s'>" % (self.args[0])
+
+
class ElasticStackResponse(JsonResponse):
    """
    Response class for the ElasticStack-based drivers.
    """

    def success(self):
        # Bad credentials are surfaced as an exception rather than a
        # generic failure.
        if self.status == 401:
            raise InvalidCredsError()

        return 200 <= self.status <= 299

    def parse_error(self):
        error_header = self.headers.get('x-elastic-error', '')
        return 'X-Elastic-Error: %s (%s)' % (error_header, self.body.strip())
+
+
class ElasticStackNodeSize(NodeSize):
    """
    NodeSize variant which additionally carries a CPU speed (``cpu``,
    in MHz) alongside the usual size attributes.
    """

    def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver

    def __repr__(self):
        # The format string had lost its literal text ('' % (8 args)
        # raises TypeError); restore a descriptive NodeSize repr.
        return (('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s disk=%s '
                 'bandwidth=%s price=%s driver=%s ...>')
                % (self.id, self.name, self.cpu, self.ram,
                   self.disk, self.bandwidth, self.price, self.driver.name))
+
+
class ElasticStackBaseConnection(ConnectionUserAndKey):
    """
    Base connection class for the ElasticStack driver
    """

    host = None
    responseCls = ElasticStackResponse

    def add_default_headers(self, headers):
        """
        Add JSON content negotiation and HTTP Basic auth headers.
        """
        credentials = b('%s:%s' % (self.user_id, self.key))
        token = base64.b64encode(credentials).decode('utf-8')
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        headers['Authorization'] = 'Basic %s' % token
        return headers
+
+
class ElasticStackBaseNodeDriver(NodeDriver):
    """
    Base node driver for providers built on the ElasticStack platform.

    Concrete subclasses must define ``_standard_drives``, a mapping of
    drive uuid -> metadata describing the pre-installed system drive
    images.
    """

    website = 'http://www.elasticstack.com'
    connectionCls = ElasticStackBaseConnection
    features = {"create_node": ["generates_password"]}

    def reboot_node(self, node):
        """
        Reboot the node by sending a reset request.

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/servers/%s/reset' % (node.id),
            method='POST'
        )
        return response.status == 204

    def destroy_node(self, node):
        """
        Kill the server immediately.

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/servers/%s/destroy' % (node.id),
            method='POST'
        )
        return response.status == 204

    def list_images(self, location=None):
        """
        Return the available pre-installed system drive images.

        :rtype: ``list`` of :class:`NodeImage`
        """
        images = []
        for key, value in self._standard_drives.items():
            image = NodeImage(
                id=value['uuid'],
                name=value['description'],
                driver=self.connection.driver,
                extra={
                    'size_gunzipped': value['size_gunzipped']
                }
            )
            images.append(image)

        return images

    def list_sizes(self, location=None):
        """
        Return the synthetic sizes defined in INSTANCE_TYPES.

        :rtype: ``list`` of :class:`ElasticStackNodeSize`
        """
        sizes = []
        for key, value in INSTANCE_TYPES.items():
            size = ElasticStackNodeSize(
                id=value['id'],
                name=value['name'], cpu=value['cpu'], ram=value['memory'],
                disk=value['disk'], bandwidth=value['bandwidth'],
                price=self._get_size_price(size_id=value['id']),
                driver=self.connection.driver
            )
            sizes.append(size)

        return sizes

    def list_nodes(self):
        """
        Return a list of active (running) nodes.

        :rtype: ``list`` of :class:`Node`
        """
        response = self.connection.request(action='/servers/info').object

        nodes = []
        for data in response:
            node = self._to_node(data)
            nodes.append(node)

        return nodes

    def create_node(self, **kwargs):
        """Creates a ElasticStack instance

        @inherits: :class:`NodeDriver.create_node`

        :keyword name: String with a name for this new node (required)
        :type name: ``str``

        :keyword smp: Number of virtual processors or None to calculate
                      based on the cpu speed
        :type smp: ``int``

        :keyword nic_model: e1000, rtl8139 or virtio
                            (if not specified, e1000 is used)
        :type nic_model: ``str``

        :keyword vnc_password: If set, the same password is also used for
                               SSH access with user toor,
                               otherwise VNC access is disabled and
                               no SSH login is possible.
        :type vnc_password: ``str``
        """
        size = kwargs['size']
        image = kwargs['image']
        smp = kwargs.get('smp', 'auto')
        nic_model = kwargs.get('nic_model', 'e1000')
        vnc_password = ssh_password = kwargs.get('vnc_password', None)

        if nic_model not in ('e1000', 'rtl8139', 'virtio'):
            raise ElasticStackException('Invalid NIC model specified')

        # check that drive size is not smaller than pre installed image size

        # First we create a drive with the specified size
        drive_data = {}
        drive_data.update({'name': kwargs['name'],
                           'size': '%sG' % (kwargs['size'].disk)})

        response = self.connection.request(action='/drives/create',
                                           data=json.dumps(drive_data),
                                           method='POST').object

        if not response:
            raise ElasticStackException('Drive creation failed')

        drive_uuid = response['drive']

        # Then we image the selected pre-installed system drive onto it
        response = self.connection.request(
            action='/drives/%s/image/%s/gunzip' % (drive_uuid, image.id),
            method='POST'
        )

        if response.status not in (200, 204):
            raise ElasticStackException('Drive imaging failed')

        # We wait until the drive is imaged and then boot up the node
        # (in most cases, the imaging process shouldn't take longer
        # than a few minutes)
        response = self.connection.request(
            action='/drives/%s/info' % (drive_uuid)
        ).object

        imaging_start = time.time()
        while 'imaging' in response:
            response = self.connection.request(
                action='/drives/%s/info' % (drive_uuid)
            ).object

            elapsed_time = time.time() - imaging_start
            if ('imaging' in response and elapsed_time >= IMAGING_TIMEOUT):
                raise ElasticStackException('Drive imaging timed out')

            time.sleep(1)

        node_data = {}
        node_data.update({'name': kwargs['name'],
                          'cpu': size.cpu,
                          'mem': size.ram,
                          'ide:0:0': drive_uuid,
                          'boot': 'ide:0:0',
                          'smp': smp})
        node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'})

        if vnc_password:
            node_data.update({'vnc': 'auto', 'vnc:password': vnc_password})

        response = self.connection.request(
            action='/servers/create', data=json.dumps(node_data),
            method='POST'
        ).object

        # The API may answer with either a list of servers or a single
        # server object.
        if isinstance(response, list):
            nodes = [self._to_node(node, ssh_password) for node in response]
        else:
            nodes = self._to_node(response, ssh_password)

        return nodes

    # Extension methods
    def ex_set_node_configuration(self, node, **kwargs):
        """
        Changes the configuration of the running server

        :param node: Node which should be used
        :type node: :class:`Node`

        :param kwargs: keyword arguments
        :type kwargs: ``dict``

        :rtype: ``bool``
        """
        valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$',
                      '^boot$', '^nic:0:model$', '^nic:0:dhcp',
                      '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$',
                      '^vnc:ip$', '^vnc:password$', '^vnc:tls',
                      '^ide:[0-1]:[0-1](:media)?$',
                      '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$')

        invalid_keys = []
        keys = list(kwargs.keys())
        for key in keys:
            matches = False
            for regex in valid_keys:
                if re.match(regex, key):
                    matches = True
                    break
            if not matches:
                invalid_keys.append(key)

        if invalid_keys:
            raise ElasticStackException(
                'Invalid configuration key specified: %s'
                % (',' .join(invalid_keys))
            )

        response = self.connection.request(
            action='/servers/%s/set' % (node.id), data=json.dumps(kwargs),
            method='POST'
        )

        return (response.status == httplib.OK and response.body != '')

    def deploy_node(self, **kwargs):
        """
        Create a new node, and start deployment.

        @inherits: :class:`NodeDriver.deploy_node`

        :keyword enable_root: If true, root password will be set to
                              vnc_password (this will enable SSH access)
                              and default 'toor' account will be deleted.
        :type enable_root: ``bool``
        """
        image = kwargs['image']
        vnc_password = kwargs.get('vnc_password', None)
        enable_root = kwargs.get('enable_root', False)

        if not vnc_password:
            raise ValueError('You need to provide vnc_password argument '
                             'if you want to use deployment')

        # _standard_drives is keyed by drive uuid strings, so the lookup
        # must use image.id; testing the NodeImage object itself (as this
        # code previously did) could never match a key, which made this
        # guard unreachable.
        if (image.id in self._standard_drives and
                not self._standard_drives[image.id]['supports_deployment']):
            raise ValueError('Image %s does not support deployment'
                             % (image.id))

        if enable_root:
            script = ("unset HISTFILE;"
                      "echo root:%s | chpasswd;"
                      "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;"
                      "history -c") % vnc_password
            root_enable_script = ScriptDeployment(script=script,
                                                  delete=True)
            deploy = kwargs.get('deploy', None)
            # NOTE(review): a ``deploy`` value of an unrecognised type
            # leaves ``deployment`` unbound and raises NameError below --
            # confirm whether such values should simply be passed through.
            if deploy:
                if isinstance(deploy, (ScriptDeployment, SSHKeyDeployment)):
                    deployment = MultiStepDeployment([deploy,
                                                      root_enable_script])
                elif isinstance(deploy, MultiStepDeployment):
                    deployment = deploy
                    deployment.add(root_enable_script)
            else:
                deployment = root_enable_script

            kwargs['deploy'] = deployment

        if not kwargs.get('ssh_username', None):
            kwargs['ssh_username'] = 'toor'

        return super(ElasticStackBaseNodeDriver, self).deploy_node(**kwargs)

    def ex_shutdown_node(self, node):
        """
        Sends the ACPI power-down event

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/servers/%s/shutdown' % (node.id),
            method='POST'
        )
        return response.status == 204

    def ex_destroy_drive(self, drive_uuid):
        """
        Deletes a drive

        :param drive_uuid: Drive uuid which should be used
        :type drive_uuid: ``str``

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/drives/%s/destroy' % (drive_uuid),
            method='POST'
        )
        return response.status == 204

    # Helper methods
    def _to_node(self, data, ssh_password=None):
        """
        Build a :class:`Node` from a raw server info dictionary.

        :param ssh_password: Optional password to surface via
                             ``extra['password']``.
        """
        try:
            state = NODE_STATE_MAP[data['status']]
        except KeyError:
            state = NodeState.UNKNOWN

        if isinstance(data['nic:0:dhcp'], list):
            public_ip = data['nic:0:dhcp']
        else:
            public_ip = [data['nic:0:dhcp']]

        extra = {'cpu': data['cpu'],
                 'smp': data['smp'],
                 'mem': data['mem'],
                 'started': data['started']}

        if 'vnc:ip' in data:
            extra['vnc:ip'] = data['vnc:ip']

        if 'vnc:password' in data:
            extra['vnc:password'] = data['vnc:password']

        boot_device = data['boot']

        # The boot device(s) are copied into extra keyed by device name.
        if isinstance(boot_device, list):
            for device in boot_device:
                extra[device] = data[device]
        else:
            extra[boot_device] = data[boot_device]

        if ssh_password:
            extra.update({'password': ssh_password})

        node = Node(id=data['server'], name=data['name'], state=state,
                    public_ips=public_ip, private_ips=None,
                    driver=self.connection.driver,
                    extra=extra)

        return node
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/exoscale.py b/awx/lib/site-packages/libcloud/compute/drivers/exoscale.py
new file mode 100644
index 0000000000..9f883e0b5f
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/exoscale.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.compute.providers import Provider
+from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
+
+__all__ = [
+ 'ExoscaleNodeDriver'
+]
+
+
class ExoscaleNodeDriver(CloudStackNodeDriver):
    """
    Exoscale node driver: a thin CloudStack-based driver that only pins
    the provider type and the Exoscale API endpoint.
    """
    type = Provider.EXOSCALE
    name = 'Exoscale'
    website = 'https://www.exoscale.ch/'

    # API endpoint info
    host = 'api.exoscale.ch'
    path = '/compute'
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/gandi.py b/awx/lib/site-packages/libcloud/compute/drivers/gandi.py
new file mode 100644
index 0000000000..e5593b4684
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/gandi.py
@@ -0,0 +1,619 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Gandi driver for compute
+"""
+import sys
+from datetime import datetime
+
+from libcloud.common.gandi import BaseGandiDriver, GandiException,\
+ NetworkInterface, IPAddress, Disk
+from libcloud.compute.base import StorageVolume
+from libcloud.compute.types import NodeState, Provider
+from libcloud.compute.base import Node, NodeDriver
+from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
+
+
# Map Gandi VM states onto the generic libcloud NodeState values.
NODE_STATE_MAP = {
    'running': NodeState.RUNNING,
    'halted': NodeState.TERMINATED,
    'paused': NodeState.TERMINATED,
    'locked': NodeState.TERMINATED,
    'being_created': NodeState.PENDING,
    'invalid': NodeState.UNKNOWN,
    'legally_locked': NodeState.PENDING,
    'deleted': NodeState.TERMINATED
}

# Base hourly price in USD; not referenced by the visible code in this module.
NODE_PRICE_HOURLY_USD = 0.02

# Static instance-type catalogue used for accounts on the newer rating model
# (see list_sizes). Units for memory/disk/bandwidth are presumably
# MB/GB/kbit respectively — TODO confirm against the Gandi hosting API docs.
INSTANCE_TYPES = {
    'small': {
        'id': 'small',
        'name': 'Small instance',
        'cpu': 1,
        'memory': 256,
        'disk': 3,
        'bandwidth': 10240,
    },
    'medium': {
        'id': 'medium',
        'name': 'Medium instance',
        'cpu': 1,
        'memory': 1024,
        'disk': 20,
        'bandwidth': 10240,
    },
    'large': {
        'id': 'large',
        'name': 'Large instance',
        'cpu': 2,
        'memory': 2048,
        'disk': 50,
        'bandwidth': 10240,
    },
    'x-large': {
        'id': 'x-large',
        'name': 'Extra Large instance',
        'cpu': 4,
        'memory': 4096,
        'disk': 100,
        'bandwidth': 10240,
    },
}
+
+
+class GandiNodeDriver(BaseGandiDriver, NodeDriver):
+ """
+ Gandi node driver
+
+ """
+ api_name = 'gandi'
+ friendly_name = 'Gandi.net'
+ website = 'http://www.gandi.net/'
+ country = 'FR'
+ type = Provider.GANDI
+ # TODO : which features to enable ?
+ features = {}
+
    def __init__(self, *args, **kwargs):
        """
        @inherits: :class:`NodeDriver.__init__`
        """
        # NOTE(review): super() is anchored at BaseGandiDriver, so this
        # skips BaseGandiDriver.__init__ in the MRO and dispatches straight
        # to the next base (NodeDriver) — confirm this is intentional.
        super(BaseGandiDriver, self).__init__(*args, **kwargs)
+
+ def _resource_info(self, type, id):
+ try:
+ obj = self.connection.request('hosting.%s.info' % type, int(id))
+ return obj.object
+ except Exception:
+ e = sys.exc_info()[1]
+ raise GandiException(1003, e)
+ return None
+
    def _node_info(self, id):
        # Convenience wrapper: raw VM info record for the given VM id.
        return self._resource_info('vm', id)

    def _volume_info(self, id):
        # Convenience wrapper: raw disk info record for the given disk id.
        return self._resource_info('disk', id)
+
+ # Generic methods for driver
+ def _to_node(self, vm):
+ return Node(
+ id=vm['id'],
+ name=vm['hostname'],
+ state=NODE_STATE_MAP.get(
+ vm['state'],
+ NodeState.UNKNOWN
+ ),
+ public_ips=vm.get('ips', []),
+ private_ips=[],
+ driver=self,
+ extra={
+ 'ai_active': vm.get('ai_active'),
+ 'datacenter_id': vm.get('datacenter_id'),
+ 'description': vm.get('description')
+ }
+ )
+
+ def _to_nodes(self, vms):
+ return [self._to_node(v) for v in vms]
+
+ def _to_volume(self, disk):
+ extra = {'can_snapshot': disk['can_snapshot']}
+ return StorageVolume(
+ id=disk['id'],
+ name=disk['name'],
+ size=int(disk['size']),
+ driver=self,
+ extra=extra)
+
+ def _to_volumes(self, disks):
+ return [self._to_volume(d) for d in disks]
+
+ def list_nodes(self):
+ vms = self.connection.request('hosting.vm.list').object
+ ips = self.connection.request('hosting.ip.list').object
+ for vm in vms:
+ vm['ips'] = []
+ for ip in ips:
+ if vm['ifaces_id'][0] == ip['iface_id']:
+ ip = ip.get('ip', None)
+ if ip:
+ vm['ips'].append(ip)
+
+ nodes = self._to_nodes(vms)
+ return nodes
+
+ def reboot_node(self, node):
+ op = self.connection.request('hosting.vm.reboot', int(node.id))
+ self._wait_operation(op.object['id'])
+ vm = self._node_info(int(node.id))
+ if vm['state'] == 'running':
+ return True
+ return False
+
+ def destroy_node(self, node):
+ vm = self._node_info(node.id)
+ if vm['state'] == 'running':
+ # Send vm_stop and wait for accomplish
+ op_stop = self.connection.request('hosting.vm.stop', int(node.id))
+ if not self._wait_operation(op_stop.object['id']):
+ raise GandiException(1010, 'vm.stop failed')
+ # Delete
+ op = self.connection.request('hosting.vm.delete', int(node.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
    def deploy_node(self, **kwargs):
        """
        deploy_node is not implemented for gandi driver

        :raises: ``NotImplementedError`` unconditionally

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'deploy_node not implemented for gandi driver')
+
+ def create_node(self, **kwargs):
+ """
+ Create a new Gandi node
+
+ :keyword name: String with a name for this new node (required)
+ :type name: ``str``
+
+ :keyword image: OS Image to boot on node. (required)
+ :type image: :class:`NodeImage`
+
+ :keyword location: Which data center to create a node in. If empty,
+ undefined behavior will be selected. (optional)
+ :type location: :class:`NodeLocation`
+
+ :keyword size: The size of resources allocated to this node.
+ (required)
+ :type size: :class:`NodeSize`
+
+ :keyword login: user name to create for login on machine (required)
+ :type login: ``str``
+
+ :keyword password: password for user that'll be created (required)
+ :type password: ``str``
+
+ :keyword inet_family: version of ip to use, default 4 (optional)
+ :type inet_family: ``int``
+
+ :rtype: :class:`Node`
+ """
+
+ if kwargs.get('login') is None or kwargs.get('password') is None:
+ raise GandiException(
+ 1020, 'login and password must be defined for node creation')
+
+ location = kwargs.get('location')
+ if location and isinstance(location, NodeLocation):
+ dc_id = int(location.id)
+ else:
+ raise GandiException(
+ 1021, 'location must be a subclass of NodeLocation')
+
+ size = kwargs.get('size')
+ if not size and not isinstance(size, NodeSize):
+ raise GandiException(
+ 1022, 'size must be a subclass of NodeSize')
+
+ # If size name is in INSTANCE_TYPE we use new rating model
+ instance = INSTANCE_TYPES.get(size.id)
+ cores = instance['cpu'] if instance else int(size.id)
+
+ src_disk_id = int(kwargs['image'].id)
+
+ disk_spec = {
+ 'datacenter_id': dc_id,
+ 'name': 'disk_%s' % kwargs['name']
+ }
+
+ vm_spec = {
+ 'datacenter_id': dc_id,
+ 'hostname': kwargs['name'],
+ 'login': kwargs['login'],
+ 'password': kwargs['password'], # TODO : use NodeAuthPassword
+ 'memory': int(size.ram),
+ 'cores': cores,
+ 'bandwidth': int(size.bandwidth),
+ 'ip_version': kwargs.get('inet_family', 4),
+ }
+
+ # Call create_from helper api. Return 3 operations : disk_create,
+ # iface_create,vm_create
+ (op_disk, op_iface, op_vm) = self.connection.request(
+ 'hosting.vm.create_from',
+ vm_spec, disk_spec, src_disk_id
+ ).object
+
+ # We wait for vm_create to finish
+ if self._wait_operation(op_vm['id']):
+ # after successful operation, get ip information
+ # thru first interface
+ node = self._node_info(op_vm['vm_id'])
+ ifaces = node.get('ifaces')
+ if len(ifaces) > 0:
+ ips = ifaces[0].get('ips')
+ if len(ips) > 0:
+ node['ip'] = ips[0]['ip']
+ return self._to_node(node)
+
+ return None
+
+ def _to_image(self, img):
+ return NodeImage(
+ id=img['disk_id'],
+ name=img['label'],
+ driver=self.connection.driver
+ )
+
+ def list_images(self, location=None):
+ try:
+ if location:
+ filtering = {'datacenter_id': int(location.id)}
+ else:
+ filtering = {}
+ images = self.connection.request('hosting.image.list', filtering)
+ return [self._to_image(i) for i in images.object]
+ except Exception:
+ e = sys.exc_info()[1]
+ raise GandiException(1011, e)
+
    def _to_size(self, id, size):
        # Legacy "shares" rating model: ``id`` is the number of cores and
        # the price scales linearly from the single-share price.
        return NodeSize(
            id=id,
            name='%s cores' % id,
            ram=size['memory'],
            disk=size['disk'],
            bandwidth=size['bandwidth'],
            price=(self._get_size_price(size_id='1') * id),
            driver=self.connection.driver,
        )

    def _instance_type_to_size(self, instance):
        # Newer fixed instance-type rating model (see INSTANCE_TYPES).
        return NodeSize(
            id=instance['id'],
            name=instance['name'],
            ram=instance['memory'],
            disk=instance['disk'],
            bandwidth=instance['bandwidth'],
            price=self._get_size_price(size_id=instance['id']),
            driver=self.connection.driver,
        )

    def list_instance_type(self, location=None):
        # ``location`` is accepted for API symmetry but not used here.
        return [self._instance_type_to_size(instance)
                for name, instance in INSTANCE_TYPES.items()]
+
+ def list_sizes(self, location=None):
+ account = self.connection.request('hosting.account.info').object
+ if account.get('rating_enabled'):
+ # This account use new rating model
+ return self.list_instance_type(location)
+ # Look for available shares, and return a list of share_definition
+ available_res = account['resources']['available']
+
+ if available_res['shares'] == 0:
+ return None
+ else:
+ share_def = account['share_definition']
+ available_cores = available_res['cores']
+ # 0.75 core given when creating a server
+ max_core = int(available_cores + 0.75)
+ shares = []
+ if available_res['servers'] < 1:
+ # No server quota, no way
+ return shares
+ for i in range(1, max_core + 1):
+ share = {id: i}
+ share_is_available = True
+ for k in ['memory', 'disk', 'bandwidth']:
+ if share_def[k] * i > available_res[k]:
+ # We run out for at least one resource inside
+ share_is_available = False
+ else:
+ share[k] = share_def[k] * i
+ if share_is_available:
+ nb_core = i
+ shares.append(self._to_size(nb_core, share))
+ return shares
+
+ def _to_loc(self, loc):
+ return NodeLocation(
+ id=loc['id'],
+ name=loc['name'],
+ country=loc['country'],
+ driver=self
+ )
+
+ def list_locations(self):
+ res = self.connection.request('hosting.datacenter.list')
+ return [self._to_loc(l) for l in res.object]
+
    def list_volumes(self):
        """
        List all disks on this account as storage volumes.

        :rtype: ``list`` of :class:`StorageVolume`
        """
        res = self.connection.request('hosting.disk.list', {})
        return self._to_volumes(res.object)
+
    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a new disk, optionally cloned from a snapshot.

        :param size: Size of the disk (int-convertible)
        :type size: ``int``

        :param name: Name for the new disk
        :type name: ``str``

        :param location: Datacenter to create the disk in.
                         NOTE(review): despite the ``None`` default this is
                         dereferenced unconditionally below, so it is
                         effectively required — confirm with callers.
        :type location: :class:`NodeLocation`

        :param snapshot: Optional snapshot to create the disk from
        :type snapshot: :class:`VolumeSnapshot`

        :return: The new volume, or ``None`` if the operation failed
        :rtype: :class:`StorageVolume` or ``None``
        """
        disk_param = {
            'name': name,
            'size': int(size),
            'datacenter_id': int(location.id)
        }
        if snapshot:
            op = self.connection.request('hosting.disk.create_from',
                                         disk_param, int(snapshot.id))
        else:
            op = self.connection.request('hosting.disk.create', disk_param)
        if self._wait_operation(op.object['id']):
            disk = self._volume_info(op.object['disk_id'])
            return self._to_volume(disk)
        return None
+
+ def attach_volume(self, node, volume, device=None):
+ op = self.connection.request('hosting.vm.disk_attach',
+ int(node.id), int(volume.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
+ def detach_volume(self, node, volume):
+ """
+ Detaches a volume from a node.
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :param volume: Volume to be detached
+ :type volume: :class:`StorageVolume`
+
+ :rtype: ``bool``
+ """
+ op = self.connection.request('hosting.vm.disk_detach',
+ int(node.id), int(volume.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
+ def destroy_volume(self, volume):
+ op = self.connection.request('hosting.disk.delete', int(volume.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
    def _to_iface(self, iface):
        # Build the list of IPAddress objects bound to this interface.
        ips = []
        for ip in iface.get('ips', []):
            new_ip = IPAddress(
                ip['id'],
                NODE_STATE_MAP.get(
                    ip['state'],
                    NodeState.UNKNOWN
                ),
                ip['ip'],
                self.connection.driver,
                version=ip.get('version'),
                extra={'reverse': ip['reverse']}
            )
            ips.append(new_ip)
        # mac_address is passed as None — the API record consumed here does
        # not appear to carry one; confirm against the Gandi API reference.
        return NetworkInterface(
            iface['id'],
            NODE_STATE_MAP.get(
                iface['state'],
                NodeState.UNKNOWN
            ),
            mac_address=None,
            driver=self.connection.driver,
            ips=ips,
            node_id=iface.get('vm_id'),
            extra={'bandwidth': iface['bandwidth']},
        )

    def _to_ifaces(self, ifaces):
        # Convert a list of raw interface records.
        return [self._to_iface(i) for i in ifaces]
+
+ def ex_list_interfaces(self):
+ """
+ Specific method to list network interfaces
+
+ :rtype: ``list`` of :class:`GandiNetworkInterface`
+ """
+ ifaces = self.connection.request('hosting.iface.list').object
+ ips = self.connection.request('hosting.ip.list').object
+ for iface in ifaces:
+ iface['ips'] = list(
+ filter(lambda i: i['iface_id'] == iface['id'], ips))
+ return self._to_ifaces(ifaces)
+
    def _to_disk(self, element):
        # Convert a raw API disk record into a Gandi Disk object.
        disk = Disk(
            id=element['id'],
            state=NODE_STATE_MAP.get(
                element['state'],
                NodeState.UNKNOWN
            ),
            name=element['name'],
            driver=self.connection.driver,
            size=element['size'],
            extra={'can_snapshot': element['can_snapshot']}
        )
        return disk

    def _to_disks(self, elements):
        # Convert a list of raw disk records.
        return [self._to_disk(el) for el in elements]

    def ex_list_disks(self):
        """
        Specific method to list all disk

        :rtype: ``list`` of :class:`GandiDisk`
        """
        res = self.connection.request('hosting.disk.list', {})
        return self._to_disks(res.object)
+
+ def ex_node_attach_disk(self, node, disk):
+ """
+ Specific method to attach a disk to a node
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :param disk: Disk which should be used
+ :type disk: :class:`GandiDisk`
+
+ :rtype: ``bool``
+ """
+ op = self.connection.request('hosting.vm.disk_attach',
+ int(node.id), int(disk.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
+ def ex_node_detach_disk(self, node, disk):
+ """
+ Specific method to detach a disk from a node
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :param disk: Disk which should be used
+ :type disk: :class:`GandiDisk`
+
+ :rtype: ``bool``
+ """
+ op = self.connection.request('hosting.vm.disk_detach',
+ int(node.id), int(disk.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
+ def ex_node_attach_interface(self, node, iface):
+ """
+ Specific method to attach an interface to a node
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+
+ :param iface: Network interface which should be used
+ :type iface: :class:`GandiNetworkInterface`
+
+ :rtype: ``bool``
+ """
+ op = self.connection.request('hosting.vm.iface_attach',
+ int(node.id), int(iface.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
+ def ex_node_detach_interface(self, node, iface):
+ """
+ Specific method to detach an interface from a node
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+
+ :param iface: Network interface which should be used
+ :type iface: :class:`GandiNetworkInterface`
+
+ :rtype: ``bool``
+ """
+ op = self.connection.request('hosting.vm.iface_detach',
+ int(node.id), int(iface.id))
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
+ def ex_snapshot_disk(self, disk, name=None):
+ """
+ Specific method to make a snapshot of a disk
+
+ :param disk: Disk which should be used
+ :type disk: :class:`GandiDisk`
+
+ :param name: Name which should be used
+ :type name: ``str``
+
+ :rtype: ``bool``
+ """
+ if not disk.extra.get('can_snapshot'):
+ raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id)
+ if not name:
+ suffix = datetime.today().strftime('%Y%m%d')
+ name = 'snap_%s' % (suffix)
+ op = self.connection.request(
+ 'hosting.disk.create_from',
+ {'name': name, 'type': 'snapshot', },
+ int(disk.id),
+ )
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
+
+ def ex_update_disk(self, disk, new_size=None, new_name=None):
+ """Specific method to update size or name of a disk
+ WARNING: if a server is attached it'll be rebooted
+
+ :param disk: Disk which should be used
+ :type disk: :class:`GandiDisk`
+
+ :param new_size: New size
+ :type new_size: ``int``
+
+ :param new_name: New name
+ :type new_name: ``str``
+
+ :rtype: ``bool``
+ """
+ params = {}
+ if new_size:
+ params.update({'size': new_size})
+ if new_name:
+ params.update({'name': new_name})
+ op = self.connection.request('hosting.disk.update',
+ int(disk.id),
+ params)
+ if self._wait_operation(op.object['id']):
+ return True
+ return False
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/gce.py b/awx/lib/site-packages/libcloud/compute/drivers/gce.py
new file mode 100644
index 0000000000..a1cf75e16b
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/gce.py
@@ -0,0 +1,3346 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Module for Google Compute Engine Driver.
+"""
+from __future__ import with_statement
+
+import datetime
+import time
+import sys
+
+from libcloud.common.google import GoogleResponse
+from libcloud.common.google import GoogleBaseConnection
+from libcloud.common.google import GoogleBaseError
+from libcloud.common.google import ResourceNotFoundError
+from libcloud.common.google import ResourceExistsError
+
+from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation
+from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot
+from libcloud.compute.base import UuidMixin
+from libcloud.compute.providers import Provider
+from libcloud.compute.types import NodeState
+
# GCE API version, interpolated into every request path (see GCEConnection).
API_VERSION = 'v1'
# Default timeout for waiting on async GCE operations — presumably in
# seconds; confirm where it is consumed further down in this module.
DEFAULT_TASK_COMPLETION_TIMEOUT = 180
+
+
def timestamp_to_datetime(timestamp):
    """
    Return a (UTC) datetime object that corresponds to the time in an
    RFC3339 timestamp.

    :param timestamp: RFC3339 timestamp string, e.g.
                      '2013-06-26T10:05:19.340-07:00'
    :type timestamp: ``str``

    :return: Datetime object corresponding to timestamp, in UTC
    :rtype: :class:`datetime.datetime`
    """
    # We remove timezone offset and microseconds (Python 2.5 strptime doesn't
    # support %f)
    ts = datetime.datetime.strptime(timestamp[:-10], '%Y-%m-%dT%H:%M:%S')
    # Fixed offset handling: the original applied the offset's sign only to
    # the minutes and then *added* the offset, which produced wrong results
    # for all '+HH:MM' offsets and for '-HH:MM' offsets with minutes != 0.
    # Local time minus its UTC offset is UTC.
    sign = 1 if timestamp[-6] == '+' else -1
    offset = datetime.timedelta(hours=int(timestamp[-5:-3]),
                                minutes=int(timestamp[-2:]))
    return ts - sign * offset
+
+
class GCEResponse(GoogleResponse):
    """Response class for GCE; all behavior comes from GoogleResponse."""
    pass
+
+
class GCEConnection(GoogleBaseConnection):
    """Connection class for the GCE driver."""
    host = 'www.googleapis.com'
    responseCls = GCEResponse

    def __init__(self, user_id, key, secure, auth_type=None,
                 credential_file=None, project=None, **kwargs):
        """
        :param user_id: Email address or Client ID used for authentication.
        :type user_id: ``str``

        :param key: RSA key, key file path, or Client Secret.
        :type key: ``str``

        :param secure: Whether to use HTTPS.
        :type secure: ``bool``

        :keyword auth_type: "SA" (Service Account) or "IA" (Installed
                            Application); forwarded to the base class.
        :type auth_type: ``str`` or ``None``

        :keyword credential_file: Path to a credentials file; forwarded to
                                  the base class.
        :type credential_file: ``str`` or ``None``

        :keyword project: GCE project name used to build the request path.
        :type project: ``str`` or ``None``
        """
        super(GCEConnection, self).__init__(user_id, key, secure=secure,
                                            auth_type=auth_type,
                                            credential_file=credential_file,
                                            **kwargs)
        # Every GCE request is scoped under /compute/<version>/projects/<p>.
        self.request_path = '/compute/%s/projects/%s' % (API_VERSION,
                                                         project)
+
+
class GCEAddress(UuidMixin):
    """A GCE Static address."""
    def __init__(self, id, name, address, region, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.address = address
        self.region = region
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this address.

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_destroy_address(address=self)

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with three arguments raised TypeError whenever repr() was called.
        return '<GCEAddress id="%s" name="%s" address="%s">' % (
            self.id, self.name, self.address)
+
+
class GCEFailedDisk(object):
    """Dummy Node object for disks that are not created."""
    def __init__(self, name, error, code):
        self.name = name
        self.error = error
        self.code = code

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with two arguments raised TypeError whenever repr() was called.
        return '<GCEFailedDisk: %s - %s>' % (self.name, self.code)
+
+
class GCEFailedNode(object):
    """Dummy Node object for nodes that are not created."""
    def __init__(self, name, error, code):
        self.name = name
        self.error = error
        self.code = code

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with two arguments raised TypeError whenever repr() was called.
        return '<GCEFailedNode: %s - %s>' % (self.name, self.code)
+
+
class GCEHealthCheck(UuidMixin):
    """A GCE Http Health Check class."""
    def __init__(self, id, name, path, port, interval, timeout,
                 unhealthy_threshold, healthy_threshold, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.path = path
        self.port = port
        self.interval = interval
        self.timeout = timeout
        self.unhealthy_threshold = unhealthy_threshold
        self.healthy_threshold = healthy_threshold
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this Health Check.

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_destroy_healthcheck(healthcheck=self)

    def update(self):
        """
        Commit updated healthcheck values.

        :return: Updated Healthcheck object
        :rtype: :class:`GCEHealthCheck`
        """
        return self.driver.ex_update_healthcheck(healthcheck=self)

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with four arguments raised TypeError whenever repr() was called.
        return '<GCEHealthCheck id="%s" name="%s" path="%s" port="%s">' % (
            self.id, self.name, self.path, self.port)
+
+
class GCEFirewall(UuidMixin):
    """A GCE Firewall rule class."""
    def __init__(self, id, name, allowed, network, source_ranges, source_tags,
                 target_tags, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.network = network
        self.allowed = allowed
        self.source_ranges = source_ranges
        self.source_tags = source_tags
        self.target_tags = target_tags
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this firewall.

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_destroy_firewall(firewall=self)

    def update(self):
        """
        Commit updated firewall values.

        :return: Updated Firewall object
        :rtype: :class:`GCEFirewall`
        """
        return self.driver.ex_update_firewall(firewall=self)

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with three arguments raised TypeError whenever repr() was called.
        return '<GCEFirewall id="%s" name="%s" network="%s">' % (
            self.id, self.name, self.network.name)
+
+
class GCEForwardingRule(UuidMixin):
    """A GCE Forwarding Rule: maps an address/protocol to a target pool."""
    def __init__(self, id, name, region, address, protocol, targetpool, driver,
                 extra=None):
        self.id = str(id)
        self.name = name
        self.region = region
        self.address = address
        self.protocol = protocol
        self.targetpool = targetpool
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this Forwarding Rule

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_destroy_forwarding_rule(forwarding_rule=self)

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with three arguments raised TypeError whenever repr() was called.
        return '<GCEForwardingRule id="%s" name="%s" address="%s">' % (
            self.id, self.name, self.address)
+
+
class GCENodeImage(NodeImage):
    """A GCE Node Image class."""
    def __init__(self, id, name, driver, extra=None):
        super(GCENodeImage, self).__init__(id, name, driver, extra=extra)

    def delete(self):
        """
        Delete this image

        Convenience wrapper around the driver's ex_delete_image.

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_delete_image(image=self)

    def deprecate(self, replacement, state):
        """
        Deprecate this image

        Convenience wrapper around the driver's ex_deprecate_image.

        :param replacement: Image to use as a replacement
        :type replacement: ``str`` or :class: `GCENodeImage`

        :param state: Deprecation state of this image. Possible values include
                      \'DELETED\', \'DEPRECATED\' or \'OBSOLETE\'.
        :type state: ``str``

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_deprecate_image(self, replacement, state)
+
+
class GCENetwork(UuidMixin):
    """A GCE Network object class."""
    def __init__(self, id, name, cidr, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.cidr = cidr
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this network

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_destroy_network(network=self)

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with three arguments raised TypeError whenever repr() was called.
        return '<GCENetwork id="%s" name="%s" cidr="%s">' % (
            self.id, self.name, self.cidr)
+
+
class GCENodeSize(NodeSize):
    """A GCE Node Size (MachineType) class."""
    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 extra=None):
        # NOTE(review): ``extra`` is assigned here and also passed to the
        # base constructor; the pre-assignment looks redundant — confirm the
        # base class stores it before removing either.
        self.extra = extra
        super(GCENodeSize, self).__init__(id, name, ram, disk, bandwidth,
                                          price, driver, extra=extra)
+
+
class GCEProject(UuidMixin):
    """GCE Project information."""
    def __init__(self, id, name, metadata, quotas, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.metadata = metadata
        self.quotas = quotas
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with two arguments raised TypeError whenever repr() was called.
        return '<GCEProject id="%s" name="%s">' % (self.id, self.name)
+
+
class GCERegion(UuidMixin):
    """A GCE Region: grouping of zones with status, quota and deprecation."""
    def __init__(self, id, name, status, zones, quotas, deprecated, driver,
                 extra=None):
        self.id = str(id)
        self.name = name
        self.status = status
        self.zones = zones
        self.quotas = quotas
        self.deprecated = deprecated
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def __repr__(self):
        # Restored format string: the literal was empty, so formatting it
        # with three arguments raised TypeError whenever repr() was called.
        return '<GCERegion id="%s" name="%s" status="%s">' % (
            self.id, self.name, self.status)
+
+
class GCESnapshot(VolumeSnapshot):
    """A GCE disk snapshot; adds name and status to VolumeSnapshot."""
    def __init__(self, id, name, size, status, driver, extra=None):
        self.name = name
        self.status = status
        # NOTE(review): base-class args are passed positionally as
        # (id, driver, size, extra) — verify against VolumeSnapshot's
        # signature, since a mismatch would silently swap fields.
        super(GCESnapshot, self).__init__(id, driver, size, extra)
+
+
+class GCETargetPool(UuidMixin):
+ def __init__(self, id, name, region, healthchecks, nodes, driver,
+ extra=None):
+ self.id = str(id)
+ self.name = name
+ self.region = region
+ self.healthchecks = healthchecks
+ self.nodes = nodes
+ self.driver = driver
+ self.extra = extra
+ UuidMixin.__init__(self)
+
+ def add_node(self, node):
+ """
+ Add a node to this target pool.
+
+ :param node: Node to add
+ :type node: ``str`` or :class:`Node`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ return self.driver.ex_targetpool_add_node(targetpool=self, node=node)
+
+ def remove_node(self, node):
+ """
+ Remove a node from this target pool.
+
+ :param node: Node to remove
+ :type node: ``str`` or :class:`Node`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ return self.driver.ex_targetpool_remove_node(targetpool=self,
+ node=node)
+
+ def add_healthcheck(self, healthcheck):
+ """
+ Add a healthcheck to this target pool.
+
+ :param healthcheck: Healthcheck to add
+ :type healthcheck: ``str`` or :class:`GCEHealthCheck`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ return self.driver.ex_targetpool_add_healthcheck(
+ targetpool=self, healthcheck=healthcheck)
+
+ def remove_healthcheck(self, healthcheck):
+ """
+ Remove a healthcheck from this target pool.
+
+ :param healthcheck: Healthcheck to remove
+ :type healthcheck: ``str`` or :class:`GCEHealthCheck`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ return self.driver.ex_targetpool_remove_healthcheck(
+ targetpool=self, healthcheck=healthcheck)
+
+ def destroy(self):
+ """
+ Destroy this Target Pool
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ return self.driver.ex_destroy_targetpool(targetpool=self)
+
+ def __repr__(self):
+ return '' % (
+ self.id, self.name, self.region.name)
+
+
+class GCEZone(NodeLocation):
+ """Subclass of NodeLocation to provide additional information."""
+ def __init__(self, id, name, status, maintenance_windows, deprecated,
+ driver, extra=None):
+ self.status = status
+ self.maintenance_windows = maintenance_windows
+ self.deprecated = deprecated
+ self.extra = extra
+ country = name.split('-')[0]
+ super(GCEZone, self).__init__(id=str(id), name=name, country=country,
+ driver=driver)
+
+ @property
+ def time_until_mw(self):
+ """
+ Returns the time until the next Maintenance Window as a
+ datetime.timedelta object.
+ """
+ return self._get_time_until_mw()
+
+ @property
+ def next_mw_duration(self):
+ """
+ Returns the duration of the next Maintenance Window as a
+ datetime.timedelta object.
+ """
+ return self._get_next_mw_duration()
+
+ def _now(self):
+ """
+ Returns current UTC time.
+
+ Can be overridden in unittests.
+ """
+ return datetime.datetime.utcnow()
+
+ def _get_next_maint(self):
+ """
+ Returns the next Maintenance Window.
+
+ :return: A dictionary containing maintenance window info (or None if
+ no maintenance windows are scheduled)
+ The dictionary contains 4 keys with values of type ``str``
+ - name: The name of the maintenance window
+ - description: Description of the maintenance window
+ - beginTime: RFC3339 Timestamp
+ - endTime: RFC3339 Timestamp
+ :rtype: ``dict`` or ``None``
+ """
+ begin = None
+ next_window = None
+ if not self.maintenance_windows:
+ return None
+ if len(self.maintenance_windows) == 1:
+ return self.maintenance_windows[0]
+ for mw in self.maintenance_windows:
+ begin_next = timestamp_to_datetime(mw['beginTime'])
+ if (not begin) or (begin_next < begin):
+ begin = begin_next
+ next_window = mw
+ return next_window
+
+ def _get_time_until_mw(self):
+ """
+ Returns time until next maintenance window.
+
+ :return: Time until next maintenance window (or None if no
+ maintenance windows are scheduled)
+ :rtype: :class:`datetime.timedelta` or ``None``
+ """
+ next_window = self._get_next_maint()
+ if not next_window:
+ return None
+ now = self._now()
+ next_begin = timestamp_to_datetime(next_window['beginTime'])
+ return next_begin - now
+
+ def _get_next_mw_duration(self):
+ """
+ Returns the duration of the next maintenance window.
+
+ :return: Duration of next maintenance window (or None if no
+ maintenance windows are scheduled)
+ :rtype: :class:`datetime.timedelta` or ``None``
+ """
+ next_window = self._get_next_maint()
+ if not next_window:
+ return None
+ next_begin = timestamp_to_datetime(next_window['beginTime'])
+ next_end = timestamp_to_datetime(next_window['endTime'])
+ return next_end - next_begin
+
+ def __repr__(self):
+ return '' % (self.id, self.name,
+ self.status)
+
+
+class GCENodeDriver(NodeDriver):
+ """
+ GCE Node Driver class.
+
+ This is the primary driver for interacting with Google Compute Engine. It
+ contains all of the standard libcloud methods, plus additional ex_* methods
+ for more features.
+
+ Note that many methods allow either objects or strings (or lists of
+ objects/strings). In most cases, passing strings instead of objects will
+ result in additional GCE API calls.
+ """
+ connectionCls = GCEConnection
+ api_name = 'googleapis'
+ name = "Google Compute Engine"
+ type = Provider.GCE
+ website = 'https://cloud.google.com/'
+
+ NODE_STATE_MAP = {
+ "PROVISIONING": NodeState.PENDING,
+ "STAGING": NodeState.PENDING,
+ "RUNNING": NodeState.RUNNING,
+ "STOPPED": NodeState.TERMINATED,
+ "STOPPING": NodeState.TERMINATED,
+ "TERMINATED": NodeState.TERMINATED
+ }
+
    def __init__(self, user_id, key, datacenter=None, project=None,
                 auth_type=None, scopes=None, **kwargs):
        """
        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :keyword datacenter: The name of the datacenter (zone) used for
                             operations.
        :type datacenter: ``str``

        :keyword project: Your GCE project name. (required)
        :type project: ``str``

        :keyword auth_type: Accepted values are "SA" or "IA"
                            ("Service Account" or "Installed Application").
                            If not supplied, auth_type will be guessed based
                            on value of user_id.
        :type auth_type: ``str``

        :keyword scopes: List of authorization URLs. Default is empty and
                         grants read/write to Compute, Storage, DNS.
        :type scopes: ``list``

        :raises: ``ValueError`` if ``project`` is not supplied
        """
        self.auth_type = auth_type
        self.project = project
        self.scopes = scopes
        if not self.project:
            raise ValueError('Project name must be specified using '
                             '"project" keyword.')
        super(GCENodeDriver, self).__init__(user_id, key, **kwargs)

        # Cache Zone and Region information to reduce API calls and
        # increase speed
        self.base_path = '/compute/%s/projects/%s' % (API_VERSION,
                                                      self.project)
        # Both lookups below hit the API once at construction time.
        self.zone_list = self.ex_list_zones()
        self.zone_dict = {}
        for zone in self.zone_list:
            self.zone_dict[zone.name] = zone
        if datacenter:
            self.zone = self.ex_get_zone(datacenter)
        else:
            self.zone = None

        self.region_list = self.ex_list_regions()
        self.region_dict = {}
        for region in self.region_list:
            self.region_dict[region.name] = region

        # Default region is derived from the configured zone, when given.
        if self.zone:
            self.region = self._get_region_from_zone(self.zone)
        else:
            self.region = None
+
+ def ex_list_addresses(self, region=None):
+ """
+ Return a list of static addresses for a region or all.
+
+ :keyword region: The region to return addresses from. For example:
+ 'us-central1'. If None, will return addresses from
+ region of self.zone. If 'all', will return all
+ addresses.
+ :type region: ``str`` or ``None``
+
+ :return: A list of static address objects.
+ :rtype: ``list`` of :class:`GCEAddress`
+ """
+ list_addresses = []
+ region = self._set_region(region)
+ if region is None:
+ request = '/aggregated/addresses'
+ else:
+ request = '/regions/%s/addresses' % (region.name)
+ response = self.connection.request(request, method='GET').object
+
+ if 'items' in response:
+ # The aggregated result returns dictionaries for each region
+ if region is None:
+ for v in response['items'].values():
+ region_addresses = [self._to_address(a) for a in
+ v.get('addresses', [])]
+ list_addresses.extend(region_addresses)
+ else:
+ list_addresses = [self._to_address(a) for a in
+ response['items']]
+ return list_addresses
+
+ def ex_list_healthchecks(self):
+ """
+ Return the list of health checks.
+
+ :return: A list of health check objects.
+ :rtype: ``list`` of :class:`GCEHealthCheck`
+ """
+ list_healthchecks = []
+ request = '/global/httpHealthChecks'
+ response = self.connection.request(request, method='GET').object
+ list_healthchecks = [self._to_healthcheck(h) for h in
+ response.get('items', [])]
+ return list_healthchecks
+
+ def ex_list_firewalls(self):
+ """
+ Return the list of firewalls.
+
+ :return: A list of firewall objects.
+ :rtype: ``list`` of :class:`GCEFirewall`
+ """
+ list_firewalls = []
+ request = '/global/firewalls'
+ response = self.connection.request(request, method='GET').object
+ list_firewalls = [self._to_firewall(f) for f in
+ response.get('items', [])]
+ return list_firewalls
+
+ def ex_list_forwarding_rules(self, region=None):
+ """
+ Return the list of forwarding rules for a region or all.
+
+ :keyword region: The region to return forwarding rules from. For
+ example: 'us-central1'. If None, will return
+ forwarding rules from the region of self.region
+ (which is based on self.zone). If 'all', will
+ return all forwarding rules.
+ :type region: ``str`` or :class:`GCERegion` or ``None``
+
+ :return: A list of forwarding rule objects.
+ :rtype: ``list`` of :class:`GCEForwardingRule`
+ """
+ list_forwarding_rules = []
+ region = self._set_region(region)
+ if region is None:
+ request = '/aggregated/forwardingRules'
+ else:
+ request = '/regions/%s/forwardingRules' % (region.name)
+ response = self.connection.request(request, method='GET').object
+
+ if 'items' in response:
+ # The aggregated result returns dictionaries for each region
+ if region is None:
+ for v in response['items'].values():
+ region_forwarding_rules = [self._to_forwarding_rule(f) for
+ f in v.get('forwardingRules',
+ [])]
+ list_forwarding_rules.extend(region_forwarding_rules)
+ else:
+ list_forwarding_rules = [self._to_forwarding_rule(f) for f in
+ response['items']]
+ return list_forwarding_rules
+
+ def list_images(self, ex_project=None):
+ """
+ Return a list of image objects for a project.
+
+ :keyword ex_project: Optional alternate project name.
+ :type ex_project: ``str`` or ``None``
+
+ :return: List of GCENodeImage objects
+ :rtype: ``list`` of :class:`GCENodeImage`
+ """
+ request = '/global/images'
+ if ex_project is None:
+ response = self.connection.request(request, method='GET').object
+ else:
+ # Save the connection request_path
+ save_request_path = self.connection.request_path
+ # Override the connection request path
+ new_request_path = save_request_path.replace(self.project,
+ ex_project)
+ self.connection.request_path = new_request_path
+ response = self.connection.request(request, method='GET').object
+ # Restore the connection request_path
+ self.connection.request_path = save_request_path
+ list_images = [self._to_node_image(i) for i in
+ response.get('items', [])]
+ return list_images
+
+ def list_locations(self):
+ """
+ Return a list of locations (zones).
+
+ The :class:`ex_list_zones` method returns more comprehensive results,
+ but this is here for compatibility.
+
+ :return: List of NodeLocation objects
+ :rtype: ``list`` of :class:`NodeLocation`
+ """
+ list_locations = []
+ request = '/zones'
+ response = self.connection.request(request, method='GET').object
+ list_locations = [self._to_node_location(l) for l in response['items']]
+ return list_locations
+
+ def ex_list_networks(self):
+ """
+ Return the list of networks.
+
+ :return: A list of network objects.
+ :rtype: ``list`` of :class:`GCENetwork`
+ """
+ list_networks = []
+ request = '/global/networks'
+ response = self.connection.request(request, method='GET').object
+ list_networks = [self._to_network(n) for n in
+ response.get('items', [])]
+ return list_networks
+
+ def list_nodes(self, ex_zone=None):
+ """
+ Return a list of nodes in the current zone or all zones.
+
+ :keyword ex_zone: Optional zone name or 'all'
+ :type ex_zone: ``str`` or :class:`GCEZone` or
+ :class:`NodeLocation` or ``None``
+
+ :return: List of Node objects
+ :rtype: ``list`` of :class:`Node`
+ """
+ list_nodes = []
+ zone = self._set_zone(ex_zone)
+ if zone is None:
+ request = '/aggregated/instances'
+ else:
+ request = '/zones/%s/instances' % (zone.name)
+
+ response = self.connection.request(request, method='GET').object
+
+ if 'items' in response:
+ # The aggregated response returns a dict for each zone
+ if zone is None:
+ for v in response['items'].values():
+ zone_nodes = [self._to_node(i) for i in
+ v.get('instances', [])]
+ list_nodes.extend(zone_nodes)
+ else:
+ list_nodes = [self._to_node(i) for i in response['items']]
+ return list_nodes
+
+ def ex_list_regions(self):
+ """
+ Return the list of regions.
+
+ :return: A list of region objects.
+ :rtype: ``list`` of :class:`GCERegion`
+ """
+ list_regions = []
+ request = '/regions'
+ response = self.connection.request(request, method='GET').object
+ list_regions = [self._to_region(r) for r in response['items']]
+ return list_regions
+
+ def list_sizes(self, location=None):
+ """
+ Return a list of sizes (machineTypes) in a zone.
+
+ :keyword location: Location or Zone for sizes
+ :type location: ``str`` or :class:`GCEZone` or
+ :class:`NodeLocation` or ``None``
+
+ :return: List of GCENodeSize objects
+ :rtype: ``list`` of :class:`GCENodeSize`
+ """
+ list_sizes = []
+ zone = self._set_zone(location)
+ if zone is None:
+ request = '/aggregated/machineTypes'
+ else:
+ request = '/zones/%s/machineTypes' % (zone.name)
+
+ response = self.connection.request(request, method='GET').object
+
+ if 'items' in response:
+ # The aggregated response returns a dict for each zone
+ if zone is None:
+ for v in response['items'].values():
+ zone_sizes = [self._to_node_size(s) for s in
+ v.get('machineTypes', [])]
+ list_sizes.extend(zone_sizes)
+ else:
+ list_sizes = [self._to_node_size(s) for s in response['items']]
+ return list_sizes
+
+ def ex_list_snapshots(self):
+ """
+ Return the list of disk snapshots in the project.
+
+ :return: A list of snapshot objects
+ :rtype: ``list`` of :class:`GCESnapshot`
+ """
+ list_snapshots = []
+ request = '/global/snapshots'
+ response = self.connection.request(request, method='GET').object
+ list_snapshots = [self._to_snapshot(s) for s in
+ response.get('items', [])]
+ return list_snapshots
+
+ def ex_list_targetpools(self, region=None):
+ """
+ Return the list of target pools.
+
+ :return: A list of target pool objects
+ :rtype: ``list`` of :class:`GCETargetPool`
+ """
+ list_targetpools = []
+ region = self._set_region(region)
+ if region is None:
+ request = '/aggregated/targetPools'
+ else:
+ request = '/regions/%s/targetPools' % (region.name)
+ response = self.connection.request(request, method='GET').object
+
+ if 'items' in response:
+ # The aggregated result returns dictionaries for each region
+ if region is None:
+ for v in response['items'].values():
+ region_targetpools = [self._to_targetpool(t) for t in
+ v.get('targetPools', [])]
+ list_targetpools.extend(region_targetpools)
+ else:
+ list_targetpools = [self._to_targetpool(t) for t in
+ response['items']]
+ return list_targetpools
+
+ def list_volumes(self, ex_zone=None):
+ """
+ Return a list of volumes for a zone or all.
+
+ Will return list from provided zone, or from the default zone unless
+ given the value of 'all'.
+
+ :keyword ex_zone: The zone to return volumes from.
+ :type ex_zone: ``str`` or :class:`GCEZone` or
+ :class:`NodeLocation` or ``None``
+
+ :return: A list of volume objects.
+ :rtype: ``list`` of :class:`StorageVolume`
+ """
+ list_volumes = []
+ zone = self._set_zone(ex_zone)
+ if zone is None:
+ request = '/aggregated/disks'
+ else:
+ request = '/zones/%s/disks' % (zone.name)
+
+ response = self.connection.request(request, method='GET').object
+ if 'items' in response:
+ # The aggregated response returns a dict for each zone
+ if zone is None:
+ for v in response['items'].values():
+ zone_volumes = [self._to_storage_volume(d) for d in
+ v.get('disks', [])]
+ list_volumes.extend(zone_volumes)
+ else:
+ list_volumes = [self._to_storage_volume(d) for d in
+ response['items']]
+ return list_volumes
+
+ def ex_list_zones(self):
+ """
+ Return the list of zones.
+
+ :return: A list of zone objects.
+ :rtype: ``list`` of :class:`GCEZone`
+ """
+ list_zones = []
+ request = '/zones'
+ response = self.connection.request(request, method='GET').object
+ list_zones = [self._to_zone(z) for z in response['items']]
+ return list_zones
+
+ def ex_create_address(self, name, region=None):
+ """
+ Create a static address in a region.
+
+ :param name: Name of static address
+ :type name: ``str``
+
+ :keyword region: Name of region for the address (e.g. 'us-central1')
+ :type region: ``str`` or :class:`GCERegion`
+
+ :return: Static Address object
+ :rtype: :class:`GCEAddress`
+ """
+ region = region or self.region
+ if not hasattr(region, 'name'):
+ region = self.ex_get_region(region)
+ elif region is None:
+ raise ValueError('REGION_NOT_SPECIFIED',
+ 'Region must be provided for an address')
+ address_data = {'name': name}
+ request = '/regions/%s/addresses' % (region.name)
+ self.connection.async_request(request, method='POST',
+ data=address_data)
+ return self.ex_get_address(name, region=region)
+
+ def ex_create_healthcheck(self, name, host=None, path=None, port=None,
+ interval=None, timeout=None,
+ unhealthy_threshold=None,
+ healthy_threshold=None):
+ """
+ Create an Http Health Check.
+
+ :param name: Name of health check
+ :type name: ``str``
+
+ :keyword host: Hostname of health check request. Defaults to empty
+ and public IP is used instead.
+ :type host: ``str``
+
+ :keyword path: The request path for the check. Defaults to /.
+ :type path: ``str``
+
+ :keyword port: The TCP port number for the check. Defaults to 80.
+ :type port: ``int``
+
+ :keyword interval: How often (in seconds) to check. Defaults to 5.
+ :type interval: ``int``
+
+ :keyword timeout: How long to wait before failing. Defaults to 5.
+ :type timeout: ``int``
+
+ :keyword unhealthy_threshold: How many failures before marking
+ unhealthy. Defaults to 2.
+ :type unhealthy_threshold: ``int``
+
+ :keyword healthy_threshold: How many successes before marking as
+ healthy. Defaults to 2.
+ :type healthy_threshold: ``int``
+
+ :return: Health Check object
+ :rtype: :class:`GCEHealthCheck`
+ """
+ hc_data = {}
+ hc_data['name'] = name
+ if host:
+ hc_data['host'] = host
+ # As of right now, the 'default' values aren't getting set when called
+ # through the API, so set them explicitly
+ hc_data['requestPath'] = path or '/'
+ hc_data['port'] = port or 80
+ hc_data['checkIntervalSec'] = interval or 5
+ hc_data['timeoutSec'] = timeout or 5
+ hc_data['unhealthyThreshold'] = unhealthy_threshold or 2
+ hc_data['healthyThreshold'] = healthy_threshold or 2
+
+ request = '/global/httpHealthChecks'
+
+ self.connection.async_request(request, method='POST', data=hc_data)
+ return self.ex_get_healthcheck(name)
+
+ def ex_create_firewall(self, name, allowed, network='default',
+ source_ranges=None, source_tags=None,
+ target_tags=None):
+ """
+ Create a firewall on a network.
+
+ Firewall rules should be supplied in the "allowed" field. This is a
+ list of dictionaries formated like so ("ports" is optional)::
+
+ [{"IPProtocol": "",
+ "ports": ""}]
+
+ For example, to allow tcp on port 8080 and udp on all ports, 'allowed'
+ would be::
+
+ [{"IPProtocol": "tcp",
+ "ports": ["8080"]},
+ {"IPProtocol": "udp"}]
+
+ See `Firewall Reference `_ for more information.
+
+ :param name: Name of the firewall to be created
+ :type name: ``str``
+
+ :param allowed: List of dictionaries with rules
+ :type allowed: ``list`` of ``dict``
+
+ :keyword network: The network that the firewall applies to.
+ :type network: ``str`` or :class:`GCENetwork`
+
+ :keyword source_ranges: A list of IP ranges in CIDR format that the
+ firewall should apply to. Defaults to
+ ['0.0.0.0/0']
+ :type source_ranges: ``list`` of ``str``
+
+ :keyword source_tags: A list of source instance tags the rules apply
+ to.
+ :type source_tags: ``list`` of ``str``
+
+ :keyword target_tags: A list of target instance tags the rules apply
+ to.
+ :type target_tags: ``list`` of ``str``
+
+ :return: Firewall object
+ :rtype: :class:`GCEFirewall`
+ """
+ firewall_data = {}
+ if not hasattr(network, 'name'):
+ nw = self.ex_get_network(network)
+ else:
+ nw = network
+
+ firewall_data['name'] = name
+ firewall_data['allowed'] = allowed
+ firewall_data['network'] = nw.extra['selfLink']
+ firewall_data['sourceRanges'] = source_ranges or ['0.0.0.0/0']
+ if source_tags is not None:
+ firewall_data['sourceTags'] = source_tags
+ if target_tags is not None:
+ firewall_data['targetTags'] = target_tags
+
+ request = '/global/firewalls'
+
+ self.connection.async_request(request, method='POST',
+ data=firewall_data)
+ return self.ex_get_firewall(name)
+
+ def ex_create_forwarding_rule(self, name, targetpool, region=None,
+ protocol='tcp', port_range=None,
+ address=None):
+ """
+ Create a forwarding rule.
+
+ :param name: Name of forwarding rule to be created
+ :type name: ``str``
+
+ :param targetpool: Target pool to apply the rule to
+ :param targetpool: ``str`` or :class:`GCETargetPool`
+
+ :keyword region: Region to create the forwarding rule in. Defaults to
+ self.region
+ :type region: ``str`` or :class:`GCERegion`
+
+ :keyword protocol: Should be 'tcp' or 'udp'
+ :type protocol: ``str``
+
+ :keyword port_range: Optional single port number or range separated
+ by a dash. Examples: '80', '5000-5999'.
+ :type port_range: ``str``
+
+ :keyword address: Optional static address for forwarding rule. Must be
+ in same region.
+ :type address: ``str`` or :class:`GCEAddress`
+
+ :return: Forwarding Rule object
+ :rtype: :class:`GCEForwardingRule`
+ """
+ forwarding_rule_data = {}
+ region = region or self.region
+ if not hasattr(region, 'name'):
+ region = self.ex_get_region(region)
+ if not hasattr(targetpool, 'name'):
+ targetpool = self.ex_get_targetpool(targetpool, region)
+
+ forwarding_rule_data['name'] = name
+ forwarding_rule_data['region'] = region.extra['selfLink']
+ forwarding_rule_data['target'] = targetpool.extra['selfLink']
+ forwarding_rule_data['protocol'] = protocol.upper()
+ if address:
+ if not hasattr(address, 'name'):
+ address = self.ex_get_address(address, region)
+ forwarding_rule_data['IPAddress'] = address.extra['selfLink']
+ if port_range:
+ forwarding_rule_data['portRange'] = port_range
+
+ request = '/regions/%s/forwardingRules' % (region.name)
+
+ self.connection.async_request(request, method='POST',
+ data=forwarding_rule_data)
+
+ return self.ex_get_forwarding_rule(name)
+
+ def ex_create_network(self, name, cidr):
+ """
+ Create a network.
+
+ :param name: Name of network to be created
+ :type name: ``str``
+
+ :param cidr: Address range of network in CIDR format.
+ :type cidr: ``str``
+
+ :return: Network object
+ :rtype: :class:`GCENetwork`
+ """
+ network_data = {}
+ network_data['name'] = name
+ network_data['IPv4Range'] = cidr
+
+ request = '/global/networks'
+
+ self.connection.async_request(request, method='POST',
+ data=network_data)
+
+ return self.ex_get_network(name)
+
    def create_node(self, name, size, image, location=None,
                    ex_network='default', ex_tags=None, ex_metadata=None,
                    ex_boot_disk=None, use_existing_disk=True,
                    external_ip='ephemeral'):
        """
        Create a new node and return a node object for the node.

        :param name: The name of the node to create.
        :type name: ``str``

        :param size: The machine type to use.
        :type size: ``str`` or :class:`GCENodeSize`

        :param image: The image to use to create the node (or, if attaching
                      a persistent disk, the image used to create the disk)
        :type image: ``str`` or :class:`GCENodeImage`

        :keyword location: The location (zone) to create the node in.
        :type location: ``str`` or :class:`NodeLocation` or
                        :class:`GCEZone` or ``None``

        :keyword ex_network: The network to associate with the node.
        :type ex_network: ``str`` or :class:`GCENetwork`

        :keyword ex_tags: A list of tags to associate with the node.
        :type ex_tags: ``list`` of ``str`` or ``None``

        :keyword ex_metadata: Metadata dictionary for instance.
        :type ex_metadata: ``dict`` or ``None``

        :keyword ex_boot_disk: The boot disk to attach to the instance.
        :type ex_boot_disk: :class:`StorageVolume` or ``str``

        :keyword use_existing_disk: If True and if an existing disk with the
                                    same name/location is found, use that
                                    disk instead of creating a new one.
        :type use_existing_disk: ``bool``

        :keyword external_ip: The external IP address to use.  If 'ephemeral'
                              (default), a new non-static address will be
                              used.  If 'None', then no external address will
                              be used.  To use an existing static IP address,
                              a GCEAddress object should be passed in.
        :type external_ip: :class:`GCEAddress` or ``str`` or None

        :return: A Node object for the new node.
        :rtype: :class:`Node`
        """
        # Resolve any string arguments into their object equivalents.
        # Location must be resolved first since size lookup is zone-scoped.
        location = location or self.zone
        if not hasattr(location, 'name'):
            location = self.ex_get_zone(location)
        if not hasattr(size, 'name'):
            size = self.ex_get_size(size, location)
        if not hasattr(ex_network, 'name'):
            ex_network = self.ex_get_network(ex_network)
        if not hasattr(image, 'name'):
            image = self.ex_get_image(image)

        # A boot disk is required; create one from the image unless the
        # caller supplied one.
        if not ex_boot_disk:
            ex_boot_disk = self.create_volume(None, name, location=location,
                                              image=image,
                                              use_existing=use_existing_disk)

        # Convert a plain metadata dict into the GCE "items" list format.
        if ex_metadata is not None:
            ex_metadata = {"items": [{"key": k, "value": v}
                                     for k, v in ex_metadata.items()]}

        request, node_data = self._create_node_req(name, size, image,
                                                   location, ex_network,
                                                   ex_tags, ex_metadata,
                                                   ex_boot_disk, external_ip)
        # Block until the create operation completes, then fetch the node.
        self.connection.async_request(request, method='POST', data=node_data)

        return self.ex_get_node(name, location.name)
+
+ def ex_create_multiple_nodes(self, base_name, size, image, number,
+ location=None, ex_network='default',
+ ex_tags=None, ex_metadata=None,
+ ignore_errors=True, use_existing_disk=True,
+ poll_interval=2, external_ip='ephemeral',
+ timeout=DEFAULT_TASK_COMPLETION_TIMEOUT):
+ """
+ Create multiple nodes and return a list of Node objects.
+
+ Nodes will be named with the base name and a number. For example, if
+ the base name is 'libcloud' and you create 3 nodes, they will be
+ named::
+
+ libcloud-000
+ libcloud-001
+ libcloud-002
+
+ :param base_name: The base name of the nodes to create.
+ :type base_name: ``str``
+
+ :param size: The machine type to use.
+ :type size: ``str`` or :class:`GCENodeSize`
+
+ :param image: The image to use to create the nodes.
+ :type image: ``str`` or :class:`GCENodeImage`
+
+ :param number: The number of nodes to create.
+ :type number: ``int``
+
+ :keyword location: The location (zone) to create the nodes in.
+ :type location: ``str`` or :class:`NodeLocation` or
+ :class:`GCEZone` or ``None``
+
+ :keyword ex_network: The network to associate with the nodes.
+ :type ex_network: ``str`` or :class:`GCENetwork`
+
+ :keyword ex_tags: A list of tags to assiciate with the nodes.
+ :type ex_tags: ``list`` of ``str`` or ``None``
+
+ :keyword ex_metadata: Metadata dictionary for instances.
+ :type ex_metadata: ``dict`` or ``None``
+
+ :keyword ignore_errors: If True, don't raise Exceptions if one or
+ more nodes fails.
+ :type ignore_errors: ``bool``
+
+ :keyword use_existing_disk: If True and if an existing disk with the
+ same name/location is found, use that
+ disk instead of creating a new one.
+ :type use_existing_disk: ``bool``
+
+ :keyword poll_interval: Number of seconds between status checks.
+ :type poll_interval: ``int``
+
+ :keyword external_ip: The external IP address to use. If 'ephemeral'
+ (default), a new non-static address will be
+ used. If 'None', then no external address will
+ be used. (Static addresses are not supported for
+ multiple node creation.)
+ :type external_ip: ``str`` or None
+
+ :keyword timeout: The number of seconds to wait for all nodes to be
+ created before timing out.
+ :type timeout: ``int``
+
+ :return: A list of Node objects for the new nodes.
+ :rtype: ``list`` of :class:`Node`
+ """
+ location = location or self.zone
+ if not hasattr(location, 'name'):
+ location = self.ex_get_zone(location)
+ if not hasattr(size, 'name'):
+ size = self.ex_get_size(size, location)
+ if not hasattr(ex_network, 'name'):
+ ex_network = self.ex_get_network(ex_network)
+ if not hasattr(image, 'name'):
+ image = self.ex_get_image(image)
+
+ node_attrs = {'size': size,
+ 'image': image,
+ 'location': location,
+ 'network': ex_network,
+ 'tags': ex_tags,
+ 'metadata': ex_metadata,
+ 'ignore_errors': ignore_errors,
+ 'use_existing_disk': use_existing_disk,
+ 'external_ip': external_ip}
+
+ # List for holding the status information for disk/node creation.
+ status_list = []
+
+ for i in range(number):
+ name = '%s-%03d' % (base_name, i)
+
+ status = {'name': name,
+ 'node_response': None,
+ 'node': None,
+ 'disk_response': None,
+ 'disk': None}
+
+ status_list.append(status)
+
+ # Create disks for nodes
+ for status in status_list:
+ self._multi_create_disk(status, node_attrs)
+
+ start_time = time.time()
+ complete = False
+ while not complete:
+ if (time.time() - start_time >= timeout):
+ raise Exception("Timeout (%s sec) while waiting for multiple "
+ "instances")
+ complete = True
+ time.sleep(poll_interval)
+ for status in status_list:
+ # If disk does not yet exist, check on its status
+ if not status['disk']:
+ self._multi_check_disk(status, node_attrs)
+
+ # If disk exists, but node does not, create the node or check
+ # on its status if already in progress.
+ if status['disk'] and not status['node']:
+ if not status['node_response']:
+ self._multi_create_node(status, node_attrs)
+ else:
+ self._multi_check_node(status, node_attrs)
+ # If any of the nodes have not been created (or failed) we are
+ # not done yet.
+ if not status['node']:
+ complete = False
+
+ # Return list of nodes
+ node_list = []
+ for status in status_list:
+ node_list.append(status['node'])
+ return node_list
+
+ def ex_create_targetpool(self, name, region=None, healthchecks=None,
+ nodes=None):
+ """
+ Create a target pool.
+
+ :param name: Name of target pool
+ :type name: ``str``
+
+ :keyword region: Region to create the target pool in. Defaults to
+ self.region
+ :type region: ``str`` or :class:`GCERegion` or ``None``
+
+ :keyword healthchecks: Optional list of health checks to attach
+ :type healthchecks: ``list`` of ``str`` or :class:`GCEHealthCheck`
+
+ :keyword nodes: Optional list of nodes to attach to the pool
+ :type nodes: ``list`` of ``str`` or :class:`Node`
+
+ :return: Target Pool object
+ :rtype: :class:`GCETargetPool`
+ """
+ region = region or self.region
+ targetpool_data = {}
+ targetpool_data['name'] = name
+ if not hasattr(region, 'name'):
+ region = self.ex_get_region(region)
+ targetpool_data['region'] = region.extra['selfLink']
+
+ if healthchecks:
+ if not hasattr(healthchecks[0], 'name'):
+ hc_list = [self.ex_get_healthcheck(h).extra['selfLink'] for h
+ in healthchecks]
+ else:
+ hc_list = [h.extra['selfLink'] for h in healthchecks]
+ targetpool_data['healthChecks'] = hc_list
+ if nodes:
+ if not hasattr(nodes[0], 'name'):
+ node_list = [self.ex_get_node(n, 'all').extra['selfLink'] for n
+ in nodes]
+ else:
+ node_list = [n.extra['selfLink'] for n in nodes]
+ targetpool_data['instances'] = node_list
+
+ request = '/regions/%s/targetPools' % (region.name)
+
+ self.connection.async_request(request, method='POST',
+ data=targetpool_data)
+
+ return self.ex_get_targetpool(name, region)
+
    def create_volume(self, size, name, location=None, snapshot=None,
                      image=None, use_existing=True):
        """
        Create a volume (disk).

        :param size: Size of volume to create (in GB). Can be None if image
                     or snapshot is supplied.
        :type size: ``int`` or ``str`` or ``None``

        :param name: Name of volume to create
        :type name: ``str``

        :keyword location: Location (zone) to create the volume in
        :type location: ``str`` or :class:`GCEZone` or
                        :class:`NodeLocation` or ``None``

        :keyword snapshot: Snapshot to create image from
        :type snapshot: :class:`GCESnapshot` or ``str`` or ``None``

        :keyword image: Image to create disk from.
        :type image: :class:`GCENodeImage` or ``str`` or ``None``

        :keyword use_existing: If True and a disk with the given name already
                               exists, return an object for that disk instead
                               of attempting to create a new disk.
        :type use_existing: ``bool``

        :return: Storage Volume object
        :rtype: :class:`StorageVolume`
        """
        request, volume_data, params = self._create_vol_req(
            size, name, location, snapshot, image)
        try:
            self.connection.async_request(request, method='POST',
                                          data=volume_data, params=params)
        except ResourceExistsError:
            # sys.exc_info() is used instead of "except ... as e" for
            # old-Python compatibility (the library's prevailing pattern).
            e = sys.exc_info()[1]
            # When use_existing is set, a name collision is deliberately
            # swallowed and the existing disk is returned below.
            if not use_existing:
                raise e

        return self.ex_get_volume(name, location)
+
+ def create_volume_snapshot(self, volume, name):
+ """
+ Create a snapshot of the provided Volume.
+
+ :param volume: A StorageVolume object
+ :type volume: :class:`StorageVolume`
+
+ :return: A GCE Snapshot object
+ :rtype: :class:`GCESnapshot`
+ """
+ snapshot_data = {}
+ snapshot_data['name'] = name
+ request = '/zones/%s/disks/%s/createSnapshot' % (
+ volume.extra['zone'].name, volume.name)
+ self.connection.async_request(request, method='POST',
+ data=snapshot_data)
+
+ return self.ex_get_snapshot(name)
+
+ def list_volume_snapshots(self, volume):
+ """
+ List snapshots created from the provided volume.
+
+ For GCE, snapshots are global, but while the volume they were
+ created from still exists, the source disk for the snapshot is
+ tracked.
+
+ :param volume: A StorageVolume object
+ :type volume: :class:`StorageVolume`
+
+ :return: A list of Snapshot objects
+ :rtype: ``list`` of :class:`GCESnapshot`
+ """
+ volume_snapshots = []
+ volume_link = volume.extra['selfLink']
+ all_snapshots = self.ex_list_snapshots()
+ for snapshot in all_snapshots:
+ if snapshot.extra['sourceDisk'] == volume_link:
+ volume_snapshots.append(snapshot)
+ return volume_snapshots
+
+ def ex_update_healthcheck(self, healthcheck):
+ """
+ Update a health check with new values.
+
+ To update, change the attributes of the health check object and pass
+ the updated object to the method.
+
+ :param healthcheck: A healthcheck object with updated values.
+ :type healthcheck: :class:`GCEHealthCheck`
+
+ :return: An object representing the new state of the health check.
+ :rtype: :class:`GCEHealthCheck`
+ """
+ hc_data = {}
+ hc_data['name'] = healthcheck.name
+ hc_data['requestPath'] = healthcheck.path
+ hc_data['port'] = healthcheck.port
+ hc_data['checkIntervalSec'] = healthcheck.interval
+ hc_data['timeoutSec'] = healthcheck.timeout
+ hc_data['unhealthyThreshold'] = healthcheck.unhealthy_threshold
+ hc_data['healthyThreshold'] = healthcheck.healthy_threshold
+ if healthcheck.extra['host']:
+ hc_data['host'] = healthcheck.extra['host']
+ if healthcheck.extra['description']:
+ hc_data['description'] = healthcheck.extra['description']
+
+ request = '/global/httpHealthChecks/%s' % (healthcheck.name)
+
+ self.connection.async_request(request, method='PUT',
+ data=hc_data)
+
+ return self.ex_get_healthcheck(healthcheck.name)
+
+ def ex_update_firewall(self, firewall):
+ """
+ Update a firewall with new values.
+
+ To update, change the attributes of the firewall object and pass the
+ updated object to the method.
+
+ :param firewall: A firewall object with updated values.
+ :type firewall: :class:`GCEFirewall`
+
+ :return: An object representing the new state of the firewall.
+ :rtype: :class:`GCEFirewall`
+ """
+ firewall_data = {}
+ firewall_data['name'] = firewall.name
+ firewall_data['allowed'] = firewall.allowed
+ firewall_data['network'] = firewall.network.extra['selfLink']
+ if firewall.source_ranges:
+ firewall_data['sourceRanges'] = firewall.source_ranges
+ if firewall.source_tags:
+ firewall_data['sourceTags'] = firewall.source_tags
+ if firewall.target_tags:
+ firewall_data['targetTags'] = firewall.target_tags
+ if firewall.extra['description']:
+ firewall_data['description'] = firewall.extra['description']
+
+ request = '/global/firewalls/%s' % (firewall.name)
+
+ self.connection.async_request(request, method='PUT',
+ data=firewall_data)
+
+ return self.ex_get_firewall(firewall.name)
+
+ def ex_targetpool_add_node(self, targetpool, node):
+ """
+ Add a node to a target pool.
+
+ :param targetpool: The targetpool to add node to
+ :type targetpool: ``str`` or :class:`GCETargetPool`
+
+ :param node: The node to add
+ :type node: ``str`` or :class:`Node`
+
+ :returns: True if successful
+ :rtype: ``bool``
+ """
+ if not hasattr(targetpool, 'name'):
+ targetpool = self.ex_get_targetpool(targetpool)
+ if not hasattr(node, 'name'):
+ node = self.ex_get_node(node, 'all')
+
+ targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]}
+
+ request = '/regions/%s/targetPools/%s/addInstance' % (
+ targetpool.region.name, targetpool.name)
+ self.connection.async_request(request, method='POST',
+ data=targetpool_data)
+ targetpool.nodes.append(node)
+ return True
+
+ def ex_targetpool_add_healthcheck(self, targetpool, healthcheck):
+ """
+ Add a health check to a target pool.
+
+ :param targetpool: The targetpool to add health check to
+ :type targetpool: ``str`` or :class:`GCETargetPool`
+
+ :param healthcheck: The healthcheck to add
+ :type healthcheck: ``str`` or :class:`GCEHealthCheck`
+
+ :returns: True if successful
+ :rtype: ``bool``
+ """
+ if not hasattr(targetpool, 'name'):
+ targetpool = self.ex_get_targetpool(targetpool)
+ if not hasattr(healthcheck, 'name'):
+ healthcheck = self.ex_get_healthcheck(healthcheck)
+
+ targetpool_data = {'healthCheck': healthcheck.extra['selfLink']}
+
+ request = '/regions/%s/targetPools/%s/addHealthCheck' % (
+ targetpool.region.name, targetpool.name)
+ self.connection.async_request(request, method='POST',
+ data=targetpool_data)
+ targetpool.healthchecks.append(healthcheck)
+ return True
+
+ def ex_targetpool_remove_node(self, targetpool, node):
+ """
+ Remove a node from a target pool.
+
+ :param targetpool: The targetpool to remove node from
+ :type targetpool: ``str`` or :class:`GCETargetPool`
+
+ :param node: The node to remove
+ :type node: ``str`` or :class:`Node`
+
+ :returns: True if successful
+ :rtype: ``bool``
+ """
+ if not hasattr(targetpool, 'name'):
+ targetpool = self.ex_get_targetpool(targetpool)
+ if not hasattr(node, 'name'):
+ node = self.ex_get_node(node, 'all')
+
+ targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]}
+
+ request = '/regions/%s/targetPools/%s/removeInstance' % (
+ targetpool.region.name, targetpool.name)
+ self.connection.async_request(request, method='POST',
+ data=targetpool_data)
+ # Remove node object from node list
+ index = None
+ for i, nd in enumerate(targetpool.nodes):
+ if nd.name == node.name:
+ index = i
+ break
+ if index is not None:
+ targetpool.nodes.pop(index)
+ return True
+
+ def ex_targetpool_remove_healthcheck(self, targetpool, healthcheck):
+ """
+ Remove a health check from a target pool.
+
+ :param targetpool: The targetpool to remove health check from
+ :type targetpool: ``str`` or :class:`GCETargetPool`
+
+ :param healthcheck: The healthcheck to remove
+ :type healthcheck: ``str`` or :class:`GCEHealthCheck`
+
+ :returns: True if successful
+ :rtype: ``bool``
+ """
+ if not hasattr(targetpool, 'name'):
+ targetpool = self.ex_get_targetpool(targetpool)
+ if not hasattr(healthcheck, 'name'):
+ healthcheck = self.ex_get_healthcheck(healthcheck)
+
+ targetpool_data = {'healthCheck': healthcheck.extra['selfLink']}
+
+ request = '/regions/%s/targetPools/%s/removeHealthCheck' % (
+ targetpool.region.name, targetpool.name)
+ self.connection.async_request(request, method='POST',
+ data=targetpool_data)
+ # Remove healthcheck object from healthchecks list
+ index = None
+ for i, hc in enumerate(targetpool.healthchecks):
+ if hc.name == healthcheck.name:
+ index = i
+ if index is not None:
+ targetpool.healthchecks.pop(index)
+ return True
+
+ def reboot_node(self, node):
+ """
+ Reboot a node.
+
+ :param node: Node to be rebooted
+ :type node: :class:`Node`
+
+ :return: True if successful, False if not
+ :rtype: ``bool``
+ """
+ request = '/zones/%s/instances/%s/reset' % (node.extra['zone'].name,
+ node.name)
+ self.connection.async_request(request, method='POST',
+ data='ignored')
+ return True
+
+ def ex_set_node_tags(self, node, tags):
+ """
+ Set the tags on a Node instance.
+
+ Note that this updates the node object directly.
+
+ :param node: Node object
+ :type node: :class:`Node`
+
+ :param tags: List of tags to apply to the object
+ :type tags: ``list`` of ``str``
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/zones/%s/instances/%s/setTags' % (node.extra['zone'].name,
+ node.name)
+
+ tags_data = {}
+ tags_data['items'] = tags
+ tags_data['fingerprint'] = node.extra['tags_fingerprint']
+
+ self.connection.async_request(request, method='POST',
+ data=tags_data)
+ new_node = self.ex_get_node(node.name, node.extra['zone'])
+ node.extra['tags'] = new_node.extra['tags']
+ node.extra['tags_fingerprint'] = new_node.extra['tags_fingerprint']
+ return True
+
+ def ex_set_node_scheduling(self, node, on_host_maintenance=None,
+ automatic_restart=None):
+ """Set the maintenance behavior for the node.
+
+ See `Scheduling `_ documentation for more info.
+
+ :param node: Node object
+ :type node: :class:`Node`
+
+ :keyword on_host_maintenance: Defines whether node should be
+ terminated or migrated when host machine
+ goes down. Acceptable values are:
+ 'MIGRATE' or 'TERMINATE' (If not
+ supplied, value will be reset to GCE
+ default value for the instance type.)
+ :type on_host_maintenance: ``str``
+
+ :keyword automatic_restart: Defines whether the instance should be
+ automatically restarted when it is
+ terminated by Compute Engine. (If not
+ supplied, value will be set to the GCE
+ default value for the instance type.)
+ :type automatic_restart: ``bool``
+
+ :return: True if successful.
+ :rtype: ``bool``
+ """
+ if not hasattr(node, 'name'):
+ node = self.ex_get_node(node, 'all')
+ if on_host_maintenance is not None:
+ on_host_maintenance = on_host_maintenance.upper()
+ ohm_values = ['MIGRATE', 'TERMINATE']
+ if on_host_maintenance not in ohm_values:
+ raise ValueError('on_host_maintenance must be one of %s' %
+ ','.join(ohm_values))
+
+ request = '/zones/%s/instances/%s/setScheduling' % (
+ node.extra['zone'].name, node.name)
+
+ scheduling_data = {}
+ if on_host_maintenance is not None:
+ scheduling_data['onHostMaintenance'] = on_host_maintenance
+ if automatic_restart is not None:
+ scheduling_data['automaticRestart'] = automatic_restart
+
+ self.connection.async_request(request, method='POST',
+ data=scheduling_data)
+
+ new_node = self.ex_get_node(node.name, node.extra['zone'])
+ node.extra['scheduling'] = new_node.extra['scheduling']
+
+ ohm = node.extra['scheduling'].get('onHostMaintenance')
+ ar = node.extra['scheduling'].get('automaticRestart')
+
+ success = True
+ if on_host_maintenance not in [None, ohm]:
+ success = False
+ if automatic_restart not in [None, ar]:
+ success = False
+
+ return success
+
+ def deploy_node(self, name, size, image, script, location=None,
+ ex_network='default', ex_tags=None):
+ """
+ Create a new node and run a script on start-up.
+
+ :param name: The name of the node to create.
+ :type name: ``str``
+
+ :param size: The machine type to use.
+ :type size: ``str`` or :class:`GCENodeSize`
+
+ :param image: The image to use to create the node.
+ :type image: ``str`` or :class:`GCENodeImage`
+
+ :param script: File path to start-up script
+ :type script: ``str``
+
+ :keyword location: The location (zone) to create the node in.
+ :type location: ``str`` or :class:`NodeLocation` or
+ :class:`GCEZone` or ``None``
+
+ :keyword ex_network: The network to associate with the node.
+ :type ex_network: ``str`` or :class:`GCENetwork`
+
+ :keyword ex_tags: A list of tags to associate with the node.
+ :type ex_tags: ``list`` of ``str`` or ``None``
+
+ :return: A Node object for the new node.
+ :rtype: :class:`Node`
+ """
+ with open(script, 'r') as f:
+ script_data = f.read()
+ metadata = {'items': [{'key': 'startup-script',
+ 'value': script_data}]}
+
+ return self.create_node(name, size, image, location=location,
+ ex_network=ex_network, ex_tags=ex_tags,
+ ex_metadata=metadata)
+
+ def attach_volume(self, node, volume, device=None, ex_mode=None,
+ ex_boot=False):
+ """
+ Attach a volume to a node.
+
+ If volume is None, a scratch disk will be created and attached.
+
+ :param node: The node to attach the volume to
+ :type node: :class:`Node`
+
+ :param volume: The volume to attach. If none, a scratch disk will be
+ attached.
+ :type volume: :class:`StorageVolume` or ``None``
+
+ :keyword device: The device name to attach the volume as. Defaults to
+ volume name.
+ :type device: ``str``
+
+ :keyword ex_mode: Either 'READ_WRITE' or 'READ_ONLY'
+ :type ex_mode: ``str``
+
+ :keyword ex_boot: If true, disk will be attached as a boot disk
+ :type ex_boot: ``bool``
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ volume_data = {}
+ if volume is None:
+ volume_data['type'] = 'SCRATCH'
+ else:
+ volume_data['type'] = 'PERSISTENT'
+ volume_data['source'] = volume.extra['selfLink']
+ volume_data['kind'] = 'compute#attachedDisk'
+ volume_data['mode'] = ex_mode or 'READ_WRITE'
+
+ if device:
+ volume_data['deviceName'] = device
+ else:
+ volume_data['deviceName'] = volume.name
+
+ volume_data['boot'] = ex_boot
+
+ request = '/zones/%s/instances/%s/attachDisk' % (
+ node.extra['zone'].name, node.name)
+ self.connection.async_request(request, method='POST',
+ data=volume_data)
+ return True
+
+ def detach_volume(self, volume, ex_node=None):
+ """
+ Detach a volume from a node.
+
+ :param volume: Volume object to detach
+ :type volume: :class:`StorageVolume`
+
+ :keyword ex_node: Node object to detach volume from (required)
+ :type ex_node: :class:`Node`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ if not ex_node:
+ return False
+ request = '/zones/%s/instances/%s/detachDisk?deviceName=%s' % (
+ ex_node.extra['zone'].name, ex_node.name, volume.name)
+
+ self.connection.async_request(request, method='POST',
+ data='ignored')
+ return True
+
+ def ex_set_volume_auto_delete(self, volume, node, auto_delete=True):
+ """
+ Sets the auto-delete flag for a volume attached to a node.
+
+ :param volume: Volume object to auto-delete
+ :type volume: :class:`StorageVolume`
+
+ :param ex_node: Node object to auto-delete volume from
+ :type ex_node: :class:`Node`
+
+ :keyword auto_delete: Flag to set for the auto-delete value
+ :type auto_delete: ``bool`` (default True)
+
+ :return: True if successfull
+ :rtype: ``bool``
+ """
+ request = '/zones/%s/instances/%s/setDiskAutoDelete' % (
+ node.extra['zone'].name, node.name
+ )
+ delete_params = {
+ 'deviceName': volume,
+ 'autoDelete': auto_delete,
+ }
+ self.connection.async_request(request, method='POST',
+ params=delete_params)
+ return True
+
+ def ex_destroy_address(self, address):
+ """
+ Destroy a static address.
+
+ :param address: Address object to destroy
+ :type address: :class:`GCEAddress`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/regions/%s/addresses/%s' % (address.region.name,
+ address.name)
+
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_delete_image(self, image):
+ """
+ Delete a specific image resource.
+
+ :param image: Image object to delete
+ :type image: ``str`` or :class:`GCENodeImage`
+
+ :return: True if successfull
+ :rtype: ``bool``
+ """
+ if not hasattr(image, 'name'):
+ image = self.ex_get_image(image)
+
+ request = '/global/images/%s' % (image.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_deprecate_image(self, image, replacement, state=None):
+ """
+ Deprecate a specific image resource.
+
+ :param image: Image object to deprecate
+ :type image: ``str`` or :class: `GCENodeImage`
+
+ :param replacement: Image object to use as a replacement
+ :type replacement: ``str`` or :class: `GCENodeImage`
+
+ :param state: State of the image
+ :type state: ``str``
+
+ :return: True if successfull
+ :rtype: ``bool``
+ """
+ if not hasattr(image, 'name'):
+ image = self.ex_get_image(image)
+
+ if not hasattr(replacement, 'name'):
+ replacement = self.ex_get_image(replacement)
+
+ if state is None:
+ state = 'DEPRECATED'
+
+ possible_states = ['DELETED', 'DEPRECATED', 'OBSOLETE']
+
+ if state not in possible_states:
+ raise ValueError('state must be one of %s'
+ % ','.join(possible_states))
+
+ image_data = {
+ 'state': state,
+ 'replacement': replacement.extra['selfLink'],
+ }
+
+ request = '/global/images/%s/deprecate' % (image.name)
+
+ self.connection.request(
+ request, method='POST', data=image_data).object
+
+ return True
+
+ def ex_destroy_healthcheck(self, healthcheck):
+ """
+ Destroy a healthcheck.
+
+ :param healthcheck: Health check object to destroy
+ :type healthcheck: :class:`GCEHealthCheck`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/global/httpHealthChecks/%s' % (healthcheck.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_destroy_firewall(self, firewall):
+ """
+ Destroy a firewall.
+
+ :param firewall: Firewall object to destroy
+ :type firewall: :class:`GCEFirewall`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/global/firewalls/%s' % (firewall.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_destroy_forwarding_rule(self, forwarding_rule):
+ """
+ Destroy a forwarding rule.
+
+ :param forwarding_rule: Forwarding Rule object to destroy
+ :type forwarding_rule: :class:`GCEForwardingRule`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/regions/%s/forwardingRules/%s' % (
+ forwarding_rule.region.name, forwarding_rule.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_destroy_network(self, network):
+ """
+ Destroy a network.
+
+ :param network: Network object to destroy
+ :type network: :class:`GCENetwork`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/global/networks/%s' % (network.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def destroy_node(self, node, destroy_boot_disk=False):
+ """
+ Destroy a node.
+
+ :param node: Node object to destroy
+ :type node: :class:`Node`
+
+ :keyword destroy_boot_disk: If true, also destroy the node's
+ boot disk. (Note that this keyword is not
+ accessible from the node's .destroy()
+ method.)
+ :type destroy_boot_disk: ``bool``
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/zones/%s/instances/%s' % (node.extra['zone'].name,
+ node.name)
+ self.connection.async_request(request, method='DELETE')
+ if destroy_boot_disk and node.extra['boot_disk']:
+ node.extra['boot_disk'].destroy()
+ return True
+
+ def ex_destroy_multiple_nodes(self, node_list, ignore_errors=True,
+ destroy_boot_disk=False, poll_interval=2,
+ timeout=DEFAULT_TASK_COMPLETION_TIMEOUT):
+ """
+ Destroy multiple nodes at once.
+
+ :param node_list: List of nodes to destroy
+ :type node_list: ``list`` of :class:`Node`
+
+ :keyword ignore_errors: If true, don't raise an exception if one or
+ more nodes fails to be destroyed.
+ :type ignore_errors: ``bool``
+
+ :keyword destroy_boot_disk: If true, also destroy the nodes' boot
+ disks.
+ :type destroy_boot_disk: ``bool``
+
+ :keyword poll_interval: Number of seconds between status checks.
+ :type poll_interval: ``int``
+
+ :keyword timeout: Number of seconds to wait for all nodes to be
+ destroyed.
+ :type timeout: ``int``
+
+ :return: A list of boolean values. One for each node. True means
+ that the node was successfully destroyed.
+ :rtype: ``list`` of ``bool``
+ """
+ status_list = []
+ complete = False
+ start_time = time.time()
+ for node in node_list:
+ request = '/zones/%s/instances/%s' % (node.extra['zone'].name,
+ node.name)
+ try:
+ response = self.connection.request(request,
+ method='DELETE').object
+ except GoogleBaseError:
+ self._catch_error(ignore_errors=ignore_errors)
+ response = None
+
+ status = {'node': node,
+ 'node_success': False,
+ 'node_response': response,
+ 'disk_success': not destroy_boot_disk,
+ 'disk_response': None}
+
+ status_list.append(status)
+
+ while not complete:
+ if (time.time() - start_time >= timeout):
+ raise Exception("Timeout (%s sec) while waiting to delete "
+ "multiple instances")
+ complete = True
+ for status in status_list:
+ # If one of the operations is running, check the status
+ operation = status['node_response'] or status['disk_response']
+ delete_disk = False
+ if operation:
+ no_errors = True
+ try:
+ response = self.connection.request(
+ operation['selfLink']).object
+ except GoogleBaseError:
+ self._catch_error(ignore_errors=ignore_errors)
+ no_errors = False
+ response = {'status': 'DONE'}
+ if response['status'] == 'DONE':
+ # If a node was deleted, update status and indicate
+ # that the disk is ready to be deleted.
+ if status['node_response']:
+ status['node_response'] = None
+ status['node_success'] = no_errors
+ delete_disk = True
+ else:
+ status['disk_response'] = None
+ status['disk_success'] = no_errors
+ # If we are destroying disks, and the node has been deleted,
+ # destroy the disk.
+ if delete_disk and destroy_boot_disk:
+ boot_disk = status['node'].extra['boot_disk']
+ if boot_disk:
+ request = '/zones/%s/disks/%s' % (
+ boot_disk.extra['zone'].name, boot_disk.name)
+ try:
+ response = self.connection.request(
+ request, method='DELETE').object
+ except GoogleBaseError:
+ self._catch_error(ignore_errors=ignore_errors)
+ no_errors = False
+ response = None
+ status['disk_response'] = response
+ else: # If there is no boot disk, ignore
+ status['disk_success'] = True
+ operation = status['node_response'] or status['disk_response']
+ if operation:
+ time.sleep(poll_interval)
+ complete = False
+
+ success = []
+ for status in status_list:
+ s = status['node_success'] and status['disk_success']
+ success.append(s)
+ return success
+
+ def ex_destroy_targetpool(self, targetpool):
+ """
+ Destroy a target pool.
+
+ :param targetpool: TargetPool object to destroy
+ :type targetpool: :class:`GCETargetPool`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/regions/%s/targetPools/%s' % (targetpool.region.name,
+ targetpool.name)
+
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def destroy_volume(self, volume):
+ """
+ Destroy a volume.
+
+ :param volume: Volume object to destroy
+ :type volume: :class:`StorageVolume`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/zones/%s/disks/%s' % (volume.extra['zone'].name,
+ volume.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def destroy_volume_snapshot(self, snapshot):
+ """
+ Destroy a snapshot.
+
+ :param snapshot: Snapshot object to destroy
+ :type snapshot: :class:`GCESnapshot`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ request = '/global/snapshots/%s' % (snapshot.name)
+ self.connection.async_request(request, method='DELETE')
+ return True
+
+ def ex_get_address(self, name, region=None):
+ """
+ Return an Address object based on an address name and optional region.
+
+ :param name: The name of the address
+ :type name: ``str``
+
+ :keyword region: The region to search for the address in (set to
+ 'all' to search all regions)
+ :type region: ``str`` :class:`GCERegion` or ``None``
+
+ :return: An Address object for the address
+ :rtype: :class:`GCEAddress`
+ """
+ region = self._set_region(region) or self._find_zone_or_region(
+ name, 'addresses', region=True, res_name='Address')
+ request = '/regions/%s/addresses/%s' % (region.name, name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_address(response)
+
+ def ex_get_healthcheck(self, name):
+ """
+ Return a HealthCheck object based on the healthcheck name.
+
+ :param name: The name of the healthcheck
+ :type name: ``str``
+
+ :return: A GCEHealthCheck object
+ :rtype: :class:`GCEHealthCheck`
+ """
+ request = '/global/httpHealthChecks/%s' % (name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_healthcheck(response)
+
+ def ex_get_firewall(self, name):
+ """
+ Return a Firewall object based on the firewall name.
+
+ :param name: The name of the firewall
+ :type name: ``str``
+
+ :return: A GCEFirewall object
+ :rtype: :class:`GCEFirewall`
+ """
+ request = '/global/firewalls/%s' % (name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_firewall(response)
+
+ def ex_get_forwarding_rule(self, name, region=None):
+ """
+ Return a Forwarding Rule object based on the forwarding rule name.
+
+ :param name: The name of the forwarding rule
+ :type name: ``str``
+
+ :keyword region: The region to search for the rule in (set to 'all'
+ to search all regions).
+ :type region: ``str`` or ``None``
+
+ :return: A GCEForwardingRule object
+ :rtype: :class:`GCEForwardingRule`
+ """
+ region = self._set_region(region) or self._find_zone_or_region(
+ name, 'forwardingRules', region=True, res_name='ForwardingRule')
+ request = '/regions/%s/forwardingRules/%s' % (region.name, name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_forwarding_rule(response)
+
+ def ex_get_image(self, partial_name):
+ """
+ Return an GCENodeImage object based on the name or link provided.
+
+ :param partial_name: The name, partial name, or full path of a GCE
+ image.
+ :type partial_name: ``str``
+
+ :return: GCENodeImage object based on provided information or None if
+ an image with that name is not found.
+ :rtype: :class:`GCENodeImage` or ``None``
+ """
+ if partial_name.startswith('https://'):
+ response = self.connection.request(partial_name, method='GET')
+ return self._to_node_image(response.object)
+ image = self._match_images(None, partial_name)
+ if not image:
+ if partial_name.startswith('debian'):
+ image = self._match_images('debian-cloud', partial_name)
+ elif partial_name.startswith('centos'):
+ image = self._match_images('centos-cloud', partial_name)
+ elif partial_name.startswith('container-vm'):
+ image = self._match_images('google-containers', partial_name)
+
+ return image
+
+ def ex_get_network(self, name):
+ """
+ Return a Network object based on a network name.
+
+ :param name: The name of the network
+ :type name: ``str``
+
+ :return: A Network object for the network
+ :rtype: :class:`GCENetwork`
+ """
+ request = '/global/networks/%s' % (name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_network(response)
+
+ def ex_get_node(self, name, zone=None):
+ """
+ Return a Node object based on a node name and optional zone.
+
+ :param name: The name of the node
+ :type name: ``str``
+
+ :keyword zone: The zone to search for the node in. If set to 'all',
+ search all zones for the instance.
+ :type zone: ``str`` or :class:`GCEZone` or
+ :class:`NodeLocation` or ``None``
+
+ :return: A Node object for the node
+ :rtype: :class:`Node`
+ """
+ zone = self._set_zone(zone) or self._find_zone_or_region(
+ name, 'instances', res_name='Node')
+ request = '/zones/%s/instances/%s' % (zone.name, name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_node(response)
+
+ def ex_get_project(self):
+ """
+ Return a Project object with project-wide information.
+
+ :return: A GCEProject object
+ :rtype: :class:`GCEProject`
+ """
+ response = self.connection.request('', method='GET').object
+ return self._to_project(response)
+
+ def ex_get_size(self, name, zone=None):
+ """
+ Return a size object based on a machine type name and zone.
+
+ :param name: The name of the node
+ :type name: ``str``
+
+ :keyword zone: The zone to search for the machine type in
+ :type zone: ``str`` or :class:`GCEZone` or
+ :class:`NodeLocation` or ``None``
+
+ :return: A GCENodeSize object for the machine type
+ :rtype: :class:`GCENodeSize`
+ """
+ zone = zone or self.zone
+ if not hasattr(zone, 'name'):
+ zone = self.ex_get_zone(zone)
+ request = '/zones/%s/machineTypes/%s' % (zone.name, name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_node_size(response)
+
+ def ex_get_snapshot(self, name):
+ """
+ Return a Snapshot object based on snapshot name.
+
+ :param name: The name of the snapshot
+ :type name: ``str``
+
+ :return: A GCESnapshot object for the snapshot
+ :rtype: :class:`GCESnapshot`
+ """
+ request = '/global/snapshots/%s' % (name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_snapshot(response)
+
+ def ex_get_volume(self, name, zone=None):
+ """
+ Return a Volume object based on a volume name and optional zone.
+
+ :param name: The name of the volume
+ :type name: ``str``
+
+ :keyword zone: The zone to search for the volume in (set to 'all' to
+ search all zones)
+ :type zone: ``str`` or :class:`GCEZone` or :class:`NodeLocation`
+ or ``None``
+
+ :return: A StorageVolume object for the volume
+ :rtype: :class:`StorageVolume`
+ """
+ zone = self._set_zone(zone) or self._find_zone_or_region(
+ name, 'disks', res_name='Volume')
+ request = '/zones/%s/disks/%s' % (zone.name, name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_storage_volume(response)
+
+ def ex_get_region(self, name):
+ """
+ Return a Region object based on the region name.
+
+ :param name: The name of the region.
+ :type name: ``str``
+
+ :return: A GCERegion object for the region
+ :rtype: :class:`GCERegion`
+ """
+ if name.startswith('https://'):
+ short_name = self._get_components_from_path(name)['name']
+ request = name
+ else:
+ short_name = name
+ request = '/regions/%s' % (name)
+ # Check region cache first
+ if short_name in self.region_dict:
+ return self.region_dict[short_name]
+ # Otherwise, look up region information
+ response = self.connection.request(request, method='GET').object
+ return self._to_region(response)
+
+ def ex_get_targetpool(self, name, region=None):
+ """
+ Return a TargetPool object based on a name and optional region.
+
+ :param name: The name of the target pool
+ :type name: ``str``
+
+ :keyword region: The region to search for the target pool in (set to
+ 'all' to search all regions).
+ :type region: ``str`` or :class:`GCERegion` or ``None``
+
+ :return: A TargetPool object for the pool
+ :rtype: :class:`GCETargetPool`
+ """
+ region = self._set_region(region) or self._find_zone_or_region(
+ name, 'targetPools', region=True, res_name='TargetPool')
+ request = '/regions/%s/targetPools/%s' % (region.name, name)
+ response = self.connection.request(request, method='GET').object
+ return self._to_targetpool(response)
+
+ def ex_get_zone(self, name):
+ """
+ Return a Zone object based on the zone name.
+
+ :param name: The name of the zone.
+ :type name: ``str``
+
+ :return: A GCEZone object for the zone or None if not found
+ :rtype: :class:`GCEZone` or ``None``
+ """
+ if name.startswith('https://'):
+ short_name = self._get_components_from_path(name)['name']
+ request = name
+ else:
+ short_name = name
+ request = '/zones/%s' % (name)
+ # Check zone cache first
+ if short_name in self.zone_dict:
+ return self.zone_dict[short_name]
+ # Otherwise, look up zone information
+ try:
+ response = self.connection.request(request, method='GET').object
+ except ResourceNotFoundError:
+ return None
+ return self._to_zone(response)
+
+ def ex_copy_image(self, name, url, description=None):
+ """
+ Copy an image to your image collection.
+
+ :param name: The name of the image
+ :type name: ``str``
+
+ :param url: The URL to the image. The URL can start with `gs://`
+ :param url: ``str``
+
+ :param description: The description of the image
+ :type description: ``str``
+
+ :return: NodeImage object based on provided information or None if an
+ image with that name is not found.
+ :rtype: :class:`NodeImage` or ``None``
+ """
+
+ # the URL for an image can start with gs://
+ if url.startswith('gs://'):
+ url = url.replace('gs://', 'https://storage.googleapis.com/', 1)
+
+ image_data = {
+ 'name': name,
+ 'description': description,
+ 'sourceType': 'RAW',
+ 'rawDisk': {
+ 'source': url,
+ },
+ }
+
+ request = '/global/images'
+ self.connection.async_request(request, method='POST',
+ data=image_data)
+ return self.ex_get_image(name)
+
+ def _ex_connection_class_kwargs(self):
+ return {'auth_type': self.auth_type,
+ 'project': self.project,
+ 'scopes': self.scopes}
+
+ def _catch_error(self, ignore_errors=False):
+ """
+ Catch an exception and raise it unless asked to ignore it.
+
+ :keyword ignore_errors: If true, just return the error. Otherwise,
+ raise the error.
+ :type ignore_errors: ``bool``
+
+ :return: The exception that was raised.
+ :rtype: :class:`Exception`
+ """
+ e = sys.exc_info()[1]
+ if ignore_errors:
+ return e
+ else:
+ raise e
+
+ def _get_components_from_path(self, path):
+ """
+ Return a dictionary containing name & zone/region from a request path.
+
+ :param path: HTTP request path (e.g.
+ '/project/pjt-name/zones/us-central1-a/instances/mynode')
+ :type path: ``str``
+
+ :return: Dictionary containing name and zone/region of resource
+ :rtype ``dict``
+ """
+ region = None
+ zone = None
+ glob = False
+ components = path.split('/')
+ name = components[-1]
+ if components[-4] == 'regions':
+ region = components[-3]
+ elif components[-4] == 'zones':
+ zone = components[-3]
+ elif components[-3] == 'global':
+ glob = True
+
+ return {'name': name, 'region': region, 'zone': zone, 'global': glob}
+
+ def _get_region_from_zone(self, zone):
+ """
+ Return the Region object that contains the given Zone object.
+
+ :param zone: Zone object
+ :type zone: :class:`GCEZone`
+
+ :return: Region object that contains the zone
+ :rtype: :class:`GCERegion`
+ """
+ for region in self.region_list:
+ zones = [z.name for z in region.zones]
+ if zone.name in zones:
+ return region
+
+ def _find_zone_or_region(self, name, res_type, region=False,
+ res_name=None):
+ """
+ Find the zone or region for a named resource.
+
+ :param name: Name of resource to find
+ :type name: ``str``
+
+ :param res_type: Type of resource to find.
+ Examples include: 'disks', 'instances' or 'addresses'
+ :type res_type: ``str``
+
+ :keyword region: If True, search regions instead of zones
+ :type region: ``bool``
+
+ :keyword res_name: The name of the resource type for error messages.
+ Examples: 'Volume', 'Node', 'Address'
+ :keyword res_name: ``str``
+
+ :return: Zone/Region object for the zone/region for the resource.
+ :rtype: :class:`GCEZone` or :class:`GCERegion`
+ """
+ if region:
+ rz = 'region'
+ else:
+ rz = 'zone'
+ rz_name = None
+ res_name = res_name or res_type
+ request = '/aggregated/%s' % (res_type)
+ res_list = self.connection.request(request).object
+ for k, v in res_list['items'].items():
+ for res in v.get(res_type, []):
+ if res['name'] == name:
+ rz_name = k.replace('%ss/' % (rz), '')
+ break
+ if not rz_name:
+ raise ResourceNotFoundError(
+ '%s \'%s\' not found in any %s.' % (res_name, name, rz),
+ None, None)
+ else:
+ getrz = getattr(self, 'ex_get_%s' % (rz))
+ return getrz(rz_name)
+
+ def _match_images(self, project, partial_name):
+ """
+ Find the latest image, given a partial name.
+
+ For example, providing 'debian-7' will return the image object for the
+ most recent image with a name that starts with 'debian-7' in the
+ supplied project. If no project is given, it will search your own
+ project.
+
+ :param project: The name of the project to search for images.
+ Examples include: 'debian-cloud' and 'centos-cloud'.
+ :type project: ``str`` or ``None``
+
+ :param partial_name: The full name or beginning of a name for an
+ image.
+ :type partial_name: ``str``
+
+ :return: The latest image object that matches the partial name or None
+ if no matching image is found.
+ :rtype: :class:`GCENodeImage` or ``None``
+ """
+ project_images = self.list_images(project)
+ partial_match = []
+ for image in project_images:
+ if image.name == partial_name:
+ return image
+ if image.name.startswith(partial_name):
+ ts = timestamp_to_datetime(image.extra['creationTimestamp'])
+ if not partial_match or partial_match[0] < ts:
+ partial_match = [ts, image]
+
+ if partial_match:
+ return partial_match[1]
+
+ def _set_region(self, region):
+ """
+ Return the region to use for listing resources.
+
+ :param region: A name, region object, None, or 'all'
+ :type region: ``str`` or :class:`GCERegion` or ``None``
+
+ :return: A region object or None if all regions should be considered
+ :rtype: :class:`GCERegion` or ``None``
+ """
+ region = region or self.region
+
+ if region == 'all' or region is None:
+ return None
+
+ if not hasattr(region, 'name'):
+ region = self.ex_get_region(region)
+ return region
+
+ def _set_zone(self, zone):
+ """
+ Return the zone to use for listing resources.
+
+ :param zone: A name, zone object, None, or 'all'
+ :type region: ``str`` or :class:`GCEZone` or ``None``
+
+ :return: A zone object or None if all zones should be considered
+ :rtype: :class:`GCEZone` or ``None``
+ """
+ zone = zone or self.zone
+
+ if zone == 'all' or zone is None:
+ return None
+
+ if not hasattr(zone, 'name'):
+ zone = self.ex_get_zone(zone)
+ return zone
+
    def _create_node_req(self, name, size, image, location, network,
                         tags=None, metadata=None, boot_disk=None,
                         external_ip='ephemeral'):
        """
        Returns a request and body to create a new node. This is a helper
        method to support both :class:`create_node` and
        :class:`ex_create_multiple_nodes`.

        :param name: The name of the node to create.
        :type name: ``str``

        :param size: The machine type to use.
        :type size: :class:`GCENodeSize`

        :param image: The image to use to create the node (or, if using a
                      persistent disk, the image the disk was created from).
        :type image: :class:`GCENodeImage`

        :param location: The location (zone) to create the node in.
        :type location: :class:`NodeLocation` or :class:`GCEZone`

        :param network: The network to associate with the node.
        :type network: :class:`GCENetwork`

        :keyword tags: A list of tags to associate with the node.
        :type tags: ``list`` of ``str``

        :keyword metadata: Metadata dictionary for instance.
        :type metadata: ``dict``

        :keyword boot_disk: Persistent boot disk to attach.
        :type boot_disk: :class:`StorageVolume`

        :keyword external_ip: The external IP address to use. If 'ephemeral'
                              (default), a new non-static address will be
                              used. If 'None', then no external address will
                              be used. To use an existing static IP address,
                              a GCEAddress object should be passed in.
        :type external_ip: :class:`GCEAddress` or ``str`` or None

        :return: A tuple containing a request string and a node_data dict.
        :rtype: ``tuple`` of ``str`` and ``dict``
        """
        node_data = {}
        node_data['machineType'] = size.extra['selfLink']
        node_data['name'] = name
        if tags:
            node_data['tags'] = {'items': tags}
        if metadata:
            node_data['metadata'] = metadata

        # Boot from an existing persistent disk when one is given;
        # otherwise boot directly from the image.
        if boot_disk:
            disks = [{'kind': 'compute#attachedDisk',
                      'boot': True,
                      'type': 'PERSISTENT',
                      'mode': 'READ_WRITE',
                      'deviceName': boot_disk.name,
                      'zone': boot_disk.extra['zone'].extra['selfLink'],
                      'source': boot_disk.extra['selfLink']}]
            node_data['disks'] = disks
        else:
            node_data['image'] = image.extra['selfLink']

        # A single network interface; an accessConfig entry gives the node
        # an external (NAT) address, optionally pinned to a static IP.
        ni = [{'kind': 'compute#instanceNetworkInterface',
               'network': network.extra['selfLink']}]
        if external_ip:
            access_configs = [{'name': 'External NAT',
                               'type': 'ONE_TO_ONE_NAT'}]
            if hasattr(external_ip, 'address'):
                access_configs[0]['natIP'] = external_ip.address
            ni[0]['accessConfigs'] = access_configs
        node_data['networkInterfaces'] = ni

        request = '/zones/%s/instances' % (location.name)

        return request, node_data
+
    def _multi_create_disk(self, status, node_attrs):
        """Create disk for ex_create_multiple_nodes.

        Reuses an existing disk when allowed, otherwise issues an
        asynchronous disk-create request and records the pending
        operation in ``status['disk_response']``.

        :param status: Dictionary for holding node/disk creation status.
                       (This dictionary is modified by this method)
        :type status: ``dict``

        :param node_attrs: Dictionary for holding node attribute information.
                           (size, image, location, etc.)
        :type node_attrs: ``dict``
        """
        disk = None
        # Check for existing disk
        if node_attrs['use_existing_disk']:
            try:
                disk = self.ex_get_volume(status['name'],
                                          node_attrs['location'])
            except ResourceNotFoundError:
                # No disk by this name yet; fall through and create one.
                pass

        if disk:
            status['disk'] = disk
        else:
            # Create disk and return response object back in the status dict.
            # Or, if there is an error, mark as failed.
            disk_req, disk_data, disk_params = self._create_vol_req(
                None, status['name'], location=node_attrs['location'],
                image=node_attrs['image'])
            try:
                disk_res = self.connection.request(
                    disk_req, method='POST', data=disk_data,
                    params=disk_params).object
            except GoogleBaseError:
                # _catch_error re-raises unless ignore_errors permits it;
                # presumably it returns the caught exception for inspection.
                e = self._catch_error(
                    ignore_errors=node_attrs['ignore_errors'])
                error = e.value
                code = e.code
                disk_res = None
                status['disk'] = GCEFailedDisk(status['name'],
                                               error, code)
            status['disk_response'] = disk_res
+
    def _multi_check_disk(self, status, node_attrs):
        """Check disk status for ex_create_multiple_nodes.

        Polls the pending create operation in ``status['disk_response']``
        and, once it reports DONE, stores the finished volume (or a
        :class:`GCEFailedDisk` on error) in ``status['disk']``.

        :param status: Dictionary for holding node/disk creation status.
                       (This dictionary is modified by this method)
        :type status: ``dict``

        :param node_attrs: Dictionary for holding node attribute information.
                           (size, image, location, etc.)
        :type node_attrs: ``dict``
        """
        error = None
        try:
            response = self.connection.request(
                status['disk_response']['selfLink']).object
        except GoogleBaseError:
            e = self._catch_error(ignore_errors=node_attrs['ignore_errors'])
            error = e.value
            code = e.code
            # Fake a completed operation so the failure is recorded below.
            response = {'status': 'DONE'}
        if response['status'] == 'DONE':
            # Clearing the pending operation is the caller's "disk finished"
            # signal.
            status['disk_response'] = None
            if error:
                status['disk'] = GCEFailedDisk(status['name'], error, code)
            else:
                status['disk'] = self.ex_get_volume(status['name'],
                                                    node_attrs['location'])
+
    def _multi_create_node(self, status, node_attrs):
        """Create node for ex_create_multiple_nodes.

        Issues an asynchronous instance-create request booting from the
        disk prepared earlier, recording the pending operation in
        ``status['node_response']``.

        :param status: Dictionary for holding node/disk creation status.
                       (This dictionary is modified by this method)
        :type status: ``dict``

        :param node_attrs: Dictionary for holding node attribute information.
                           (size, image, location, etc.)
        :type node_attrs: ``dict``
        """
        # If disk has an error, set the node as failed and return
        if hasattr(status['disk'], 'error'):
            status['node'] = status['disk']
            return

        # Create node and return response object in status dictionary.
        # Or, if there is an error, mark as failed.
        request, node_data = self._create_node_req(
            status['name'], node_attrs['size'], node_attrs['image'],
            node_attrs['location'], node_attrs['network'], node_attrs['tags'],
            node_attrs['metadata'], boot_disk=status['disk'],
            external_ip=node_attrs['external_ip'])
        try:
            node_res = self.connection.request(
                request, method='POST', data=node_data).object
        except GoogleBaseError:
            # _catch_error re-raises unless ignore_errors permits it.
            e = self._catch_error(ignore_errors=node_attrs['ignore_errors'])
            error = e.value
            code = e.code
            node_res = None
            status['node'] = GCEFailedNode(status['name'],
                                           error, code)
        status['node_response'] = node_res
+
    def _multi_check_node(self, status, node_attrs):
        """Check node status for ex_create_multiple_nodes.

        Polls the pending create operation in ``status['node_response']``
        and, once it reports DONE, stores the finished node (or a
        :class:`GCEFailedNode` on error) in ``status['node']``.

        :param status: Dictionary for holding node/disk creation status.
                       (This dictionary is modified by this method)
        :type status: ``dict``

        :param node_attrs: Dictionary for holding node attribute information.
                           (size, image, location, etc.)
        :type node_attrs: ``dict``
        """
        error = None
        try:
            response = self.connection.request(
                status['node_response']['selfLink']).object
        except GoogleBaseError:
            e = self._catch_error(ignore_errors=node_attrs['ignore_errors'])
            error = e.value
            code = e.code
            # Fake a completed operation so the failure is recorded below.
            response = {'status': 'DONE'}
        if response['status'] == 'DONE':
            # Clearing the pending operation is the caller's "node finished"
            # signal.
            status['node_response'] = None
            if error:
                status['node'] = GCEFailedNode(status['name'],
                                               error, code)
            else:
                status['node'] = self.ex_get_node(status['name'],
                                                  node_attrs['location'])
+
    def _create_vol_req(self, size, name, location=None, snapshot=None,
                        image=None):
        """
        Assemble the request/data for creating a volume.

        Used by create_volume and ex_create_multiple_nodes

        :param size: Size of volume to create (in GB). Can be None if image
                     or snapshot is supplied.
        :type size: ``int`` or ``str`` or ``None``

        :param name: Name of volume to create
        :type name: ``str``

        :keyword location: Location (zone) to create the volume in
        :type location: ``str`` or :class:`GCEZone` or
                        :class:`NodeLocation` or ``None``

        :keyword snapshot: Snapshot to create image from
        :type snapshot: :class:`GCESnapshot` or ``str`` or ``None``

        :keyword image: Image to create disk from.
        :type image: :class:`GCENodeImage` or ``str`` or ``None``

        :return: Tuple containing the request string, the data dictionary and
                 the URL parameters
        :rtype: ``tuple``
        """
        volume_data = {}
        params = None
        volume_data['name'] = name
        if size:
            volume_data['sizeGb'] = str(size)
        if image:
            if not hasattr(image, 'name'):
                image = self.ex_get_image(image)
            # The source image goes in the URL parameters, not the body.
            params = {'sourceImage': image.extra['selfLink']}
            volume_data['description'] = 'Image: %s' % (
                image.extra['selfLink'])
        if snapshot:
            if not hasattr(snapshot, 'name'):
                # Check for full URI to not break backward-compatibility
                if snapshot.startswith('https'):
                    snapshot = self._get_components_from_path(snapshot)['name']
                snapshot = self.ex_get_snapshot(snapshot)
            snapshot_link = snapshot.extra['selfLink']
            volume_data['sourceSnapshot'] = snapshot_link
            volume_data['description'] = 'Snapshot: %s' % (snapshot_link)
        # Resolve the target zone, defaulting to the driver's zone.
        location = location or self.zone
        if not hasattr(location, 'name'):
            location = self.ex_get_zone(location)
        request = '/zones/%s/disks' % (location.name)

        return request, volume_data, params
+
+ def _to_address(self, address):
+ """
+ Return an Address object from the json-response dictionary.
+
+ :param address: The dictionary describing the address.
+ :type address: ``dict``
+
+ :return: Address object
+ :rtype: :class:`GCEAddress`
+ """
+ extra = {}
+
+ region = self.ex_get_region(address['region'])
+
+ extra['selfLink'] = address.get('selfLink')
+ extra['status'] = address.get('status')
+ extra['creationTimestamp'] = address.get('creationTimestamp')
+
+ return GCEAddress(id=address['id'], name=address['name'],
+ address=address['address'],
+ region=region, driver=self, extra=extra)
+
+ def _to_healthcheck(self, healthcheck):
+ """
+ Return a HealthCheck object from the json-response dictionary.
+
+ :param healthcheck: The dictionary describing the healthcheck.
+ :type healthcheck: ``dict``
+
+ :return: HealthCheck object
+ :rtype: :class:`GCEHealthCheck`
+ """
+ extra = {}
+ extra['selfLink'] = healthcheck.get('selfLink')
+ extra['creationTimestamp'] = healthcheck.get('creationTimestamp')
+ extra['description'] = healthcheck.get('description')
+ extra['host'] = healthcheck.get('host')
+
+ return GCEHealthCheck(
+ id=healthcheck['id'], name=healthcheck['name'],
+ path=healthcheck.get('requestPath'), port=healthcheck.get('port'),
+ interval=healthcheck.get('checkIntervalSec'),
+ timeout=healthcheck.get('timeoutSec'),
+ unhealthy_threshold=healthcheck.get('unhealthyThreshold'),
+ healthy_threshold=healthcheck.get('healthyThreshold'),
+ driver=self, extra=extra)
+
+ def _to_firewall(self, firewall):
+ """
+ Return a Firewall object from the json-response dictionary.
+
+ :param firewall: The dictionary describing the firewall.
+ :type firewall: ``dict``
+
+ :return: Firewall object
+ :rtype: :class:`GCEFirewall`
+ """
+ extra = {}
+ extra['selfLink'] = firewall.get('selfLink')
+ extra['creationTimestamp'] = firewall.get('creationTimestamp')
+ extra['description'] = firewall.get('description')
+ extra['network_name'] = self._get_components_from_path(
+ firewall['network'])['name']
+
+ network = self.ex_get_network(extra['network_name'])
+ source_ranges = firewall.get('sourceRanges')
+ source_tags = firewall.get('sourceTags')
+ target_tags = firewall.get('targetTags')
+
+ return GCEFirewall(id=firewall['id'], name=firewall['name'],
+ allowed=firewall.get('allowed'), network=network,
+ source_ranges=source_ranges,
+ source_tags=source_tags,
+ target_tags=target_tags,
+ driver=self, extra=extra)
+
+ def _to_forwarding_rule(self, forwarding_rule):
+ """
+ Return a Forwarding Rule object from the json-response dictionary.
+
+ :param forwarding_rule: The dictionary describing the rule.
+ :type forwarding_rule: ``dict``
+
+ :return: ForwardingRule object
+ :rtype: :class:`GCEForwardingRule`
+ """
+ extra = {}
+ extra['selfLink'] = forwarding_rule.get('selfLink')
+ extra['portRange'] = forwarding_rule.get('portRange')
+ extra['creationTimestamp'] = forwarding_rule.get('creationTimestamp')
+ extra['description'] = forwarding_rule.get('description')
+
+ region = self.ex_get_region(forwarding_rule['region'])
+ targetpool = self.ex_get_targetpool(
+ self._get_components_from_path(forwarding_rule['target'])['name'])
+
+ return GCEForwardingRule(id=forwarding_rule['id'],
+ name=forwarding_rule['name'], region=region,
+ address=forwarding_rule.get('IPAddress'),
+ protocol=forwarding_rule.get('IPProtocol'),
+ targetpool=targetpool,
+ driver=self, extra=extra)
+
+ def _to_network(self, network):
+ """
+ Return a Network object from the json-response dictionary.
+
+ :param network: The dictionary describing the network.
+ :type network: ``dict``
+
+ :return: Network object
+ :rtype: :class:`GCENetwork`
+ """
+ extra = {}
+
+ extra['selfLink'] = network.get('selfLink')
+ extra['gatewayIPv4'] = network.get('gatewayIPv4')
+ extra['description'] = network.get('description')
+ extra['creationTimestamp'] = network.get('creationTimestamp')
+
+ return GCENetwork(id=network['id'], name=network['name'],
+ cidr=network.get('IPv4Range'),
+ driver=self, extra=extra)
+
+ def _to_node_image(self, image):
+ """
+ Return an Image object from the json-response dictionary.
+
+ :param image: The dictionary describing the image.
+ :type image: ``dict``
+
+ :return: Image object
+ :rtype: :class:`GCENodeImage`
+ """
+ extra = {}
+ extra['preferredKernel'] = image.get('preferredKernel', None)
+ extra['description'] = image.get('description', None)
+ extra['creationTimestamp'] = image.get('creationTimestamp')
+ extra['selfLink'] = image.get('selfLink')
+ extra['deprecated'] = image.get('deprecated', None)
+
+ return GCENodeImage(id=image['id'], name=image['name'], driver=self,
+ extra=extra)
+
+ def _to_node_location(self, location):
+ """
+ Return a Location object from the json-response dictionary.
+
+ :param location: The dictionary describing the location.
+ :type location: ``dict``
+
+ :return: Location object
+ :rtype: :class:`NodeLocation`
+ """
+ return NodeLocation(id=location['id'], name=location['name'],
+ country=location['name'].split('-')[0],
+ driver=self)
+
    def _to_node(self, node):
        """
        Return a Node object from the json-response dictionary.

        :param node: The dictionary describing the node.
        :type node: ``dict``

        :return: Node object
        :rtype: :class:`Node`
        """
        public_ips = []
        private_ips = []
        extra = {}

        extra['status'] = node.get('status')
        extra['description'] = node.get('description')
        extra['zone'] = self.ex_get_zone(node['zone'])
        extra['image'] = node.get('image')
        extra['machineType'] = node.get('machineType')
        extra['disks'] = node.get('disks', [])
        extra['networkInterfaces'] = node.get('networkInterfaces')
        extra['id'] = node['id']
        extra['selfLink'] = node.get('selfLink')
        extra['name'] = node['name']
        extra['metadata'] = node.get('metadata', {})
        extra['tags_fingerprint'] = node['tags']['fingerprint']
        extra['scheduling'] = node.get('scheduling', {})
        extra['deprecated'] = True if node.get('deprecated', None) else False

        # Resolve the persistent boot disk (if any) into a StorageVolume.
        for disk in extra['disks']:
            if disk.get('boot') and disk.get('type') == 'PERSISTENT':
                bd = self._get_components_from_path(disk['source'])
                extra['boot_disk'] = self.ex_get_volume(bd['name'], bd['zone'])

        if 'items' in node['tags']:
            tags = node['tags']['items']
        else:
            tags = []
        extra['tags'] = tags

        # networkIP is the internal address; each accessConfig holds an
        # external NAT address.
        for network_interface in node.get('networkInterfaces', []):
            private_ips.append(network_interface.get('networkIP'))
            for access_config in network_interface.get('accessConfigs', []):
                public_ips.append(access_config.get('natIP'))

        # For the node attributes, use just machine and image names, not full
        # paths. Full paths are available in the "extra" dict.
        if extra['image']:
            image = self._get_components_from_path(extra['image'])['name']
        else:
            image = None
        size = self._get_components_from_path(node['machineType'])['name']

        return Node(id=node['id'], name=node['name'],
                    state=self.NODE_STATE_MAP[node['status']],
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self, size=size, image=image, extra=extra)
+
+ def _to_node_size(self, machine_type):
+ """
+ Return a Size object from the json-response dictionary.
+
+ :param machine_type: The dictionary describing the machine.
+ :type machine_type: ``dict``
+
+ :return: Size object
+ :rtype: :class:`GCENodeSize`
+ """
+ extra = {}
+ extra['selfLink'] = machine_type.get('selfLink')
+ extra['zone'] = self.ex_get_zone(machine_type['zone'])
+ extra['description'] = machine_type.get('description')
+ extra['guestCpus'] = machine_type.get('guestCpus')
+ extra['creationTimestamp'] = machine_type.get('creationTimestamp')
+ try:
+ price = self._get_size_price(size_id=machine_type['name'])
+ except KeyError:
+ price = None
+
+ return GCENodeSize(id=machine_type['id'], name=machine_type['name'],
+ ram=machine_type.get('memoryMb'),
+ disk=machine_type.get('imageSpaceGb'),
+ bandwidth=0, price=price, driver=self, extra=extra)
+
+ def _to_project(self, project):
+ """
+ Return a Project object from the json-response dictionary.
+
+ :param project: The dictionary describing the project.
+ :type project: ``dict``
+
+ :return: Project object
+ :rtype: :class:`GCEProject`
+ """
+ extra = {}
+ extra['selfLink'] = project.get('selfLink')
+ extra['creationTimestamp'] = project.get('creationTimestamp')
+ extra['description'] = project.get('description')
+ metadata = project['commonInstanceMetadata'].get('items')
+
+ return GCEProject(id=project['id'], name=project['name'],
+ metadata=metadata, quotas=project.get('quotas'),
+ driver=self, extra=extra)
+
+ def _to_region(self, region):
+ """
+ Return a Region object from the json-response dictionary.
+
+ :param region: The dictionary describing the region.
+ :type region: ``dict``
+
+ :return: Region object
+ :rtype: :class:`GCERegion`
+ """
+ extra = {}
+ extra['selfLink'] = region.get('selfLink')
+ extra['creationTimestamp'] = region.get('creationTimestamp')
+ extra['description'] = region.get('description')
+
+ quotas = region.get('quotas')
+ zones = [self.ex_get_zone(z) for z in region.get('zones', [])]
+ # Work around a bug that will occasionally list missing zones in the
+ # region output
+ zones = [z for z in zones if z is not None]
+ deprecated = region.get('deprecated')
+
+ return GCERegion(id=region['id'], name=region['name'],
+ status=region.get('status'), zones=zones,
+ quotas=quotas, deprecated=deprecated,
+ driver=self, extra=extra)
+
+ def _to_snapshot(self, snapshot):
+ """
+ Return a Snapshot object from the json-response dictionary.
+
+ :param snapshot: The dictionary describing the snapshot
+ :type snapshot: ``dict``
+
+ :return: Snapshot object
+ :rtype: :class:`VolumeSnapshot`
+ """
+ extra = {}
+ extra['selfLink'] = snapshot.get('selfLink')
+ extra['creationTimestamp'] = snapshot.get('creationTimestamp')
+ extra['sourceDisk'] = snapshot.get('sourceDisk')
+
+ return GCESnapshot(id=snapshot['id'], name=snapshot['name'],
+ size=snapshot['diskSizeGb'],
+ status=snapshot.get('status'), driver=self,
+ extra=extra)
+
+ def _to_storage_volume(self, volume):
+ """
+ Return a Volume object from the json-response dictionary.
+
+ :param volume: The dictionary describing the volume.
+ :type volume: ``dict``
+
+ :return: Volume object
+ :rtype: :class:`StorageVolume`
+ """
+ extra = {}
+ extra['selfLink'] = volume.get('selfLink')
+ extra['zone'] = self.ex_get_zone(volume['zone'])
+ extra['status'] = volume.get('status')
+ extra['creationTimestamp'] = volume.get('creationTimestamp')
+ extra['description'] = volume.get('description')
+
+ return StorageVolume(id=volume['id'], name=volume['name'],
+ size=volume['sizeGb'], driver=self, extra=extra)
+
    def _to_targetpool(self, targetpool):
        """
        Return a Target Pool object from the json-response dictionary.

        :param targetpool: The dictionary describing the target pool.
        :type targetpool: ``dict``

        :return: Target Pool object
        :rtype: :class:`GCETargetPool`
        """
        extra = {}
        extra['selfLink'] = targetpool.get('selfLink')
        extra['description'] = targetpool.get('description')
        region = self.ex_get_region(targetpool['region'])
        # Health checks are referenced by URL; resolve each by its last
        # path component (the name).
        healthcheck_list = [self.ex_get_healthcheck(h.split('/')[-1]) for h
                            in targetpool.get('healthChecks', [])]
        node_list = []
        for n in targetpool.get('instances', []):
            # Nodes that do not exist can be part of a target pool. If the
            # node does not exist, use the URL of the node instead of the node
            # object.
            comp = self._get_components_from_path(n)
            try:
                node = self.ex_get_node(comp['name'], comp['zone'])
            except ResourceNotFoundError:
                node = n
            node_list.append(node)

        return GCETargetPool(id=targetpool['id'], name=targetpool['name'],
                             region=region, healthchecks=healthcheck_list,
                             nodes=node_list, driver=self, extra=extra)
+
+ def _to_zone(self, zone):
+ """
+ Return a Zone object from the json-response dictionary.
+
+ :param zone: The dictionary describing the zone.
+ :type zone: ``dict``
+
+ :return: Zone object
+ :rtype: :class:`GCEZone`
+ """
+ extra = {}
+ extra['selfLink'] = zone.get('selfLink')
+ extra['creationTimestamp'] = zone.get('creationTimestamp')
+ extra['description'] = zone.get('description')
+
+ deprecated = zone.get('deprecated')
+
+ return GCEZone(id=zone['id'], name=zone['name'], status=zone['status'],
+ maintenance_windows=zone.get('maintenanceWindows'),
+ deprecated=deprecated, driver=self, extra=extra)
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/gogrid.py b/awx/lib/site-packages/libcloud/compute/drivers/gogrid.py
new file mode 100644
index 0000000000..6c73e46eb9
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/gogrid.py
@@ -0,0 +1,464 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+GoGrid driver
+"""
+import time
+import hashlib
+import copy
+
+from libcloud.utils.py3 import b
+
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.common.gogrid import GoGridConnection, BaseGoGridDriver
+from libcloud.compute.providers import Provider
+from libcloud.compute.types import NodeState
+from libcloud.compute.base import Node, NodeDriver
+from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
+
# Mapping of GoGrid server-state names (as returned by the API) to
# libcloud NodeState values. Unlisted states map to UNKNOWN elsewhere.
STATE = {
    "Starting": NodeState.PENDING,
    "On": NodeState.RUNNING,
    "On/Saving": NodeState.RUNNING,
    "Off": NodeState.PENDING,
    "Restarting": NodeState.REBOOTING,
    "Saving": NodeState.PENDING,
    "Restoring": NodeState.PENDING,
}

# Static catalogue of GoGrid server sizes: RAM in MB, disk in GB.
# Bandwidth is not published by GoGrid, hence None.
GOGRID_INSTANCE_TYPES = {
    '512MB': {'id': '512MB',
              'name': '512MB',
              'ram': 512,
              'disk': 30,
              'bandwidth': None},
    '1GB': {'id': '1GB',
            'name': '1GB',
            'ram': 1024,
            'disk': 60,
            'bandwidth': None},
    '2GB': {'id': '2GB',
            'name': '2GB',
            'ram': 2048,
            'disk': 120,
            'bandwidth': None},
    '4GB': {'id': '4GB',
            'name': '4GB',
            'ram': 4096,
            'disk': 240,
            'bandwidth': None},
    '8GB': {'id': '8GB',
            'name': '8GB',
            'ram': 8192,
            'disk': 480,
            'bandwidth': None},
    '16GB': {'id': '16GB',
             'name': '16GB',
             'ram': 16384,
             'disk': 960,
             'bandwidth': None},
    '24GB': {'id': '24GB',
             'name': '24GB',
             'ram': 24576,
             'disk': 960,
             'bandwidth': None},
}
+
+
class GoGridNode(Node):
    # Generating uuid based on public ip to get around missing id on
    # create_node in gogrid api
    #
    # Used public ip since it is not mutable and specified at create time,
    # so uuid of node should not change after add is completed
    def get_uuid(self):
        """Return a stable uuid derived from public IPs and driver type."""
        return hashlib.sha1(
            b("%s:%s" % (self.public_ips, self.driver.type))
        ).hexdigest()
+
+
class GoGridNodeDriver(BaseGoGridDriver, NodeDriver):
    """
    GoGrid node driver
    """

    connectionCls = GoGridConnection
    type = Provider.GOGRID
    api_name = 'gogrid'
    name = 'GoGrid'
    website = 'http://www.gogrid.com/'
    features = {"create_node": ["generates_password"]}

    _instance_types = GOGRID_INSTANCE_TYPES

    def __init__(self, *args, **kwargs):
        """
        @inherits: :class:`NodeDriver.__init__`
        """
        super(GoGridNodeDriver, self).__init__(*args, **kwargs)

    def _get_state(self, element):
        """Map a GoGrid server state name to a libcloud ``NodeState``."""
        try:
            return STATE[element['state']['name']]
        except (KeyError, TypeError):
            # Narrowed from a bare "except:" -- only a missing/odd 'state'
            # entry or an unknown state name is expected here; anything
            # else should propagate.
            pass
        return NodeState.UNKNOWN

    def _get_ip(self, element):
        """Return the primary IP from a server dictionary."""
        return element.get('ip').get('ip')

    def _get_id(self, element):
        """Return the server id (may be None shortly after creation)."""
        return element.get('id')

    def _to_node(self, element, password=None):
        """Convert a GoGrid server dictionary into a :class:`GoGridNode`."""
        state = self._get_state(element)
        ip = self._get_ip(element)
        # Renamed from "id" to avoid shadowing the builtin.
        node_id = self._get_id(element)
        n = GoGridNode(id=node_id,
                       name=element['name'],
                       state=state,
                       public_ips=[ip],
                       private_ips=[],
                       extra={'ram': element.get('ram').get('name'),
                              'description': element.get('description', '')},
                       driver=self.connection.driver)
        if password:
            n.extra['password'] = password

        return n

    def _to_image(self, element):
        """Convert a GoGrid image dictionary into a :class:`NodeImage`."""
        n = NodeImage(id=element['id'],
                      name=element['friendlyName'],
                      driver=self.connection.driver)
        return n

    def _to_images(self, obj):
        """Convert an image-list API response into NodeImage objects."""
        return [self._to_image(el)
                for el in obj['list']]

    def _to_location(self, element):
        """Convert a datacenter lookup entry into a :class:`NodeLocation`."""
        location = NodeLocation(id=element['id'],
                                name=element['name'],
                                country="US",
                                driver=self.connection.driver)
        return location

    def _to_locations(self, obj):
        """Convert a datacenter-list API response into NodeLocations."""
        return [self._to_location(el)
                for el in obj['list']]

    def list_images(self, location=None):
        """
        @inherits: :class:`NodeDriver.list_images`
        """
        params = {}
        if location is not None:
            params["datacenter"] = location.id
        images = self._to_images(
            self.connection.request('/api/grid/image/list', params).object)
        return images

    def list_nodes(self):
        """
        @inherits: :class:`NodeDriver.list_nodes`
        :rtype: ``list`` of :class:`GoGridNode`
        """
        # Map server id -> password so passwords can be attached to nodes.
        passwords_map = {}

        res = self._server_list()
        try:
            for password in self._password_list()['list']:
                try:
                    passwords_map[password['server']['id']] = \
                        password['password']
                except KeyError:
                    # Password entry without an associated server; skip it.
                    pass
        except InvalidCredsError:
            # some gogrid API keys don't have permission to access the
            # password list.
            pass

        return [self._to_node(el, passwords_map.get(el.get('id')))
                for el in res['list']]

    def reboot_node(self, node):
        """
        @inherits: :class:`NodeDriver.reboot_node`
        :type node: :class:`GoGridNode`
        """
        res = self._server_power(node.id, 'restart')
        if not res.success():
            raise Exception(res.parse_error())
        return True

    def destroy_node(self, node):
        """
        @inherits: :class:`NodeDriver.destroy_node`
        :type node: :class:`GoGridNode`
        """
        res = self._server_delete(node.id)
        if not res.success():
            raise Exception(res.parse_error())
        return True

    def _server_list(self):
        """Return the raw server-list API response."""
        return self.connection.request('/api/grid/server/list').object

    def _password_list(self):
        """Return the raw password-list API response."""
        return self.connection.request('/api/support/password/list').object

    def _server_power(self, id, power):
        # power in ['start', 'stop', 'restart']
        params = {'id': id, 'power': power}
        return self.connection.request("/api/grid/server/power", params,
                                       method='POST')

    def _server_delete(self, id):
        """Issue the delete call for the server with the given id."""
        params = {'id': id}
        return self.connection.request("/api/grid/server/delete", params,
                                       method='POST')

    def _get_first_ip(self, location=None):
        """Return the first unassigned public IP.

        :raises: :class:`LibcloudError` when no unassigned IP is available.
        """
        ips = self.ex_list_ips(public=True, assigned=False, location=location)
        try:
            return ips[0].ip
        except IndexError:
            raise LibcloudError('No public unassigned IPs left',
                                GoGridNodeDriver)

    def list_sizes(self, location=None):
        # NOTE(review): ``location`` is accepted for interface compatibility
        # but is not used by this driver.
        sizes = []
        for key, values in self._instance_types.items():
            # Deep-copy so the shared GOGRID_INSTANCE_TYPES dicts are never
            # mutated by the price update below.
            attributes = copy.deepcopy(values)
            attributes.update({'price': self._get_size_price(size_id=key)})
            sizes.append(NodeSize(driver=self.connection.driver, **attributes))

        return sizes

    def list_locations(self):
        """
        @inherits: :class:`NodeDriver.list_locations`
        """
        locations = self._to_locations(
            self.connection.request('/api/common/lookup/list',
                                    params={'lookup': 'ip.datacenter'}).object)
        return locations

    def ex_create_node_nowait(self, **kwargs):
        """Don't block until GoGrid allocates id for a node
        but return right away with id == None.

        The existence of this method is explained by the fact
        that GoGrid assigns id to a node only few minutes after
        creation.


        :keyword name: String with a name for this new node (required)
        :type name: ``str``

        :keyword size: The size of resources allocated to this node .
                       (required)
        :type size: :class:`NodeSize`

        :keyword image: OS Image to boot on node. (required)
        :type image: :class:`NodeImage`

        :keyword ex_description: Description of a Node
        :type ex_description: ``str``

        :keyword ex_ip: Public IP address to use for a Node. If not
                        specified, first available IP address will be picked
        :type ex_ip: ``str``

        :rtype: :class:`GoGridNode`
        """
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']
        try:
            ip = kwargs['ex_ip']
        except KeyError:
            ip = self._get_first_ip(kwargs.get('location'))

        params = {'name': name,
                  'image': image.id,
                  'description': kwargs.get('ex_description', ''),
                  'server.ram': size.id,
                  'ip': ip}

        # Renamed from "object" to avoid shadowing the builtin.
        res = self.connection.request('/api/grid/server/add',
                                      params=params, method='POST').object
        node = self._to_node(res['list'][0])

        return node

    def create_node(self, **kwargs):
        """Create a new GoGird node

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_description: Description of a Node
        :type ex_description: ``str``

        :keyword ex_ip: Public IP address to use for a Node. If not
                        specified, first available IP address will be picked
        :type ex_ip: ``str``

        :rtype: :class:`GoGridNode`
        """
        node = self.ex_create_node_nowait(**kwargs)

        timeout = 60 * 20
        waittime = 0
        interval = 2 * 60

        # Poll list_nodes until the freshly created node (matched by its
        # immutable public IP) shows up with an allocated id.
        while node.id is None and waittime < timeout:
            nodes = self.list_nodes()

            for i in nodes:
                if i.public_ips[0] == node.public_ips[0] and i.id is not None:
                    return i

            waittime += interval
            time.sleep(interval)

        # BUG FIX: this previously tested the *builtin* ``id`` (always
        # truthy), so a timed-out allocation was silently returned with
        # node.id == None instead of raising.
        if node.id is None:
            raise Exception(
                "Wasn't able to wait for id allocation for the node %s"
                % str(node))

        return node

    def ex_save_image(self, node, name):
        """Create an image for node.

        Please refer to GoGrid documentation to get info
        how prepare a node for image creation:

        http://wiki.gogrid.com/wiki/index.php/MyGSI

        :keyword node: node to use as a base for image
        :type node: :class:`GoGridNode`

        :keyword name: name for new image
        :type name: ``str``

        :rtype: :class:`NodeImage`
        """
        params = {'server': node.id,
                  'friendlyName': name}
        res = self.connection.request('/api/grid/image/save', params=params,
                                      method='POST').object

        return self._to_images(res)[0]

    def ex_edit_node(self, **kwargs):
        """Change attributes of a node.

        :keyword node: node to be edited (required)
        :type node: :class:`GoGridNode`

        :keyword size: new size of a node (required)
        :type size: :class:`NodeSize`

        :keyword ex_description: new description of a node
        :type ex_description: ``str``

        :rtype: :class:`Node`
        """
        node = kwargs['node']
        size = kwargs['size']

        params = {'id': node.id,
                  'server.ram': size.id}

        if 'ex_description' in kwargs:
            params['description'] = kwargs['ex_description']

        res = self.connection.request('/api/grid/server/edit',
                                      params=params).object

        return self._to_node(res['list'][0])

    def ex_edit_image(self, **kwargs):
        """Edit metadata of a server image.

        :keyword image: image to be edited (required)
        :type image: :class:`NodeImage`

        :keyword public: should be the image public (required)
        :type public: ``bool``

        :keyword ex_description: description of the image (optional)
        :type ex_description: ``str``

        :keyword name: name of the image
        :type name: ``str``

        :rtype: :class:`NodeImage`
        """

        image = kwargs['image']
        public = kwargs['public']

        params = {'id': image.id,
                  'isPublic': str(public).lower()}

        if 'ex_description' in kwargs:
            params['description'] = kwargs['ex_description']

        if 'name' in kwargs:
            params['friendlyName'] = kwargs['name']

        res = self.connection.request('/api/grid/image/edit',
                                      params=params).object

        return self._to_image(res['list'][0])

    def ex_list_ips(self, **kwargs):
        """Return list of IP addresses assigned to
        the account.

        :keyword public: set to True to list only
                         public IPs or False to list only
                         private IPs. Set to None or not specify
                         at all not to filter by type
        :type public: ``bool``

        :keyword assigned: set to True to list only addresses
                           assigned to servers, False to list unassigned
                           addresses and set to None or don't set at all
                           not no filter by state
        :type assigned: ``bool``

        :keyword location: filter IP addresses by location
        :type location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`GoGridIpAddress`
        """

        params = {}

        if "public" in kwargs and kwargs["public"] is not None:
            params["ip.type"] = {True: "Public",
                                 False: "Private"}[kwargs["public"]]
        if "assigned" in kwargs and kwargs["assigned"] is not None:
            params["ip.state"] = {True: "Assigned",
                                  False: "Unassigned"}[kwargs["assigned"]]
        if "location" in kwargs and kwargs['location'] is not None:
            params['datacenter'] = kwargs['location'].id

        ips = self._to_ips(
            self.connection.request('/api/grid/ip/list',
                                    params=params).object)
        return ips
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/gridspot.py b/awx/lib/site-packages/libcloud/compute/drivers/gridspot.py
new file mode 100644
index 0000000000..856b0688f2
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/gridspot.py
@@ -0,0 +1,127 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.compute.base import NodeDriver, Node
+from libcloud.compute.base import NodeState
+from libcloud.common.base import ConnectionKey, JsonResponse
+from libcloud.compute.types import Provider
+from libcloud.common.types import InvalidCredsError
+
+
class GridspotAPIException(Exception):
    """
    Exception raised when the Gridspot API reports an error; the error
    name from the response is passed as the first constructor argument.
    """

    def __str__(self):
        return self.args[0]

    def __repr__(self):
        # The format string here was empty, so repr() raised
        # TypeError ("not all arguments converted during string
        # formatting"); restore a descriptive representation.
        return "<GridspotAPIException '%s'>" % (self.args[0])
+
+
class GridspotResponse(JsonResponse):
    """
    Response class for the Gridspot API.
    """

    def parse_body(self):
        """Parse the JSON body and raise on API-level errors."""
        parsed = super(GridspotResponse, self).parse_body()

        # Gridspot signals failures inside an otherwise-successful
        # response through a non-empty 'exception_name' field.
        if 'exception_name' in parsed and parsed['exception_name']:
            raise GridspotAPIException(parsed['exception_name'])

        return parsed

    def parse_error(self):
        """Gridspot 404s on an invalid api key or instance_id."""
        raise InvalidCredsError("Invalid api key/instance_id")
+
+
class GridspotConnection(ConnectionKey):
    """
    Connection class used to talk to Gridspot's API servers.
    """

    host = 'gridspot.com'
    responseCls = GridspotResponse

    def add_default_params(self, params):
        """Attach the account API key to every outgoing request."""
        params.update({'api_key': self.key})
        return params
+
+
class GridspotNodeDriver(NodeDriver):
    """
    Gridspot (http://www.gridspot.com/) node driver.
    """

    type = Provider.GRIDSPOT
    name = 'Gridspot'
    website = 'http://www.gridspot.com/'
    connectionCls = GridspotConnection
    NODE_STATE_MAP = {
        'Running': NodeState.RUNNING,
        'Starting': NodeState.PENDING
    }

    def list_nodes(self):
        """
        List all instances on the account.

        :rtype: ``list`` of :class:`Node`
        """
        data = self.connection.request(
            '/compute_api/v1/list_instances').object
        return [self._to_node(n) for n in data['instances']]

    def destroy_node(self, node):
        """
        Stop (destroy) an instance.

        :param node: Node to destroy.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = {'instance_id': node.id}
        self.connection.request('/compute_api/v1/stop_instance', data).object
        return True

    def _get_node_state(self, state):
        # Map the provider state string onto the libcloud NodeState
        # enumeration; anything unrecognised becomes UNKNOWN.
        result = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN)
        return result

    def _add_int_param(self, params, data, field):
        # The spec is vague about which fields are always present, so a
        # missing field must not blow up: data[field] raised KeyError
        # before; use .get() instead.
        value = data.get(field)
        if value:
            try:
                params[field] = int(value)
            except (TypeError, ValueError):
                # Only conversion failures are ignored; the previous bare
                # "except:" also swallowed KeyboardInterrupt/SystemExit.
                pass

    def _to_node(self, data):
        """Convert an API instance dict into a :class:`Node`."""
        port = None
        ip = None

        state = self._get_node_state(data['current_state'])

        # The endpoint is the literal string 'null' when no SSH endpoint
        # has been assigned to the instance yet.
        if data['vm_ssh_wan_ip_endpoint'] != 'null':
            parts = data['vm_ssh_wan_ip_endpoint'].split(':')
            ip = parts[0]
            port = int(parts[1])

        extra_params = {
            'winning_bid_id': data['winning_bid_id'],
            'port': port
        }

        # Spec is vague and doesn't indicate if these will always be present
        self._add_int_param(extra_params, data, 'vm_num_logical_cores')
        self._add_int_param(extra_params, data, 'vm_num_physical_cores')
        self._add_int_param(extra_params, data, 'vm_ram')
        self._add_int_param(extra_params, data, 'start_state_time')
        self._add_int_param(extra_params, data, 'ended_state_time')
        self._add_int_param(extra_params, data, 'running_state_time')

        return Node(
            id=data['instance_id'],
            name=data['instance_id'],
            state=state,
            # Avoid reporting a bogus [None] entry when no IP is assigned.
            public_ips=[ip] if ip else [],
            private_ips=[],
            driver=self.connection.driver,
            extra=extra_params)
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/hostvirtual.py b/awx/lib/site-packages/libcloud/compute/drivers/hostvirtual.py
new file mode 100644
index 0000000000..e9fa6c80a5
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/hostvirtual.py
@@ -0,0 +1,341 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+libcloud driver for the Host Virtual Inc. (VR) API
+Home page http://www.vr.org/
+"""
+
+import time
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.common.hostvirtual import HostVirtualResponse
+from libcloud.common.hostvirtual import HostVirtualConnection
+from libcloud.common.hostvirtual import HostVirtualException
+from libcloud.compute.providers import Provider
+from libcloud.compute.types import NodeState
+from libcloud.compute.base import Node, NodeDriver
+from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
+from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword
+
# Prefix prepended to every API path (currently empty).
API_ROOT = ''

# Maps Host Virtual server status strings onto libcloud NodeState values.
NODE_STATE_MAP = {
    'BUILDING': NodeState.PENDING,
    'PENDING': NodeState.PENDING,
    'RUNNING': NodeState.RUNNING,  # server is powered up
    'STOPPING': NodeState.REBOOTING,
    'REBOOTING': NodeState.REBOOTING,
    'STARTING': NodeState.REBOOTING,
    'TERMINATED': NodeState.TERMINATED,  # server is powered down
    'STOPPED': NodeState.STOPPED
}

# Location id used by create_node when the caller does not supply one.
DEFAULT_NODE_LOCATION_ID = 4
+
+
class HostVirtualComputeResponse(HostVirtualResponse):
    """Response class for the Host Virtual compute API."""
    pass
+
+
class HostVirtualComputeConnection(HostVirtualConnection):
    """Connection class for the Host Virtual compute API."""
    responseCls = HostVirtualComputeResponse
+
+
class HostVirtualNodeDriver(NodeDriver):
    """
    Host Virtual Inc. (vr.org) node driver.

    Talks to the Host Virtual cloud REST API through
    :class:`HostVirtualComputeConnection`.
    """

    type = Provider.HOSTVIRTUAL
    name = 'HostVirtual'
    website = 'http://www.vr.org'
    connectionCls = HostVirtualComputeConnection
    # Nodes can be provisioned with either an SSH key or a root password.
    features = {'create_node': ['ssh_key', 'password']}

    def __init__(self, key, secure=True, host=None, port=None):
        # No default location; create_node falls back to
        # DEFAULT_NODE_LOCATION_ID when none is supplied.
        self.location = None
        super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure,
                                                    host=host, port=port)

    def _to_node(self, data):
        """Convert an API server dict into a :class:`Node`."""
        state = NODE_STATE_MAP[data['status']]
        public_ips = []
        private_ips = []
        extra = {}

        if 'plan_id' in data:
            extra['size'] = data['plan_id']
        if 'os_id' in data:
            extra['image'] = data['os_id']
        if 'location_id' in data:
            extra['location'] = data['location_id']
        if 'ip' in data:
            public_ips.append(data['ip'])

        node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, extra=extra)
        return node

    def list_locations(self):
        """
        List the available data center locations.

        :rtype: ``list`` of :class:`NodeLocation`
        """
        result = self.connection.request(API_ROOT + '/cloud/locations/').object
        locations = []
        for dc in result:
            # Location names look like "City, CC"; the part after the
            # comma is used as the country code.
            locations.append(NodeLocation(
                dc["id"],
                dc["name"],
                dc["name"].split(',')[1].replace(" ", ""),  # country
                self))
        return locations

    def list_sizes(self, location=None):
        """
        List the available plans (sizes), optionally limited to one
        location.

        :param location: Only return plans offered in this location.
        :type location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`NodeSize`
        """
        params = {}
        if location:
            params = {'location': location.id}
        result = self.connection.request(
            API_ROOT + '/cloud/sizes/',
            data=json.dumps(params)).object
        sizes = []
        for size in result:
            n = NodeSize(id=size['plan_id'],
                         name=size['plan'],
                         ram=size['ram'],
                         disk=size['disk'],
                         bandwidth=size['transfer'],
                         price=size['price'],
                         driver=self.connection.driver)
            sizes.append(n)
        return sizes

    def list_images(self):
        """
        List the available OS images.

        :rtype: ``list`` of :class:`NodeImage`
        """
        result = self.connection.request(API_ROOT + '/cloud/images/').object
        images = []
        for image in result:
            i = NodeImage(id=image["id"],
                          name=image["os"],
                          driver=self.connection.driver,
                          extra=image)
            # 'id' and 'os' are already exposed as first-class attributes.
            del i.extra['id']
            del i.extra['os']
            images.append(i)
        return images

    def list_nodes(self):
        """
        List all servers on the account.

        :rtype: ``list`` of :class:`Node`
        """
        result = self.connection.request(API_ROOT + '/cloud/servers/').object
        nodes = []
        for value in result:
            node = self._to_node(value)
            nodes.append(node)
        return nodes

    def _wait_for_node(self, node_id, timeout=30, interval=5.0):
        """
        Poll until the node's details become available, then return it.

        :param node_id: ID of the node to wait for.
        :type node_id: ``int``

        :param timeout: Timeout (in seconds).
        :type timeout: ``int``

        :param interval: How long to wait (in seconds) between each attempt.
        :type interval: ``float``
        """
        # poll until we get a node
        for i in range(0, timeout, int(interval)):
            try:
                node = self.ex_get_node(node_id)
                return node
            except HostVirtualException:
                # Details are not available yet; retry after a pause.
                time.sleep(interval)

        raise HostVirtualException(412, 'Timedout on getting node details')

    def create_node(self, **kwargs):
        """
        Buy a package, provision a server on it and wait for it to appear.

        :keyword name: FQDN for the new server (required).
        :type name: ``str``

        :keyword size: Plan to order (required).
        :type size: :class:`NodeSize`

        :keyword image: OS image to deploy; applied later by
                        :meth:`ex_provision_node` via the stub node.
        :type image: :class:`NodeImage`

        :keyword location: Data center; defaults to
                           DEFAULT_NODE_LOCATION_ID when omitted.
        :type location: :class:`NodeLocation`

        :keyword auth: SSH key or root password.
        :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :rtype: :class:`Node`
        """
        dc = None

        size = kwargs['size']
        image = kwargs['image']

        auth = self._get_and_check_auth(kwargs.get('auth'))

        params = {'plan': size.name}

        dc = DEFAULT_NODE_LOCATION_ID
        if 'location' in kwargs:
            dc = kwargs['location'].id

        # simply order a package first
        result = self.connection.request(API_ROOT + '/cloud/buy/',
                                         data=json.dumps(params),
                                         method='POST').object

        # create a stub node
        stub_node = self._to_node({
            'mbpkgid': result['id'],
            'status': 'PENDING',
            'fqdn': kwargs['name'],
            'plan_id': size.id,
            'os_id': image.id,
            'location_id': dc
        })

        # provisioning a server using the stub node
        self.ex_provision_node(node=stub_node, auth=auth)
        node = self._wait_for_node(stub_node.id)

        if getattr(auth, 'generated', False):
            # Surface the auto-generated root password to the caller.
            node.extra['password'] = auth.password

        return node

    def reboot_node(self, node):
        """
        Reboot a server.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {'force': 0, 'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/reboot',
            data=json.dumps(params),
            method='POST').object

        return bool(result)

    def destroy_node(self, node):
        """
        Cancel the package behind a server, destroying it.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {
            'mbpkgid': node.id,
            # 'reason': 'Submitted through Libcloud API'
        }

        result = self.connection.request(
            API_ROOT + '/cloud/cancel', data=json.dumps(params),
            method='POST').object

        return bool(result)

    def ex_get_node(self, node_id):
        """
        Get a single node.

        :param node_id: id of the node that we need the node object for
        :type node_id: ``str``

        :rtype: :class:`Node`
        """

        params = {'mbpkgid': node_id}
        result = self.connection.request(
            API_ROOT + '/cloud/server', params=params).object
        node = self._to_node(result)
        return node

    def ex_stop_node(self, node):
        """
        Stop a node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {'force': 0, 'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/shutdown',
            data=json.dumps(params),
            method='POST').object

        return bool(result)

    def ex_start_node(self, node):
        """
        Start a node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/start',
            data=json.dumps(params),
            method='POST').object

        return bool(result)

    def ex_provision_node(self, **kwargs):
        """
        Provision a server on a VR package and get it booted

        :keyword node: node which should be used
        :type node: :class:`Node`

        :keyword image: The distribution to deploy on your server (mandatory)
        :type image: :class:`NodeImage`

        :keyword auth: an SSH key or root password (mandatory)
        :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :keyword location: which datacenter to create the server in
        :type location: :class:`NodeLocation`

        :return: Node representing the newly built server
        :rtype: :class:`Node`
        """

        node = kwargs['node']

        # Fall back to the image recorded on the (stub) node itself.
        if 'image' in kwargs:
            image = kwargs['image']
        else:
            image = node.extra['image']

        params = {
            'mbpkgid': node.id,
            'image': image,
            'fqdn': node.name,
            'location': node.extra['location'],
        }

        auth = kwargs['auth']

        ssh_key = None
        password = None
        if isinstance(auth, NodeAuthSSHKey):
            ssh_key = auth.pubkey
            params['ssh_key'] = ssh_key
        elif isinstance(auth, NodeAuthPassword):
            password = auth.password
            params['password'] = password

        if not ssh_key and not password:
            raise HostVirtualException(500, "Need SSH key or Root password")

        result = self.connection.request(API_ROOT + '/cloud/server/build',
                                         data=json.dumps(params),
                                         method='POST').object
        return bool(result)

    def ex_delete_node(self, node):
        """
        Delete a node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """

        params = {'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/delete', data=json.dumps(params),
            method='POST').object

        return bool(result)
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/hpcloud.py b/awx/lib/site-packages/libcloud/compute/drivers/hpcloud.py
new file mode 100644
index 0000000000..97de03e925
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/hpcloud.py
@@ -0,0 +1,99 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+HP Public cloud driver which is esentially just a small wrapper around
+OpenStack driver.
+"""
+
+from libcloud.compute.types import Provider, LibcloudError
+from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection
+from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver
+
+
__all__ = [
    'HPCloudNodeDriver'
]

# Arguments used to look up the compute endpoint in the Keystone service
# catalog for each supported HP Cloud region.
ENDPOINT_ARGS_MAP = {
    'region-a.geo-1': {
        'service_type': 'compute',
        'name': 'Compute',
        'region': 'region-a.geo-1'
    },
    'region-b.geo-1': {
        'service_type': 'compute',
        'name': 'Compute',
        'region': 'region-b.geo-1'
    },
}

# Keystone identity (auth) endpoint; interpolated with the region name.
AUTH_URL_TEMPLATE = 'https://%s.identity.hpcloudsvc.com:35357/v2.0/tokens'
+
+
class HPCloudConnection(OpenStack_1_1_Connection):
    """OpenStack connection specialised for HP Public Cloud."""

    _auth_version = '2.0_password'

    def __init__(self, *args, **kwargs):
        # These are injected by the driver via _ex_connection_class_kwargs
        # and must be stripped before delegating to OpenStack.
        self.region = kwargs.pop('region', None)
        self.get_endpoint_args = kwargs.pop('get_endpoint_args', None)
        super(HPCloudConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self):
        """Resolve the compute endpoint URL from the service catalog."""
        if not self.get_endpoint_args:
            raise LibcloudError(
                'HPCloudConnection must have get_endpoint_args set')

        if '2.0_password' not in self._auth_version:
            raise LibcloudError(
                'Auth version "%s" not supported' % (self._auth_version))

        endpoint = self.service_catalog.get_endpoint(**self.get_endpoint_args)
        public_url = endpoint.get('publicURL', None)

        if not public_url:
            raise LibcloudError('Could not find specified endpoint')

        return public_url
+
+
class HPCloudNodeDriver(OpenStack_1_1_NodeDriver):
    """HP Public Cloud (Helion) driver - a thin OpenStack wrapper."""

    name = 'HP Public Cloud (Helion)'
    website = 'http://www.hpcloud.com/'
    connectionCls = HPCloudConnection
    type = Provider.HPCLOUD

    def __init__(self, key, secret, tenant_name, secure=True,
                 host=None, port=None, region='region-b.geo-1', **kwargs):
        """
        Note: tenant_name argument is required for HP cloud.
        """
        self.tenant_name = tenant_name
        super(HPCloudNodeDriver, self).__init__(key=key, secret=secret,
                                                secure=secure, host=host,
                                                port=port,
                                                region=region,
                                                **kwargs)

    def _ex_connection_class_kwargs(self):
        """Pass region, endpoint lookup and tenant info to the connection."""
        kwargs = self.openstack_connection_kwargs()
        kwargs.update({
            'region': self.region,
            'get_endpoint_args': ENDPOINT_ARGS_MAP[self.region],
            'ex_force_auth_url': AUTH_URL_TEMPLATE % (self.region),
            'ex_tenant_name': self.tenant_name,
        })
        return kwargs
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/ibm_sce.py b/awx/lib/site-packages/libcloud/compute/drivers/ibm_sce.py
new file mode 100644
index 0000000000..bcf73b9a8d
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/ibm_sce.py
@@ -0,0 +1,753 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Driver for IBM SmartCloud Enterprise
+
+Formerly known as:
+- IBM Developer Cloud
+- IBM Smart Business Development and Test on the IBM Cloud
+- IBM SmartBusiness Cloud
+"""
+
+import base64
+import time
+
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+
+from libcloud.common.base import XmlResponse, ConnectionUserAndKey
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.compute.types import NodeState, Provider
+from libcloud.compute.base import NodeDriver, Node, NodeImage, \
+ NodeSize, NodeLocation, NodeAuthSSHKey, StorageVolume
+
# API host and versioned REST base path for IBM SmartCloud Enterprise.
HOST = 'www-147.ibm.com'
REST_BASE = '/computecloud/enterprise/api/rest/20100331'
+
+
class IBMResponse(XmlResponse):
    """XML response wrapper with SCE-specific error handling."""

    def success(self):
        """Only HTTP 200 counts as success."""
        return int(self.status) == 200

    def parse_error(self):
        """Raise InvalidCredsError on 401; return the raw body otherwise."""
        if int(self.status) != 401:
            return self.body
        if self.body:
            raise InvalidCredsError(self.body)
        raise InvalidCredsError(str(self.status) + ': ' + self.error)
+
+
class IBMConnection(ConnectionUserAndKey):
    """
    Connection class for the IBM SmartCloud Enterprise driver
    """

    host = HOST
    responseCls = IBMResponse

    def add_default_headers(self, headers):
        """Attach Accept, Basic-auth and a default Content-Type header."""
        credentials = '%s:%s' % (self.user_id, self.key)
        encoded = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Accept'] = 'text/xml'
        headers['Authorization'] = 'Basic %s' % (encoded)
        headers.setdefault('Content-Type', 'text/xml')
        return headers

    def encode_data(self, data):
        """Request bodies are form-encoded."""
        return urlencode(data)
+
+
class IBMNodeLocation(NodeLocation):
    """
    Extends the base LibCloud NodeLocation to contain additional attributes
    """
    def __init__(self, id, name, country, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.country = country
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        # The format string here was empty, which made repr() raise
        # TypeError; restore a descriptive representation.
        return ('<IBMNodeLocation: id=%s, name=%s, country=%s, '
                'driver=%s, extra=%s>' %
                (self.id, self.name, self.country, self.driver.name,
                 self.extra))
+
+
class VolumeState(object):
    """
    The SCE specific states for a storage volume
    """
    # Values are the numeric state codes (as strings) reported by the API.
    NEW = '0'
    CREATING = '1'
    DELETING = '2'
    DELETED = '3'
    DETACHED = '4'
    ATTACHED = '5'
    FAILED = '6'
    DELETE_PENDING = '7'
    BEING_CLONED = '8'
    CLONING = '9'
    ATTACHING = '10'
    DETACHING = '11'
    # NOTE(review): looks like a misspelling of "ATTACHED"; kept as-is
    # because the name is part of the public API.
    ATTACHIED = '12'
    IMPORTING = '13'
    TRANSFER_RETRYING = '14'
+
+
class VolumeOffering(object):
    """
    An SCE specific storage volume offering class.
    The volume offering ID is needed to create a volume.
    Volume offering IDs are different for each data center.
    """
    def __init__(self, id, name, location, extra=None):
        self.id = id
        self.location = location
        self.name = name
        self.extra = extra or {}

    def __repr__(self):
        # The format string here was empty, which made repr() raise
        # TypeError; restore a descriptive representation.
        return ('<VolumeOffering: id=%s, location=%s, name=%s, extra=%s>' %
                (self.id, self.location, self.name, self.extra))
+
+
class Address(object):
    """
    A reserved IP address that can be attached to an instance.
    Properties: id, ip, state, options(location, type, created_time, state,
                                       hostname, instance_ids, vlan, owner,
                                       mode, offering_id)
    """
    def __init__(self, id, ip, state, options):
        self.id = id
        self.ip = ip
        self.state = state
        self.options = options

    def __repr__(self):
        # The format string here was empty, which made repr() raise
        # TypeError; restore a descriptive representation.
        return ('<Address: id=%s, ip=%s, state=%s, options=%s>' %
                (self.id, self.ip, self.state, self.options))
+
+
+class IBMNodeDriver(NodeDriver):
+ """
+ Node driver for IBM SmartCloud Enterprise
+ """
+ connectionCls = IBMConnection
+ type = Provider.IBM
+ name = "IBM SmartCloud Enterprise"
+ website = 'http://ibm.com/services/us/en/cloud-enterprise/'
+
+ NODE_STATE_MAP = {
+ 0: NodeState.PENDING, # New
+ 1: NodeState.PENDING, # Provisioning
+ 2: NodeState.TERMINATED, # Failed
+ 3: NodeState.TERMINATED, # Removed
+ 4: NodeState.TERMINATED, # Rejected
+ 5: NodeState.RUNNING, # Active
+ 6: NodeState.UNKNOWN, # Unknown
+ 7: NodeState.PENDING, # Deprovisioning
+ 8: NodeState.REBOOTING, # Restarting
+ 9: NodeState.PENDING, # Starting
+ 10: NodeState.PENDING, # Stopping
+ 11: NodeState.TERMINATED, # Stopped
+ 12: NodeState.PENDING, # Deprovision Pending
+ 13: NodeState.PENDING, # Restart Pending
+ 14: NodeState.PENDING, # Attaching
+ 15: NodeState.PENDING, # Detaching
+ }
+
+ def create_node(self, **kwargs):
+ """
+ Creates a node in the IBM SmartCloud Enterprise.
+
+ See :class:`NodeDriver.create_node` for more keyword args.
+
+ @inherits: :class:`NodeDriver.create_node`
+
+ :keyword auth: Name of the pubkey to use. When constructing
+ :class:`NodeAuthSSHKey` instance, 'pubkey' argument must be the
+ name of the public key to use. You chose this name when creating
+ a new public key on the IBM server.
+ :type auth: :class:`NodeAuthSSHKey`
+
+ :keyword ex_configurationData: Image-specific configuration
+ parameters. Configuration parameters are defined in the parameters
+ .xml file. The URL to this file is defined in the NodeImage at
+ extra[parametersURL].
+ Note: This argument must be specified when launching a Windows
+ instance. It must contain 'UserName' and 'Password' keys.
+ :type ex_configurationData: ``dict``
+ """
+
+ # Compose headers for message body
+ data = {}
+ data.update({'name': kwargs['name']})
+ data.update({'imageID': kwargs['image'].id})
+ data.update({'instanceType': kwargs['size'].id})
+ if 'location' in kwargs:
+ data.update({'location': kwargs['location'].id})
+ else:
+ data.update({'location': '1'})
+ if 'auth' in kwargs and isinstance(kwargs['auth'], NodeAuthSSHKey):
+ data.update({'publicKey': kwargs['auth'].pubkey})
+ if 'ex_configurationData' in kwargs:
+ configurationData = kwargs['ex_configurationData']
+ if configurationData:
+ for key in configurationData.keys():
+ data.update({key: configurationData.get(key)})
+
+ # Send request!
+ resp = self.connection.request(
+ action=REST_BASE + '/instances',
+ headers={'Content-Type': 'application/x-www-form-urlencoded'},
+ method='POST',
+ data=data).object
+ return self._to_nodes(resp)[0]
+
+ def create_volume(self, size, name, location, **kwargs):
+ """
+ Create a new block storage volume (virtual disk)
+
+ :param size: Size of volume in gigabytes (required).
+ Find out the possible sizes from the
+ offerings/storage REST interface
+ :type size: ``int``
+
+ :keyword name: Name of the volume to be created (required)
+ :type name: ``str``
+
+ :keyword location: Which data center to create a volume in. If
+ empty, it will fail for IBM SmartCloud Enterprise
+ (required)
+ :type location: :class:`NodeLocation`
+
+ :keyword snapshot: Not supported for IBM SmartCloud Enterprise
+ :type snapshot: ``str``
+
+ :keyword kwargs.format: Either RAW or EXT3 for IBM SmartCloud
+ Enterprise (optional)
+ :type kwargs.format: ``str``
+
+ :keyword kwargs.offering_id: The storage offering ID for IBM
+ SmartCloud Enterprise
+ Find this from the REST interface
+ storage/offerings. (optional)
+ :type kwargs.offering_id: ``str``
+
+ :keyword kwargs.source_disk_id: If cloning a volume, the storage
+ disk to make a copy from (optional)
+ :type kwargs.source_disk_id: ``str``
+
+ :keyword kwargs.storage_area_id: The id of the storage availability
+ area to create the volume in
+ (optional)
+ :type kwargs.storage_area_id: ``str``
+
+ :keyword kwargs.target_location_id: If cloning a volume, the
+ storage disk to make a copy
+ from (optional)
+ :type kwargs.target_location_id: ``str``
+
+ :return: The newly created :class:`StorageVolume`.
+ :rtype: :class:`StorageVolume`
+ """
+ data = {}
+ data.update({'name': name})
+ data.update({'size': size})
+ data.update({'location': location})
+ if (('format' in kwargs) and (kwargs['format'] is not None)):
+ data.update({'format': kwargs['format']})
+ if (('offering_id' in kwargs) and (kwargs['offering_id'] is not None)):
+ data.update({'offeringID': kwargs['offering_id']})
+ if (('storage_area_id' in kwargs) and
+ (kwargs['storage_area_id'] is not None)):
+ data.update({'storageAreaID': kwargs['storage_area_id']})
+ if 'source_disk_id' in kwargs:
+ data.update({'sourceDiskID': kwargs['source_disk_id']})
+ data.update({'type': 'clone'})
+ if 'target_location_id' in kwargs:
+ data.update({'targetLocationID': kwargs['target_location_id']})
+ resp = self.connection.request(
+ action=REST_BASE + '/storage',
+ headers={'Content-Type': 'application/x-www-form-urlencoded'},
+ method='POST',
+ data=data).object
+ return self._to_volumes(resp)[0]
+
+ def create_image(self, name, description=None, **kwargs):
+ """
+ Create a new node image from an existing volume or image.
+
+ :param name: Name of the image to be created (required)
+ :type name: ``str``
+
+ :param description: Description of the image to be created
+ :type description: ``str``
+
+ :keyword image_id: The ID of the source image if cloning the image
+ :type image_id: ``str``
+
+ :keyword volume_id: The ID of the storage volume if
+ importing the image
+ :type volume_id: ``str``
+
+ :return: The newly created :class:`NodeImage`.
+ :rtype: :class:`NodeImage`
+ """
+ data = {}
+ data.update({'name': name})
+ if description is not None:
+ data.update({'description': description})
+ if (('image_id' in kwargs) and (kwargs['image_id'] is not None)):
+ data.update({'imageId': kwargs['image_id']})
+ if (('volume_id' in kwargs) and (kwargs['volume_id'] is not None)):
+ data.update({'volumeId': kwargs['volume_id']})
+ resp = self.connection.request(
+ action=REST_BASE + '/offerings/image',
+ headers={'Content-Type': 'application/x-www-form-urlencoded'},
+ method='POST',
+ data=data).object
+ return self._to_images(resp)[0]
+
+ def destroy_node(self, node):
+ url = REST_BASE + '/instances/%s' % (node.id)
+ status = int(self.connection.request(action=url,
+ method='DELETE').status)
+ return status == httplib.OK
+
+ def destroy_volume(self, volume):
+ """
+ Destroys a storage volume.
+
+ :param volume: Volume to be destroyed
+ :type volume: :class:`StorageVolume`
+
+ :rtype: ``bool``
+ """
+ url = REST_BASE + '/storage/%s' % (volume.id)
+ status = int(self.connection.request(action=url,
+ method='DELETE').status)
+ return status == httplib.OK
+
+ def ex_destroy_image(self, image):
+ """
+ Destroys an image.
+
+ :param image: Image to be destroyed
+ :type image: :class:`NodeImage`
+
+ :return: ``bool``
+ """
+
+ url = REST_BASE + '/offerings/image/%s' % (image.id)
+ status = int(self.connection.request(action=url,
+ method='DELETE').status)
+ return status == 200
+
+ def attach_volume(self, node, volume):
+ """
+ Attaches volume to node.
+
+ :param node: Node to attach volume to
+ :type node: :class:`Node`
+
+ :param volume: Volume to attach
+ :type volume: :class:`StorageVolume`
+
+ :rtype: ``bool``
+ """
+ url = REST_BASE + '/instances/%s' % (node.id)
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ data = {'storageID': volume.id, 'type': 'attach'}
+ resp = self.connection.request(action=url,
+ method='PUT',
+ headers=headers,
+ data=data)
+ return int(resp.status) == 200
+
+ def detach_volume(self, node, volume):
+ """
+ Detaches a volume from a node.
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :param volume: Volume to be detached
+ :type volume: :class:`StorageVolume`
+
+ :rtype: ``bool``
+ """
+ url = REST_BASE + '/instances/%s' % (node.id)
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ data = {'storageID': volume.id, 'type': 'detach'}
+ resp = self.connection.request(action=url,
+ method='PUT',
+ headers=headers,
+ data=data)
+ return int(resp.status) == 200
+
+ def reboot_node(self, node):
+ url = REST_BASE + '/instances/%s' % (node.id)
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ data = {'state': 'restart'}
+
+ resp = self.connection.request(action=url,
+ method='PUT',
+ headers=headers,
+ data=data)
+ return int(resp.status) == 200
+
    def list_nodes(self):
        """
        List all instances visible to this account.

        :rtype: ``list`` of :class:`Node`
        """
        return self._to_nodes(
            self.connection.request(REST_BASE + '/instances').object)
+
    def list_images(self, location=None):
        """
        List the available images.

        :param location: accepted for API compatibility; it is not used
                         to filter the request here.
        :type location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`NodeImage`
        """
        return self._to_images(
            self.connection.request(REST_BASE + '/offerings/image').object)
+
    def list_volumes(self):
        """
        List storage volumes.

        :rtype: ``list`` of :class:`StorageVolume`
        """
        # The /storage endpoint returns every volume on the account.
        return self._to_volumes(
            self.connection.request(REST_BASE + '/storage').object)
+
+ def list_sizes(self, location=None):
+ """
+ Returns a generic list of sizes. See list_images() for a list of
+ supported sizes for specific images. In particular, you need to have
+ a size that matches the architecture (32-bit vs 64-bit) of the virtual
+ machine image operating system.
+
+ @inherits: :class:`NodeDriver.list_sizes`
+ """
+ return [
+ NodeSize('BRZ32.1/2048/60*175', 'Bronze 32 bit', None, None, None,
+ None, self.connection.driver),
+ NodeSize('BRZ64.2/4096/60*500*350', 'Bronze 64 bit', None, None,
+ None, None, self.connection.driver),
+ NodeSize('COP32.1/2048/60', 'Copper 32 bit', None, None, None,
+ None, self.connection.driver),
+ NodeSize('COP64.2/4096/60', 'Copper 64 bit', None, None, None,
+ None, self.connection.driver),
+ NodeSize('SLV32.2/4096/60*350', 'Silver 32 bit', None, None, None,
+ None, self.connection.driver),
+ NodeSize('SLV64.4/8192/60*500*500', 'Silver 64 bit', None, None,
+ None, None, self.connection.driver),
+ NodeSize('GLD32.4/4096/60*350', 'Gold 32 bit', None, None, None,
+ None, self.connection.driver),
+ NodeSize('GLD64.8/16384/60*500*500', 'Gold 64 bit', None, None,
+ None, None, self.connection.driver),
+ NodeSize('PLT64.16/16384/60*500*500*500*500', 'Platinum 64 bit',
+ None, None, None, None, self.connection.driver)]
+
    def list_locations(self):
        """Return the list of available data center locations."""
        return self._to_locations(
            self.connection.request(REST_BASE + '/locations').object)
+
    def ex_list_storage_offerings(self):
        """
        List the storage center offerings

        :rtype: ``list`` of :class:`VolumeOffering`
        """
        # Offering IDs differ per data center (see VolumeOffering).
        return self._to_volume_offerings(
            self.connection.request(REST_BASE + '/offerings/storage').object)
+
+ def ex_allocate_address(self, location_id, offering_id, vlan_id=None):
+ """
+ Allocate a new reserved IP address
+
+ :param location_id: Target data center
+ :type location_id: ``str``
+
+ :param offering_id: Offering ID for address to create
+ :type offering_id: ``str``
+
+ :param vlan_id: ID of target VLAN
+ :type vlan_id: ``str``
+
+ :return: :class:`Address` object
+ :rtype: :class:`Address`
+ """
+ url = REST_BASE + '/addresses'
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ data = {'location': location_id, 'offeringID': offering_id}
+ if vlan_id is not None:
+ data.update({'vlanID': vlan_id})
+ resp = self.connection.request(action=url,
+ method='POST',
+ headers=headers,
+ data=data).object
+ return self._to_addresses(resp)[0]
+
+ def ex_list_addresses(self, resource_id=None):
+ """
+ List the reserved IP addresses
+
+ :param resource_id: If this is supplied only a single address will
+ be returned (optional)
+ :type resource_id: ``str``
+
+ :rtype: ``list`` of :class:`Address`
+ """
+ url = REST_BASE + '/addresses'
+ if resource_id:
+ url += '/' + resource_id
+ return self._to_addresses(self.connection.request(url).object)
+
+ def ex_copy_to(self, image, volume):
+ """
+ Copies a node image to a storage volume
+
+ :param image: source image to copy
+ :type image: :class:`NodeImage`
+
+ :param volume: Target storage volume to copy to
+ :type volume: :class:`StorageVolume`
+
+ :return: ``bool`` The success of the operation
+ :rtype: ``bool``
+ """
+ url = REST_BASE + '/storage/%s' % (volume.id)
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ data = {'imageId': image.id}
+ resp = self.connection.request(action=url,
+ method='PUT',
+ headers=headers,
+ data=data)
+ return int(resp.status) == 200
+
+ def ex_delete_address(self, resource_id):
+ """
+ Delete a reserved IP address
+
+ :param resource_id: The address to delete (required)
+ :type resource_id: ``str``
+
+ :rtype: ``bool``
+ """
+ url = REST_BASE + '/addresses/' + resource_id
+ status = int(self.connection.request(action=url,
+ method='DELETE').status)
+ return status == 200
+
+ def ex_wait_storage_state(self, volume, state=VolumeState.DETACHED,
+ wait_period=60, timeout=1200):
+ """
+ Block until storage volume state changes to the given value
+
+ :param volume: Storage volume.
+ :type volume: :class:`StorageVolume`
+
+ :param state: The target state to wait for
+ :type state: ``int``
+
+ :param wait_period: How many seconds to between each loop
+ iteration (default is 3)
+ :type wait_period: ``int``
+
+ :param timeout: How many seconds to wait before timing out
+ (default is 1200)
+ :type timeout: ``int``
+
+ :rtype: :class:`StorageVolume`
+ """
+ start = time.time()
+ end = start + timeout
+
+ while time.time() < end:
+ volumes = self.list_volumes()
+ volumes = list([v for v in volumes if v.uuid == volume.uuid])
+
+ if (len(volumes) == 1 and volumes[0].extra['state'] == state):
+ return volumes[0]
+ else:
+ time.sleep(wait_period)
+ continue
+
+ raise LibcloudError(value='Timed out after %d seconds' % (timeout),
+ driver=self)
+
+ def _to_nodes(self, object):
+ return [self._to_node(instance) for instance in
+ object.findall('Instance')]
+
+ def _to_node(self, instance):
+ public_ips = []
+
+ ip = instance.findtext('IP')
+ if ip:
+ public_ips.append(ip)
+
+ return Node(
+ id=instance.findtext('ID'),
+ name=instance.findtext('Name'),
+ state=self.NODE_STATE_MAP[int(instance.findtext('Status'))],
+ public_ips=public_ips,
+ private_ips=[],
+ driver=self.connection.driver
+ )
+
+ def _to_images(self, object):
+ # Converts data retrieved from SCE /offerings/image REST call to
+ # a NodeImage
+ return [self._to_image(image) for image in object.findall('Image')]
+
+ def _to_image(self, image):
+ # Converts an SCE Image object to a NodeImage
+ imageID = image.findtext('ID')
+ imageName = image.findtext('Name')
+ parametersURL = image.findtext('Manifest')
+ location = image.findtext('Location')
+ state = image.findtext('State')
+ owner = image.findtext('Owner')
+ visibility = image.findtext('Visibility')
+ platform = image.findtext('Platform')
+ description = image.findtext('Description')
+ documentation = image.findtext('Documentation')
+ instanceTypes = image.findall('SupportedInstanceTypes')
+ nodeSizes = self._to_node_sizes(image.find('SupportedInstanceTypes'))
+ return NodeImage(id=imageID,
+ name=imageName,
+ driver=self.connection.driver,
+ extra={
+ 'parametersURL': parametersURL,
+ 'location': location,
+ 'state': state,
+ 'owner': owner,
+ 'visibility': visibility,
+ 'platform': platform,
+ 'description': description,
+ 'documentation': documentation,
+ 'instanceTypes': instanceTypes,
+ 'node_sizes': nodeSizes
+ }
+ )
+
+ def _to_locations(self, object):
+ return [self._to_location(location) for location in
+ object.findall('Location')]
+
    def _to_location(self, location):
        # Converts an SCE Location object to a Libcloud NodeLocation object
        name_text = location.findtext('Name')
        description = location.findtext('Description')
        state = location.findtext('State')
        # The <Name> text is split on the first comma into a location name
        # and a country part.
        (nameVal, separator, countryVal) = name_text.partition(',')
        capabiltyElements = location.findall('Capabilities/Capability')
        capabilities = {}
        for elem in capabiltyElements:
            capabilityID = elem.attrib['id']
            entryElements = elem.findall('Entry')
            entries = []
            for entryElem in entryElements:
                key = entryElem.attrib['key']
                # NOTE(review): this searches <Value> children of the
                # enclosing <Capability> (``elem``), not of the current
                # <Entry> (``entryElem``), so every entry receives the same
                # value list. Looks like it should be
                # ``entryElem.findall('Value')`` - confirm against the SCE
                # response schema before changing.
                valueElements = elem.findall('Value')
                values = []
                for valueElem in valueElements:
                    values.append(valueElem.text)
                entry = {'key': key, 'values': values}
                entries.append(entry)
            capabilities[capabilityID] = entries
        extra = {'description': description, 'state': state,
                 'capabilities': capabilities}
        return IBMNodeLocation(id=location.findtext('ID'),
                               name=nameVal,
                               country=countryVal.strip(),
                               driver=self.connection.driver,
                               extra=extra)
+
+ def _to_node_sizes(self, object):
+ # Converts SCE SupportedInstanceTypes object to
+ # a list of Libcloud NodeSize objects
+ return [self._to_node_size(iType) for iType in
+ object.findall('InstanceType')]
+
+ def _to_node_size(self, object):
+ # Converts to an SCE InstanceType to a Libcloud NodeSize
+ return NodeSize(object.findtext('ID'),
+ object.findtext('Label'),
+ None,
+ None,
+ None,
+ object.findtext('Price/Rate'),
+ self.connection.driver)
+
+ def _to_volumes(self, object):
+ return [self._to_volume(iType) for iType in
+ object.findall('Volume')]
+
    def _to_volume(self, object):
        # Converts an SCE Volume to a Libcloud StorageVolume
        # NOTE(review): 'instanceID' is the only lower-cased tag name here
        # while every sibling tag is capitalized ('State', 'Owner', ...);
        # verify the element name against an actual SCE response.
        extra = {'state': object.findtext('State'),
                 'location': object.findtext('Location'),
                 'instanceID': object.findtext('instanceID'),
                 'owner': object.findtext('Owner'),
                 'format': object.findtext('Format'),
                 'createdTime': object.findtext('CreatedTime'),
                 'storageAreaID': object.findtext('StorageArea/ID')}
        # Positional StorageVolume args: id, name, size, driver, extra.
        return StorageVolume(object.findtext('ID'),
                             object.findtext('Name'),
                             object.findtext('Size'),
                             self.connection.driver,
                             extra)
+
    def _to_volume_offerings(self, object):
        # One VolumeOffering per <Offerings> element of the
        # DescribeVolumeOfferingsResponse document.
        # NOTE(review): the sibling converters all use singular element
        # names ('Volume', 'Address'); confirm 'Offerings' (plural) is the
        # actual per-item tag in the response.
        return [self._to_volume_offering(iType) for iType in
                object.findall('Offerings')]
+
+ def _to_volume_offering(self, object):
+ # Converts an SCE DescribeVolumeOfferingsResponse/Offerings XML object
+ # to an SCE VolumeOffering
+ extra = {'label': object.findtext('Label'),
+ 'supported_sizes': object.findtext('SupportedSizes'),
+ 'formats': object.findall('SupportedFormats/Format/ID'),
+ 'price': object.findall('Price')}
+ return VolumeOffering(object.findtext('ID'),
+ object.findtext('Name'),
+ object.findtext('Location'),
+ extra)
+
+ def _to_addresses(self, object):
+ # Converts an SCE DescribeAddressesResponse XML object to a list of
+ # Address objects
+ return [self._to_address(iType) for iType in
+ object.findall('Address')]
+
    def _to_address(self, object):
        # Converts an SCE DescribeAddressesResponse/Address XML object to
        # an Address object
        # NOTE(review): 'created_time' is read from a <SupportedSizes>
        # element and 'owner' from a lower-cased <owner> tag - both look
        # like copy/paste slips from the other converters; confirm against
        # the SCE address response schema.
        extra = {'location': object.findtext('Location'),
                 'type': object.findtext('Label'),
                 'created_time': object.findtext('SupportedSizes'),
                 'hostname': object.findtext('Hostname'),
                 'instance_ids': object.findtext('InstanceID'),
                 'vlan': object.findtext('VLAN'),
                 'owner': object.findtext('owner'),
                 'mode': object.findtext('Mode'),
                 'offering_id': object.findtext('OfferingID')}
        return Address(object.findtext('ID'),
                       object.findtext('IP'),
                       object.findtext('State'),
                       extra)
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/ikoula.py b/awx/lib/site-packages/libcloud/compute/drivers/ikoula.py
new file mode 100644
index 0000000000..554c647b95
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/ikoula.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.compute.providers import Provider
+from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
+
+__all__ = [
+ 'IkoulaNodeDriver'
+]
+
+
class IkoulaNodeDriver(CloudStackNodeDriver):
    """
    Driver for the Ikoula CloudStack-based public cloud.

    All behavior is inherited from CloudStackNodeDriver; this subclass
    only pins the provider identity and the API endpoint.
    """

    type = Provider.IKOULA
    name = 'Ikoula'
    website = 'http://express.ikoula.co.uk/cloudstack'

    # API endpoint info
    host = 'cloudstack.ikoula.com'
    path = '/client/api'
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/joyent.py b/awx/lib/site-packages/libcloud/compute/drivers/joyent.py
new file mode 100644
index 0000000000..6d8142e8f8
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/joyent.py
@@ -0,0 +1,222 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Joyent Cloud (http://www.joyentcloud.com) driver.
+"""
+
+import base64
+
# Prefer the faster simplejson when available, falling back to the
# stdlib json module. Catch only ImportError - a bare except would also
# swallow unrelated failures raised while importing simplejson.
try:
    import simplejson as json
except ImportError:
    import json
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+
+from libcloud.common.types import LibcloudError
+from libcloud.compute.providers import Provider
+from libcloud.common.base import JsonResponse, ConnectionUserAndKey
+from libcloud.compute.types import NodeState, InvalidCredsError
+from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize
+from libcloud.utils.networking import is_private_subnet
+
# Per-region endpoints are formed as <region> + API_HOST_SUFFIX
# (see JoyentNodeDriver.__init__).
API_HOST_SUFFIX = '.api.joyentcloud.com'
API_VERSION = '~6.5'


# Maps Joyent machine states onto libcloud NodeState values.
NODE_STATE_MAP = {
    'provisioning': NodeState.PENDING,
    'running': NodeState.RUNNING,
    'stopping': NodeState.TERMINATED,
    'stopped': NodeState.TERMINATED,
    'deleted': NodeState.TERMINATED
}

# Datacenters accepted for the driver's ``region`` argument.
VALID_REGIONS = ['us-east-1', 'us-west-1', 'us-sw-1', 'eu-ams-1']
DEFAULT_REGION = 'us-east-1'
+
+
class JoyentResponse(JsonResponse):
    """
    Joyent response class.
    """

    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_error(self):
        # A 401 carries a JSON body with a machine-readable code and a
        # human-readable message; anything else is passed through as-is.
        if self.status != httplib.UNAUTHORIZED:
            return self.body
        data = self.parse_body()
        raise InvalidCredsError(data['code'] + ': ' + data['message'])

    def success(self):
        return self.status in self.valid_response_codes
+
+
class JoyentConnection(ConnectionUserAndKey):
    """
    Joyent connection class.
    """

    responseCls = JoyentResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        # Every request is JSON in and out, pinned to a single API
        # version.
        headers.update({
            'Accept': 'application/json',
            'Content-Type': 'application/json; charset=UTF-8',
            'X-Api-Version': API_VERSION,
        })

        # HTTP basic auth built from the account credentials.
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % (encoded)
        return headers
+
+
class JoyentNodeDriver(NodeDriver):
    """
    Joyent node driver class.

    Talks to the Joyent CloudAPI; the target datacenter is selected via
    the ``region`` constructor argument.
    """

    type = Provider.JOYENT
    name = 'Joyent'
    website = 'http://www.joyentcloud.com'
    connectionCls = JoyentConnection
    # Joyent returns a generated root password in the machine metadata;
    # it is surfaced via Node.extra['password'] (see _to_node).
    features = {'create_node': ['generates_password']}

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """
        :param region: Datacenter to connect to. Must be one of
                       VALID_REGIONS (default: ``us-east-1``).
        :type region: ``str``

        :raises LibcloudError: On an unknown region.
        """
        # Location is here for backward compatibility reasons
        if 'location' in kwargs:
            region = kwargs['location']

        if region not in VALID_REGIONS:
            msg = 'Invalid region: "%s". Valid region: %s'
            raise LibcloudError(msg % (region,
                                ', '.join(VALID_REGIONS)), driver=self)

        super(JoyentNodeDriver, self).__init__(key=key, secret=secret,
                                               secure=secure, host=host,
                                               port=port, region=region,
                                               **kwargs)
        # Each region is served by its own endpoint, e.g.
        # us-east-1.api.joyentcloud.com.
        self.connection.host = region + API_HOST_SUFFIX

    def list_images(self):
        # Images are "datasets" in Joyent terminology.
        result = self.connection.request('/my/datasets').object

        images = []
        for value in result:
            extra = {'type': value['type'], 'urn': value['urn'],
                     'os': value['os'], 'default': value['default']}
            image = NodeImage(id=value['id'], name=value['name'],
                              driver=self.connection.driver, extra=extra)
            images.append(image)

        return images

    def list_sizes(self):
        # Sizes are "packages"; the package name doubles as the size id.
        result = self.connection.request('/my/packages').object

        sizes = []
        for value in result:
            size = NodeSize(id=value['name'], name=value['name'],
                            ram=value['memory'], disk=value['disk'],
                            bandwidth=None, price=0.0,
                            driver=self.connection.driver)
            sizes.append(size)

        return sizes

    def list_nodes(self):
        result = self.connection.request('/my/machines').object

        nodes = []
        for value in result:
            node = self._to_node(value)
            nodes.append(node)

        return nodes

    def reboot_node(self, node):
        # Machine actions are POSTed to the machine resource; the API
        # answers 202 Accepted when the action was queued.
        data = json.dumps({'action': 'reboot'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def destroy_node(self, node):
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         method='DELETE')
        return result.status == httplib.NO_CONTENT

    def create_node(self, **kwargs):
        """
        Create a machine from the required ``name``, ``size`` and
        ``image`` keyword arguments.
        """
        name = kwargs['name']
        size = kwargs['size']
        image = kwargs['image']

        data = json.dumps({'name': name, 'package': size.id,
                           'dataset': image.id})
        result = self.connection.request('/my/machines', data=data,
                                         method='POST')
        return self._to_node(result.object)

    def ex_stop_node(self, node):
        """
        Stop node

        :param node: The node to be stopped
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'stop'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def ex_start_node(self, node):
        """
        Start node

        :param node: The node to be started
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'start'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def _to_node(self, data):
        # Translate a machine dict from the API into a Node; addresses
        # are split into public/private via is_private_subnet.
        state = NODE_STATE_MAP[data['state']]
        public_ips = []
        private_ips = []
        extra = {}

        for ip in data['ips']:
            if is_private_subnet(ip):
                private_ips.append(ip)
            else:
                public_ips.append(ip)

        # Freshly provisioned machines carry the generated root password
        # in their metadata.
        if 'credentials' in data['metadata']:
            extra['password'] = data['metadata']['credentials']['root']

        node = Node(id=data['id'], name=data['name'], state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, extra=extra)
        return node
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/kili.py b/awx/lib/site-packages/libcloud/compute/drivers/kili.py
new file mode 100644
index 0000000000..de610676e6
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/kili.py
@@ -0,0 +1,87 @@
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+HP Public cloud driver which is esentially just a small wrapper around
+OpenStack driver.
+"""
+
+from libcloud.compute.types import Provider, LibcloudError
+from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection
+from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver
+
+__all__ = [
+ 'KiliCloudNodeDriver'
+]
+
+ENDPOINT_ARGS = {
+ 'service_type': 'compute',
+ 'name': 'nova',
+ 'region': 'RegionOne'
+}
+
+AUTH_URL = 'https://api.kili.io/keystone/v2.0/tokens'
+
+
class KiliCloudConnection(OpenStack_1_1_Connection):
    """
    Connection class for Kili Public Cloud (password-based Keystone 2.0
    authentication).
    """

    _auth_version = '2.0_password'

    def __init__(self, *args, **kwargs):
        # Pull connection-specific options out of kwargs before handing
        # the rest to the OpenStack base class.
        self.region = kwargs.pop('region', None)
        self.get_endpoint_args = kwargs.pop('get_endpoint_args', None)
        super(KiliCloudConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self):
        # Resolve the compute endpoint from the Keystone service catalog.
        if not self.get_endpoint_args:
            raise LibcloudError(
                'KiliCloudConnection must have get_endpoint_args set')

        if '2.0_password' not in self._auth_version:
            raise LibcloudError(
                'Auth version "%s" not supported' % (self._auth_version))

        endpoint = self.service_catalog.get_endpoint(
            **self.get_endpoint_args)
        public_url = endpoint.get('publicURL', None)

        if not public_url:
            raise LibcloudError('Could not find specified endpoint')

        return public_url
+
+
class KiliCloudNodeDriver(OpenStack_1_1_NodeDriver):
    """
    Node driver for Kili Public Cloud, a thin wrapper over the OpenStack
    1.1 driver with a fixed auth endpoint and service catalog selector.
    """

    name = 'Kili Public Cloud'
    website = 'http://kili.io/'
    connectionCls = KiliCloudConnection
    # NOTE(review): this reuses the HP Cloud provider constant, apparently
    # left over from the driver this module was adapted from. If a
    # dedicated Provider constant for Kili exists it should probably be
    # used instead - confirm before changing, since provider registration
    # may depend on this value.
    type = Provider.HPCLOUD

    def __init__(self, key, secret, tenant_name, secure=True,
                 host=None, port=None, **kwargs):
        """
        Note: tenant_name argument is required for Kili cloud.
        """
        self.tenant_name = tenant_name
        super(KiliCloudNodeDriver, self).__init__(key=key, secret=secret,
                                                  secure=secure, host=host,
                                                  port=port,
                                                  **kwargs)

    def _ex_connection_class_kwargs(self):
        # Force the Kili Keystone endpoint and the tenant supplied at
        # construction time into every connection.
        kwargs = self.openstack_connection_kwargs()
        kwargs['get_endpoint_args'] = ENDPOINT_ARGS
        kwargs['ex_force_auth_url'] = AUTH_URL
        kwargs['ex_tenant_name'] = self.tenant_name

        return kwargs
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/ktucloud.py b/awx/lib/site-packages/libcloud/compute/drivers/ktucloud.py
new file mode 100644
index 0000000000..1bc8544563
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/ktucloud.py
@@ -0,0 +1,103 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.compute.providers import Provider
+from libcloud.compute.base import Node, NodeImage, NodeSize
+from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
+
+
class KTUCloudNodeDriver(CloudStackNodeDriver):
    "Driver for KTUCloud Compute platform."

    # Marker for product types that come without a separate disk offering.
    EMPTY_DISKOFFERINGID = '0'
    type = Provider.KTUCLOUD
    name = 'KTUCloud'
    website = 'https://ucloudbiz.olleh.com/'

    def list_images(self, location=None):
        # NOTE(review): ``args`` (template filter plus optional zone) is
        # built here but never passed to the request below, so the
        # ``location`` parameter has no effect. Confirm whether
        # listAvailableProductTypes accepts these parameters before
        # wiring them through.
        args = {
            'templatefilter': 'executable'
        }
        if location is not None:
            args['zoneid'] = location.id

        imgs = self._sync_request(command='listAvailableProductTypes',
                                  method='GET')
        images = []

        # Each KT "product type" couples a service offering with a
        # template; both ids are stashed in extra for create_node.
        for img in imgs['producttypes']:
            images.append(
                NodeImage(
                    img['serviceofferingid'],
                    img['serviceofferingdesc'],
                    self,
                    {'hypervisor': '',
                     'format': '',
                     'os': img['templatedesc'],
                     'templateid': img['templateid'],
                     'zoneid': img['zoneid']}
                )
            )

        return images

    def list_sizes(self, location=None):
        # Sizes map onto disk offerings; product types without one fall
        # back to EMPTY_DISKOFFERINGID.
        szs = self._sync_request('listAvailableProductTypes')
        sizes = []
        for sz in szs['producttypes']:
            diskofferingid = sz.get('diskofferingid',
                                    self.EMPTY_DISKOFFERINGID)
            sizes.append(NodeSize(
                diskofferingid,
                sz['diskofferingdesc'],
                0, 0, 0, 0, self)
            )
        return sizes

    def create_node(self, name, size, image, location=None, **kwargs):
        # The image carries the template/zone ids collected in
        # list_images; the size contributes an optional disk offering.
        params = {'displayname': name,
                  'serviceofferingid': image.id,
                  'templateid': str(image.extra['templateid']),
                  'zoneid': str(image.extra['zoneid'])}

        # Billing defaults to hourly unless the caller overrides it.
        usageplantype = kwargs.pop('usageplantype', None)
        if usageplantype is None:
            params['usageplantype'] = 'hourly'
        else:
            params['usageplantype'] = usageplantype

        if size.id != self.EMPTY_DISKOFFERINGID:
            params['diskofferingid'] = size.id

        result = self._async_request(
            command='deployVirtualMachine',
            params=params,
            method='GET')

        node = result['virtualmachine']

        return Node(
            id=node['id'],
            name=node['displayname'],
            state=self.NODE_STATE_MAP[node['state']],
            public_ips=[],
            private_ips=[],
            driver=self,
            extra={
                'zoneid': image.extra['zoneid'],
                'ip_addresses': [],
                'forwarding_rules': [],
            }
        )
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/libvirt_driver.py b/awx/lib/site-packages/libcloud/compute/drivers/libvirt_driver.py
new file mode 100644
index 0000000000..3618ac4368
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/libvirt_driver.py
@@ -0,0 +1,335 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import with_statement
+
+import re
+import os
+import time
+import platform
+import subprocess
+import mimetypes
+
+from os.path import join as pjoin
+from collections import defaultdict
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.compute.base import NodeDriver, Node
+from libcloud.compute.base import NodeState
+from libcloud.compute.types import Provider
+from libcloud.utils.networking import is_public_subnet
+
+try:
+ import libvirt
+ have_libvirt = True
+except ImportError:
+ have_libvirt = False
+
+
class LibvirtNodeDriver(NodeDriver):
    """
    Libvirt (http://libvirt.org/) node driver.

    To enable debug mode, set LIBVIR_DEBUG environment variable.
    """

    type = Provider.LIBVIRT
    name = 'Libvirt'
    website = 'http://libvirt.org/'

    # Maps libvirt domain state codes onto libcloud NodeState values.
    NODE_STATE_MAP = {
        0: NodeState.TERMINATED,  # no state
        1: NodeState.RUNNING,  # domain is running
        2: NodeState.PENDING,  # domain is blocked on resource
        3: NodeState.TERMINATED,  # domain is paused by user
        4: NodeState.TERMINATED,  # domain is being shut down
        5: NodeState.TERMINATED,  # domain is shut off
        6: NodeState.UNKNOWN,  # domain is crashed
        7: NodeState.UNKNOWN,  # domain is suspended by guest power management
    }

    def __init__(self, uri):
        """
        :param uri: Hypervisor URI (e.g. vbox:///session, qemu:///system,
                    etc.).
        :type uri: ``str``

        :raises RuntimeError: If the libvirt Python bindings are missing.
        """
        if not have_libvirt:
            raise RuntimeError('Libvirt driver requires \'libvirt\' Python ' +
                               'package')

        self._uri = uri
        self.connection = libvirt.open(uri)

    def list_nodes(self):
        """
        Return a Node for every domain known to the hypervisor.

        :rtype: ``list`` of :class:`Node`
        """
        domains = self.connection.listAllDomains()
        nodes = self._to_nodes(domains=domains)
        return nodes

    def reboot_node(self, node):
        domain = self._get_domain_for_node(node=node)
        return domain.reboot(flags=0) == 0

    def destroy_node(self, node):
        domain = self._get_domain_for_node(node=node)
        return domain.destroy() == 0

    def ex_start_node(self, node):
        """
        Start a stopped node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.create() == 0

    def ex_shutdown_node(self, node):
        """
        Shutdown a running node.

        Note: Usually this will result in sending an ACPI event to the node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.shutdown() == 0

    def ex_suspend_node(self, node):
        """
        Suspend a running node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.suspend() == 0

    def ex_resume_node(self, node):
        """
        Resume a suspended node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.resume() == 0

    def ex_take_node_screenshot(self, node, directory, screen=0):
        """
        Take a screenshot of a monitoring of a running instance.

        :param node: Node to take the screenshot of.
        :type node: :class:`libcloud.compute.base.Node`

        :param directory: Path where the screenshot will be saved.
        :type directory: ``str``

        :param screen: ID of the monitor to take the screenshot of.
        :type screen: ``int``

        :return: Full path where the screenshot has been saved.
        :rtype: ``str``
        """
        if not os.path.exists(directory) or not os.path.isdir(directory):
            raise ValueError('Invalid value for directory argument')

        domain = self._get_domain_for_node(node=node)
        stream = self.connection.newStream()
        # Bug fix: honor the ``screen`` argument instead of always taking
        # a screenshot of monitor 0.
        mime_type = domain.screenshot(stream=stream, screen=screen)
        extensions = mimetypes.guess_all_extensions(type=mime_type)

        # Fall back to .png if the mime type gives us no extension.
        if extensions:
            extension = extensions[0]
        else:
            extension = '.png'

        name = 'screenshot-%s%s' % (int(time.time()), extension)
        file_path = pjoin(directory, name)

        with open(file_path, 'wb') as fp:
            def write(stream, buf, opaque):
                fp.write(buf)

            stream.recvAll(write, None)

        try:
            stream.finish()
        except Exception:
            # Finish is not supported by all backends
            pass

        return file_path

    def ex_get_hypervisor_hostname(self):
        """
        Return a system hostname on which the hypervisor is running.
        """
        hostname = self.connection.getHostname()
        return hostname

    def ex_get_hypervisor_sysinfo(self):
        """
        Retrieve hypervisor system information.

        :rtype: ``dict``
        """
        xml = self.connection.getSysinfo()
        etree = ET.XML(xml)

        attributes = ['bios', 'system', 'processor', 'memory_device']

        sysinfo = {}
        for attribute in attributes:
            element = etree.find(attribute)
            entries = self._get_entries(element=element)
            sysinfo[attribute] = entries

        return sysinfo

    def _to_nodes(self, domains):
        nodes = [self._to_node(domain=domain) for domain in domains]
        return nodes

    def _to_node(self, domain):
        """
        Convert a libvirt domain into a Node.
        """
        state, max_mem, memory, vcpu_count, used_cpu_time = domain.info()
        state = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN)

        public_ips, private_ips = [], []

        ip_addresses = self._get_ip_addresses_for_domain(domain)

        for ip_address in ip_addresses:
            if is_public_subnet(ip_address):
                public_ips.append(ip_address)
            else:
                private_ips.append(ip_address)

        extra = {'uuid': domain.UUIDString(), 'os_type': domain.OSType(),
                 'types': self.connection.getType(),
                 'used_memory': memory / 1024, 'vcpu_count': vcpu_count,
                 'used_cpu_time': used_cpu_time}

        node = Node(id=domain.ID(), name=domain.name(), state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self, extra=extra)
        node._uuid = domain.UUIDString()  # we want to use a custom UUID
        return node

    def _get_ip_addresses_for_domain(self, domain):
        """
        Retrieve IP addresses for the provided domain.

        Note: This functionality is currently only supported on Linux and
        only works if this code is run on the same machine as the VMs run
        on.

        :return: IP addresses for the provided domain.
        :rtype: ``list``
        """
        result = []

        if platform.system() != 'Linux':
            # Only Linux is supported atm
            return result

        mac_addresses = self._get_mac_addresses_for_domain(domain=domain)

        # Match the domain's MAC addresses against the host ARP cache.
        cmd = ['arp', '-an']
        child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        stdout, _ = child.communicate()
        arp_table = self._parse_arp_table(arp_output=stdout)

        for mac_address in mac_addresses:
            if mac_address in arp_table:
                ip_addresses = arp_table[mac_address]
                result.extend(ip_addresses)

        return result

    def _get_mac_addresses_for_domain(self, domain):
        """
        Parses network interface MAC addresses from the provided domain.
        """
        xml = domain.XMLDesc()
        etree = ET.XML(xml)
        elems = etree.findall("devices/interface[@type='network']/mac")

        result = []
        for elem in elems:
            mac_address = elem.get('address')
            result.append(mac_address)

        return result

    def _get_domain_for_node(self, node):
        """
        Return libvirt domain object for the provided node.
        """
        domain = self.connection.lookupByUUIDString(node.uuid)
        return domain

    def _get_entries(self, element):
        """
        Parse entries dictionary.

        :rtype: ``dict``
        """
        elements = element.findall('entry')

        result = {}
        for element in elements:
            name = element.get('name')
            value = element.text
            result[name] = value

        return result

    def _parse_arp_table(self, arp_output):
        """
        Parse arp command output and return a dictionary which maps mac address
        to an IP address.

        :return: Dictionary which maps mac address to IP address.
        :rtype: ``dict``
        """
        # NOTE(review): on Python 3 subprocess.communicate() returns
        # ``bytes`` and ``bytes.split('\n')`` raises TypeError - confirm
        # which Python versions this driver targets and decode at the
        # caller if needed.
        lines = arp_output.split('\n')

        arp_table = defaultdict(list)
        for line in lines:
            # e.g. "? (192.168.1.1) at 52:54:00:aa:bb:cc [ether] on virbr0"
            match = re.match(r'.*?\((.*?)\) at (.*?)\s+', line)

            if not match:
                continue

            groups = match.groups()
            ip_address = groups[0]
            mac_address = groups[1]
            arp_table[mac_address].append(ip_address)

        return arp_table
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/linode.py b/awx/lib/site-packages/libcloud/compute/drivers/linode.py
new file mode 100644
index 0000000000..4960f29472
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/linode.py
@@ -0,0 +1,548 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""libcloud driver for the Linode(R) API
+
+This driver implements all libcloud functionality for the Linode API.
+Since the API is a bit more fine-grained, create_node abstracts a significant
+amount of work (and may take a while to run).
+
+Linode home page http://www.linode.com/
+Linode API documentation http://www.linode.com/api/
+Alternate bindings for reference http://github.com/tjfontaine/linode-python
+
+Linode(R) is a registered trademark of Linode, LLC.
+
+"""
+
+import os
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+import itertools
+import binascii
+
+from copy import copy
+
+from libcloud.utils.py3 import PY3
+
+from libcloud.common.linode import (API_ROOT, LinodeException,
+ LinodeConnection, LINODE_PLAN_IDS)
+from libcloud.compute.types import Provider, NodeState
+from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
+from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
+from libcloud.compute.base import NodeImage
+
+
class LinodeNodeDriver(NodeDriver):
    """libcloud driver for the Linode API

    Rough mapping of which is which:

        list_nodes      linode.list
        reboot_node     linode.reboot
        destroy_node    linode.delete
        create_node     linode.create, linode.update,
                        linode.disk.createfromdistribution,
                        linode.disk.create, linode.config.create,
                        linode.ip.addprivate, linode.boot
        list_sizes      avail.linodeplans
        list_images     avail.distributions
        list_locations  avail.datacenters

    For more information on the Linode API, be sure to read the reference:

        http://www.linode.com/api/
    """
    type = Provider.LINODE
    name = "Linode"
    website = 'http://www.linode.com/'
    connectionCls = LinodeConnection
    _linode_plan_ids = LINODE_PLAN_IDS
    features = {'create_node': ['ssh_key', 'password']}

    def __init__(self, key):
        """Instantiate the driver with the given API key

        :param key: the API key to use (required)
        :type key: ``str``

        :rtype: ``None``
        """
        # Default datacenter for create_node; set via linode_set_datacenter.
        self.datacenter = None
        NodeDriver.__init__(self, key)

    # Converts Linode's state from DB to a NodeState constant.
    LINODE_STATES = {
        (-2): NodeState.UNKNOWN,    # Boot Failed
        (-1): NodeState.PENDING,    # Being Created
        0: NodeState.PENDING,       # Brand New
        1: NodeState.RUNNING,       # Running
        2: NodeState.TERMINATED,    # Powered Off
        3: NodeState.REBOOTING,     # Shutting Down
        4: NodeState.UNKNOWN        # Reserved
    }

    def list_nodes(self):
        """
        List all Linodes that the API key can access

        This call will return all Linodes that the API key in use has access
        to.
        If a node is in this list, rebooting will work; however, creation and
        destruction are a separate grant.

        :return: List of node objects that the API key can access
        :rtype: ``list`` of :class:`Node`
        """
        params = {"api_action": "linode.list"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        return self._to_nodes(data)

    def reboot_node(self, node):
        """
        Reboot the given Linode

        Will issue a shutdown job followed by a boot job, using the last
        booted configuration. In most cases, this will be the only
        configuration.

        :param node: the Linode to reboot
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {"api_action": "linode.reboot", "LinodeID": node.id}
        self.connection.request(API_ROOT, params=params)
        return True

    def destroy_node(self, node):
        """Destroy the given Linode

        Will remove the Linode from the account and issue a prorated credit.
        A grant for removing Linodes from the account is required, otherwise
        this method will fail.

        In most cases, all disk images must be removed from a Linode before
        the Linode can be removed; however, this call explicitly skips those
        safeguards. There is no going back from this method.

        :param node: the Linode to destroy
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        # skipChecks intentionally bypasses the "disks still attached" guard.
        params = {"api_action": "linode.delete", "LinodeID": node.id,
                  "skipChecks": True}
        self.connection.request(API_ROOT, params=params)
        return True

    def create_node(self, **kwargs):
        """Create a new Linode, deploy a Linux distribution, and boot

        This call abstracts much of the functionality of provisioning a
        Linode and getting it booted. A global grant to add Linodes to the
        account is required, as this call will result in a billing charge.

        Note that there is a safety valve of 5 Linodes per hour, in order to
        prevent a runaway script from ruining your day.

        :keyword name: the name to assign the Linode (mandatory)
        :type name: ``str``

        :keyword image: which distribution to deploy on the Linode (mandatory)
        :type image: :class:`NodeImage`

        :keyword size: the plan size to create (mandatory)
        :type size: :class:`NodeSize`

        :keyword auth: an SSH key or root password (mandatory)
        :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :keyword location: which datacenter to create the Linode in
        :type location: :class:`NodeLocation`

        :keyword ex_swap: size of the swap partition in MB (128)
        :type ex_swap: ``int``

        :keyword ex_rsize: size of the root partition in MB (plan size -
                           swap).
        :type ex_rsize: ``int``

        :keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6
                            Stable).
        :type ex_kernel: ``str``

        :keyword ex_payment: one of 1, 12, or 24; subscription length (1)
        :type ex_payment: ``int``

        :keyword ex_comment: a small comment for the configuration (libcloud)
        :type ex_comment: ``str``

        :keyword ex_private: whether or not to request a private IP (False)
        :type ex_private: ``bool``

        :keyword lconfig: what to call the configuration (generated)
        :type lconfig: ``str``

        :keyword lroot: what to call the root image (generated)
        :type lroot: ``str``

        :keyword lswap: what to call the swap space (generated)
        :type lswap: ``str``

        :return: Node representing the newly-created Linode
        :rtype: :class:`Node`
        """
        name = kwargs["name"]
        image = kwargs["image"]
        size = kwargs["size"]
        auth = self._get_and_check_auth(kwargs["auth"])

        # Pick a location (resolves LIBCLOUD-41 in JIRA)
        if "location" in kwargs:
            chosen = kwargs["location"].id
        elif self.datacenter:
            chosen = self.datacenter
        else:
            raise LinodeException(0xFB, "Need to select a datacenter first")

        # Step 0: Parameter validation before we purchase
        # We're especially careful here so we don't fail after purchase,
        # rather than getting halfway through the process and having the API
        # fail.

        # Plan ID
        plans = self.list_sizes()
        if size.id not in [p.id for p in plans]:
            raise LinodeException(0xFB, "Invalid plan ID -- avail.plans")

        # Payment schedule
        payment = "1" if "ex_payment" not in kwargs else \
            str(kwargs["ex_payment"])
        if payment not in ["1", "12", "24"]:
            raise LinodeException(0xFB, "Invalid subscription (1, 12, 24)")

        ssh = None
        root = None
        # SSH key and/or root password
        if isinstance(auth, NodeAuthSSHKey):
            ssh = auth.pubkey
        elif isinstance(auth, NodeAuthPassword):
            root = auth.password

        if not ssh and not root:
            raise LinodeException(0xFB, "Need SSH key or root password")
        if root is not None and len(root) < 6:
            raise LinodeException(0xFB, "Root password is too short")

        # Swap size
        # Narrowed from a bare "except:" -- int() only raises these two.
        try:
            swap = 128 if "ex_swap" not in kwargs else int(kwargs["ex_swap"])
        except (ValueError, TypeError):
            raise LinodeException(0xFB, "Need an integer swap size")

        # Root partition size
        imagesize = (size.disk - swap) if "ex_rsize" not in kwargs else\
            int(kwargs["ex_rsize"])
        if (imagesize + swap) > size.disk:
            raise LinodeException(0xFB, "Total disk images are too big")

        # Distribution ID
        distros = self.list_images()
        if image.id not in [d.id for d in distros]:
            raise LinodeException(0xFB,
                                  "Invalid distro -- avail.distributions")

        # Kernel
        if "ex_kernel" in kwargs:
            kernel = kwargs["ex_kernel"]
        else:
            if image.extra['64bit']:
                # For a list of available kernel ids, see
                # https://www.linode.com/kernels/
                kernel = 138
            else:
                kernel = 137
        params = {"api_action": "avail.kernels"}
        kernels = self.connection.request(API_ROOT, params=params).objects[0]
        if kernel not in [z["KERNELID"] for z in kernels]:
            raise LinodeException(0xFB, "Invalid kernel -- avail.kernels")

        # Comments
        comments = "Created by Apache libcloud " if\
            "ex_comment" not in kwargs else kwargs["ex_comment"]

        # Step 1: linode.create
        params = {
            "api_action": "linode.create",
            "DatacenterID": chosen,
            "PlanID": size.id,
            "PaymentTerm": payment
        }
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode = {"id": data["LinodeID"]}

        # Step 1b. linode.update to rename the Linode
        params = {
            "api_action": "linode.update",
            "LinodeID": linode["id"],
            "Label": name
        }
        self.connection.request(API_ROOT, params=params)

        # Step 1c. linode.ip.addprivate if it was requested
        if "ex_private" in kwargs and kwargs["ex_private"]:
            params = {
                "api_action": "linode.ip.addprivate",
                "LinodeID": linode["id"]
            }
            self.connection.request(API_ROOT, params=params)

        # Step 1d. Labels
        # use the linode id as the name can be up to 63 chars and the labels
        # are limited to 48 chars
        label = {
            "lconfig": "[%s] Configuration Profile" % linode["id"],
            "lroot": "[%s] %s Disk Image" % (linode["id"], image.name),
            "lswap": "[%s] Swap Space" % linode["id"]
        }
        for what in ["lconfig", "lroot", "lswap"]:
            if what in kwargs:
                label[what] = kwargs[what]

        # Step 2: linode.disk.createfromdistribution
        if not root:
            # SSH-key-only deploy: generate a random root password anyway,
            # since the API requires one.
            root = binascii.b2a_base64(os.urandom(8)).decode('ascii').strip()

        params = {
            "api_action": "linode.disk.createfromdistribution",
            "LinodeID": linode["id"],
            "DistributionID": image.id,
            "Label": label["lroot"],
            "Size": imagesize,
            "rootPass": root,
        }
        if ssh:
            params["rootSSHKey"] = ssh
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode["rootimage"] = data["DiskID"]

        # Step 3: linode.disk.create for swap
        params = {
            "api_action": "linode.disk.create",
            "LinodeID": linode["id"],
            "Label": label["lswap"],
            "Type": "swap",
            "Size": swap
        }
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode["swapimage"] = data["DiskID"]

        # Step 4: linode.config.create for main profile
        disks = "%s,%s,,,,,,," % (linode["rootimage"], linode["swapimage"])
        params = {
            "api_action": "linode.config.create",
            "LinodeID": linode["id"],
            "KernelID": kernel,
            "Label": label["lconfig"],
            "Comments": comments,
            "DiskList": disks
        }
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode["config"] = data["ConfigID"]

        # Step 5: linode.boot
        params = {
            "api_action": "linode.boot",
            "LinodeID": linode["id"],
            "ConfigID": linode["config"]
        }
        self.connection.request(API_ROOT, params=params)

        # Make a node out of it and hand it back
        params = {"api_action": "linode.list", "LinodeID": linode["id"]}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        nodes = self._to_nodes(data)

        if len(nodes) == 1:
            node = nodes[0]
            if getattr(auth, "generated", False):
                node.extra['password'] = auth.password
            return node

        return None

    def list_sizes(self, location=None):
        """
        List available Linode plans

        Gets the sizes that can be used for creating a Linode. Since
        available Linode plans vary per-location, this method can also be
        passed a location to filter the availability.

        :keyword location: the facility to retrieve plans in
        :type location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`NodeSize`
        """
        params = {"api_action": "avail.linodeplans"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        sizes = []
        for obj in data:
            # DISK is reported in GB; normalize to MB like other drivers.
            n = NodeSize(id=obj["PLANID"], name=obj["LABEL"], ram=obj["RAM"],
                         disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"],
                         price=obj["PRICE"], driver=self.connection.driver)
            sizes.append(n)
        return sizes

    def list_images(self):
        """
        List available Linux distributions

        Retrieve all Linux distributions that can be deployed to a Linode.

        :rtype: ``list`` of :class:`NodeImage`
        """
        params = {"api_action": "avail.distributions"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        distros = []
        for obj in data:
            i = NodeImage(id=obj["DISTRIBUTIONID"],
                          name=obj["LABEL"],
                          driver=self.connection.driver,
                          extra={'pvops': obj['REQUIRESPVOPSKERNEL'],
                                 '64bit': obj['IS64BIT']})
            distros.append(i)
        return distros

    def list_locations(self):
        """
        List available facilities for deployment

        Retrieve all facilities that a Linode can be deployed in.

        :rtype: ``list`` of :class:`NodeLocation`
        """
        params = {"api_action": "avail.datacenters"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        nl = []
        for dc in data:
            # Country is inferred from the free-form LOCATION string.
            country = None
            if "USA" in dc["LOCATION"]:
                country = "US"
            elif "UK" in dc["LOCATION"]:
                country = "GB"
            elif "JP" in dc["LOCATION"]:
                country = "JP"
            else:
                country = "??"
            nl.append(NodeLocation(dc["DATACENTERID"],
                                   dc["LOCATION"],
                                   country,
                                   self))
        return nl

    def linode_set_datacenter(self, dc):
        """
        Set the default datacenter for Linode creation

        Since Linodes must be created in a facility, this function sets the
        default that :class:`create_node` will use. If a location keyword is
        not passed to :class:`create_node`, this method must have already
        been used.

        :keyword dc: the datacenter to create Linodes in unless specified
        :type dc: :class:`NodeLocation`

        :raises: :class:`LinodeException` if the datacenter is invalid

        :rtype: ``None``
        """
        did = dc.id
        params = {"api_action": "avail.datacenters"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        for datacenter in data:
            # Fixed: the original compared against dc["DATACENTERID"], which
            # subscripts the NodeLocation argument instead of the API row and
            # therefore never matched a valid datacenter.
            if did == datacenter["DATACENTERID"]:
                self.datacenter = did
                return

        # DATACENTERID values are ints; stringify them before joining.
        dcs = ", ".join([str(d["DATACENTERID"]) for d in data])
        self.datacenter = None
        raise LinodeException(0xFD, "Invalid datacenter (use one of %s)" % dcs)

    def _to_nodes(self, objs):
        """Convert returned JSON Linodes into Node instances

        :keyword objs: ``list`` of JSON dictionaries representing the Linodes
        :type objs: ``list``
        :return: ``list`` of :class:`Node`s"""

        # Get the IP addresses for the Linodes
        nodes = {}
        batch = []
        for o in objs:
            lid = o["LINODEID"]
            nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ips=[],
                                  private_ips=[],
                                  state=self.LINODE_STATES[o["STATUS"]],
                                  driver=self.connection.driver)
            n.extra = copy(o)
            n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM"))
            batch.append({"api_action": "linode.ip.list", "LinodeID": lid})

        # Avoid batch limitation: issue ip.list requests 25 at a time.
        ip_answers = []
        args = [iter(batch)] * 25

        if PY3:
            izip_longest = itertools.zip_longest
        else:
            izip_longest = getattr(itertools, 'izip_longest', _izip_longest)

        for twenty_five in izip_longest(*args):
            twenty_five = [q for q in twenty_five if q]
            params = {"api_action": "batch",
                      "api_requestArray": json.dumps(twenty_five)}
            req = self.connection.request(API_ROOT, params=params)
            if not req.success() or len(req.objects) == 0:
                return None
            ip_answers.extend(req.objects)

        # Add the returned IPs to the nodes and return them
        for ip_list in ip_answers:
            for ip in ip_list:
                lid = ip["LINODEID"]
                which = nodes[lid].public_ips if ip["ISPUBLIC"] == 1 else\
                    nodes[lid].private_ips
                which.append(ip["IPADDRESS"])
        return list(nodes.values())
+
+
def _izip_longest(*args, **kwds):
    """Taken from Python docs

    http://docs.python.org/library/itertools.html#itertools.izip

    Pure-Python fallback for ``itertools.izip_longest``, used by
    ``LinodeNodeDriver._to_nodes`` on old Python 2 interpreters where
    ``izip_longest`` is missing (it was added in 2.6).
    """

    fillvalue = kwds.get('fillvalue')

    def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
        # The default-bound ``pop`` succeeds len(args)-1 times (yielding the
        # fillvalue) and raises IndexError on the len(args)-th call -- i.e.
        # once every input iterable has been exhausted -- which terminates
        # the loop below.
        yield counter()  # yields the fillvalue, or raises IndexError

    fillers = itertools.repeat(fillvalue)
    iters = [itertools.chain(it, sentinel(), fillers) for it in args]
    try:
        # NOTE: itertools.izip exists only on Python 2; this helper is only
        # selected when not running under PY3 (see _to_nodes).
        for tup in itertools.izip(*iters):
            yield tup
    except IndexError:
        pass
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/nephoscale.py b/awx/lib/site-packages/libcloud/compute/drivers/nephoscale.py
new file mode 100644
index 0000000000..1888ac8f50
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/nephoscale.py
@@ -0,0 +1,448 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+NephoScale Cloud driver (http://www.nephoscale.com)
+API documentation: http://docs.nephoscale.com
+Created by Markos Gogoulos (https://mist.io)
+"""
+
+import base64
+import sys
+import time
+import os
+import binascii
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import urlencode
+
+from libcloud.compute.providers import Provider
+from libcloud.common.base import JsonResponse, ConnectionUserAndKey
+from libcloud.compute.types import (NodeState, InvalidCredsError,
+ LibcloudError)
+from libcloud.compute.base import (Node, NodeDriver, NodeImage, NodeSize,
+ NodeLocation)
+from libcloud.utils.networking import is_private_subnet
+
# Endpoint for all NephoScale API requests.
API_HOST = 'api.nephoscale.com'

# Maps the API's 'power_status' values to libcloud NodeState constants.
# Note that a powered-off server maps to UNKNOWN here, not STOPPED.
NODE_STATE_MAP = {
    'on': NodeState.RUNNING,
    'off': NodeState.UNKNOWN,
    'unknown': NodeState.UNKNOWN,
}

# HTTP status codes that are treated as a successful API response.
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                        httplib.NO_CONTENT]

# used in create_node and specifies how many times to get the list of nodes
# and check if the newly created node is there. This is because when a request
# is sent to create a node, NephoScale replies with the job id, and not the
# node itself thus we don't have the ip addresses, that are required in
# deploy_node
CONNECT_ATTEMPTS = 10
+
+
class NodeKey(object):
    """
    Represents a NephoScale SSH or password key.

    Keys with ``key_group`` 1 are server keys; keys with ``key_group`` 4
    are console (password) keys.
    """

    def __init__(self, id, name, public_key=None, key_group=None,
                 password=None):
        self.id = id
        self.name = name
        self.key_group = key_group
        self.password = password
        self.public_key = public_key

    def __repr__(self):
        # Fixed: the original formatted an *empty* string with two arguments
        # (('') % (self.id, self.name)), which raises TypeError: "not all
        # arguments converted during string formatting" on every repr() call.
        return (('<NodeKey: id=%s name=%s>') %
                (self.id, self.name))
+
+
class NephoscaleResponse(JsonResponse):
    """
    Nephoscale API Response.

    Translates authentication and not-found HTTP statuses into exceptions
    and treats any status listed in VALID_RESPONSE_CODES as success.
    """

    def parse_error(self):
        """Raise for auth/not-found errors; otherwise return the raw body."""
        status = self.status
        if status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Authorization Failed')
        elif status == httplib.NOT_FOUND:
            raise Exception("The resource you are looking for is not found.")
        return self.body

    def success(self):
        """Whether the response status counts as a successful request."""
        return self.status in VALID_RESPONSE_CODES
+
+
class NephoscaleConnection(ConnectionUserAndKey):
    """
    Connection to the Nephoscale API.

    Every request is authenticated via HTTP Basic Authentication using the
    account's username and password.
    """
    host = API_HOST
    responseCls = NephoscaleResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        """
        Attach the Basic Authentication header required by every request.
        """
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % (encoded)
        return headers
+
+
class NephoscaleNodeDriver(NodeDriver):
    """
    Nephoscale node driver class.

    >>> from libcloud.compute.providers import get_driver
    >>> driver = get_driver('nephoscale')
    >>> conn = driver('nepho_user','nepho_password')
    >>> conn.list_nodes()
    """

    type = Provider.NEPHOSCALE
    api_name = 'nephoscale'
    name = 'NephoScale'
    website = 'http://www.nephoscale.com'
    connectionCls = NephoscaleConnection
    features = {'create_node': ['ssh_key']}

    def list_locations(self):
        """
        List available zones for deployment

        :rtype: ``list`` of :class:`NodeLocation`
        """
        result = self.connection.request('/datacenter/zone/').object
        locations = []
        for value in result.get('data', []):
            # All NephoScale zones are US-based, hence the hard-coded country.
            location = NodeLocation(id=value.get('id'),
                                    name=value.get('name'),
                                    country='US',
                                    driver=self)
            locations.append(location)
        return locations

    def list_images(self):
        """
        List available images for deployment

        :rtype: ``list`` of :class:`NodeImage`
        """
        result = self.connection.request('/image/server/').object
        images = []
        for value in result.get('data', []):
            extra = {'architecture': value.get('architecture'),
                     'disks': value.get('disks'),
                     'billable_type': value.get('billable_type'),
                     'pcpus': value.get('pcpus'),
                     'cores': value.get('cores'),
                     'uri': value.get('uri'),
                     'storage': value.get('storage'),
                     }
            image = NodeImage(id=value.get('id'),
                              name=value.get('friendly_name'),
                              driver=self,
                              extra=extra)
            images.append(image)
        return images

    def list_sizes(self):
        """
        List available sizes containing prices

        :rtype: ``list`` of :class:`NodeSize`, sorted by ascending price
        """
        result = self.connection.request('/server/type/cloud/').object
        sizes = []
        for value in result.get('data', []):
            value_id = value.get('id')
            size = NodeSize(id=value_id,
                            name=value.get('friendly_name'),
                            ram=value.get('ram'),
                            disk=value.get('storage'),
                            bandwidth=None,
                            price=self._get_size_price(size_id=str(value_id)),
                            driver=self)
            sizes.append(size)

        return sorted(sizes, key=lambda k: k.price)

    def list_nodes(self):
        """
        List available nodes

        :rtype: ``list`` of :class:`Node`
        """
        result = self.connection.request('/server/cloud/').object
        nodes = [self._to_node(value) for value in result.get('data', [])]
        return nodes

    def rename_node(self, node, name, hostname=None):
        """rename a cloud server, optionally specify hostname too

        :return: ``True`` on success
        :rtype: ``bool``
        """
        data = {'name': name}
        if hostname:
            data['hostname'] = hostname
        params = urlencode(data)
        result = self.connection.request('/server/cloud/%s/' % node.id,
                                         data=params, method='PUT').object
        return result.get('response') in VALID_RESPONSE_CODES

    def reboot_node(self, node):
        """reboot a running node

        :rtype: ``bool``
        """
        result = self.connection.request('/server/cloud/%s/initiator/restart/'
                                         % node.id, method='POST').object
        return result.get('response') in VALID_RESPONSE_CODES

    def ex_start_node(self, node):
        """start a stopped node

        :rtype: ``bool``
        """
        result = self.connection.request('/server/cloud/%s/initiator/start/'
                                         % node.id, method='POST').object
        return result.get('response') in VALID_RESPONSE_CODES

    def ex_stop_node(self, node):
        """stop a running node

        :rtype: ``bool``
        """
        result = self.connection.request('/server/cloud/%s/initiator/stop/'
                                         % node.id, method='POST').object
        return result.get('response') in VALID_RESPONSE_CODES

    def destroy_node(self, node):
        """destroy a node

        :rtype: ``bool``
        """
        result = self.connection.request('/server/cloud/%s/' % node.id,
                                         method='DELETE').object
        return result.get('response') in VALID_RESPONSE_CODES

    def ex_list_keypairs(self, ssh=False, password=False, key_group=None):
        """
        List available console and server keys
        There are two types of keys for NephoScale, ssh and password keys.
        If run without arguments, lists all keys. Otherwise list only
        ssh keys, or only password keys.
        Password keys with key_group 4 are console keys. When a server
        is created, it has two keys, one password or ssh key, and
        one password console key.

        :keyword ssh: if specified, show ssh keys only (optional)
        :type ssh: ``bool``

        :keyword password: if specified, show password keys only (optional)
        :type password: ``bool``

        :keyword key_group: if specified, show keys with this key_group only
                            eg key_group=4 for console password keys
                            (optional)
        :type key_group: ``int``

        :rtype: ``list`` of :class:`NodeKey`
        """
        if (ssh and password):
            raise LibcloudError('You can only supply ssh or password. To \
get all keys call with no arguments')
        if ssh:
            result = self.connection.request('/key/sshrsa/').object
        elif password:
            result = self.connection.request('/key/password/').object
        else:
            result = self.connection.request('/key/').object
        keys = [self._to_key(value) for value in result.get('data', [])]

        # key_group filtering is done client-side, after the API call.
        if key_group:
            keys = [key for key in keys if
                    key.key_group == key_group]
        return keys

    def ex_create_keypair(self, name, public_key=None, password=None,
                          key_group=None):
        """Creates a key, ssh or password, for server or console
        The group for the key (key_group) is 1 for Server and 4 for Console
        Returns the id of the created key
        """
        if public_key:
            # An SSH key: defaults to the Server key group.
            if not key_group:
                key_group = 1
            data = {
                'name': name,
                'public_key': public_key,
                'key_group': key_group
            }
            params = urlencode(data)
            result = self.connection.request('/key/sshrsa/', data=params,
                                             method='POST').object
        else:
            # A password key: defaults to the Console key group, and a random
            # password is generated when none is supplied.
            if not key_group:
                key_group = 4
            if not password:
                password = self.random_password()
            data = {
                'name': name,
                'password': password,
                'key_group': key_group
            }
            params = urlencode(data)
            result = self.connection.request('/key/password/', data=params,
                                             method='POST').object
        return result.get('data', {}).get('id', '')

    def ex_delete_keypair(self, key_id, ssh=False):
        """Delete an ssh key or password given it's id

        :rtype: ``bool``
        """
        if ssh:
            result = self.connection.request('/key/sshrsa/%s/' % key_id,
                                             method='DELETE').object
        else:
            result = self.connection.request('/key/password/%s/' % key_id,
                                             method='DELETE').object
        return result.get('response') in VALID_RESPONSE_CODES

    def create_node(self, name, size, image, server_key=None,
                    console_key=None, zone=None, **kwargs):
        """Creates the node, and sets the ssh key, console key
        NephoScale will respond with a 200-200 response after sending a valid
        request. If ex_wait=True is specified in the args, we then ask a few
        times until the server is created and assigned a public IP address,
        so that deploy_node can be run

        >>> from libcloud.compute.providers import get_driver
        >>> driver = get_driver('nephoscale')
        >>> conn = driver('nepho_user','nepho_password')
        >>> conn.list_nodes()
        >>> name = 'staging-server'
        >>> size = conn.list_sizes()[0]

        >>> image = conn.list_images()[9]

        >>> server_keys = conn.ex_list_keypairs(key_group=1)[0]

        >>> server_key = conn.ex_list_keypairs(key_group=1)[0].id
        70867
        >>> console_keys = conn.ex_list_keypairs(key_group=4)[0]

        >>> console_key = conn.ex_list_keypairs(key_group=4)[0].id
        70907
        >>> node = conn.create_node(name=name, size=size, image=image, \
            console_key=console_key, server_key=server_key)

        We can also create an ssh key, plus a console key and
        deploy node with them
        >>> server_key = conn.ex_create_keypair(name, public_key='123')
        71211
        >>> console_key = conn.ex_create_keypair(name, key_group=4)
        71213

        We can increase the number of connect attempts to wait until
        the node is created, so that deploy_node has ip address to
        deploy the script
        We can also specify the location
        >>> location = conn.list_locations()[0]
        >>> node = conn.create_node(name=name,
        ...                         size=size,
        ...                         image=image,
        ...                         console_key=console_key,
        ...                         server_key=server_key,
        ...                         connect_attempts=10,
        ...                         nowait=True,
        ...                         zone=location.id)
        """
        hostname = kwargs.get('hostname', name)
        service_type = size.id
        image = image.id
        connect_attempts = int(kwargs.get('connect_attempts',
                                          CONNECT_ATTEMPTS))

        data = {'name': name,
                'hostname': hostname,
                'service_type': service_type,
                'image': image,
                'server_key': server_key,
                'console_key': console_key,
                'zone': zone
                }

        params = urlencode(data)
        try:
            node = self.connection.request('/server/cloud/', data=params,
                                           method='POST')
        except Exception:
            e = sys.exc_info()[1]
            raise Exception("Failed to create node %s" % e)
        # The API returns only a job id, so build a placeholder Node; its id
        # and IPs are unknown until the server actually appears in the list.
        node = Node(id='', name=name, state=NodeState.UNKNOWN, public_ips=[],
                    private_ips=[], driver=self)

        # NOTE(review): despite its name, ``nowait`` holds the ``ex_wait``
        # flag -- polling happens when ex_wait is truthy.
        nowait = kwargs.get('ex_wait', False)
        if not nowait:
            return node
        else:
            # try to get the created node public ips, for use in deploy_node
            # At this point we don't have the id of the newly created Node,
            # so search name in nodes
            created_node = False
            while connect_attempts > 0:
                nodes = self.list_nodes()
                created_node = [c_node for c_node in nodes if
                                c_node.name == name]
                if created_node:
                    return created_node[0]
                else:
                    time.sleep(60)
                    connect_attempts = connect_attempts - 1
            # Polling timed out; fall back to the placeholder node.
            return node

    def _to_node(self, data):
        """Convert node in Node instances
        """

        # NOTE(review): the fallback '4' is a string while NODE_STATE_MAP
        # values are NodeState constants -- confirm downstream handling.
        state = NODE_STATE_MAP.get(data.get('power_status'), '4')
        public_ips = []
        private_ips = []
        ip_addresses = data.get('ipaddresses', '')
        # E.g. "ipaddresses": "198.120.14.6, 10.132.60.1"
        if ip_addresses:
            for ip in ip_addresses.split(','):
                ip = ip.replace(' ', '')
                if is_private_subnet(ip):
                    private_ips.append(ip)
                else:
                    public_ips.append(ip)
        extra = {
            'zone_data': data.get('zone'),
            'zone': data.get('zone', {}).get('name'),
            'image': data.get('image', {}).get('friendly_name'),
            'create_time': data.get('create_time'),
            'network_ports': data.get('network_ports'),
            'is_console_enabled': data.get('is_console_enabled'),
            'service_type': data.get('service_type', {}).get('friendly_name'),
            'hostname': data.get('hostname')
        }

        node = Node(id=data.get('id'), name=data.get('name'), state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self, extra=extra)
        return node

    def _to_key(self, data):
        # Build a NodeKey from a raw API key dictionary.
        return NodeKey(id=data.get('id'),
                       name=data.get('name'),
                       password=data.get('password'),
                       key_group=data.get('key_group'),
                       public_key=data.get('public_key'))

    def random_password(self, size=8):
        # os.urandom is a cryptographically secure source; hex-encode the
        # bytes and truncate to the requested number of characters.
        value = os.urandom(size)
        password = binascii.hexlify(value).decode('ascii')
        return password[:size]
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/ninefold.py b/awx/lib/site-packages/libcloud/compute/drivers/ninefold.py
new file mode 100644
index 0000000000..2689aad794
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/ninefold.py
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.compute.providers import Provider
+
+from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
+
+
class NinefoldNodeDriver(CloudStackNodeDriver):
    """
    Driver for Ninefold's Compute platform.

    Ninefold is a CloudStack-based cloud; only the API endpoint and the
    provider identity differ from the generic CloudStack driver.
    """

    host = 'api.ninefold.com'
    path = '/compute/v1.0/'

    type = Provider.NINEFOLD
    name = 'Ninefold'
    website = 'http://ninefold.com/'
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/opennebula.py b/awx/lib/site-packages/libcloud/compute/drivers/opennebula.py
new file mode 100644
index 0000000000..c295cd49f5
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/opennebula.py
@@ -0,0 +1,1264 @@
+# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
+# Complutense de Madrid (dsa-research.org)
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+OpenNebula.org driver.
+"""
+
+__docformat__ = 'epytext'
+
+from base64 import b64encode
+import hashlib
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import next
+from libcloud.utils.py3 import b
+
+from libcloud.compute.base import NodeState, NodeDriver, Node, NodeLocation
+from libcloud.common.base import ConnectionUserAndKey, XmlResponse
+from libcloud.compute.base import NodeImage, NodeSize, StorageVolume
+from libcloud.common.types import InvalidCredsError
+from libcloud.compute.providers import Provider
+
__all__ = [
    'ACTION',
    'OpenNebulaResponse',
    'OpenNebulaConnection',
    'OpenNebulaNodeSize',
    'OpenNebulaNetwork',
    'OpenNebulaNodeDriver',
    'OpenNebula_1_4_NodeDriver',
    'OpenNebula_2_0_NodeDriver',
    'OpenNebula_3_0_NodeDriver',
    'OpenNebula_3_2_NodeDriver',
    # The v3.6 driver was missing from the public API list even though it
    # is defined below and selectable via api_version='3.6' in
    # OpenNebulaNodeDriver.__new__.
    'OpenNebula_3_6_NodeDriver',
    'OpenNebula_3_8_NodeDriver']
+
# Default OCCI endpoint settings for the OpenNebula driver.
API_HOST = ''
# (plain, TLS) port pair — presumably the connection machinery selects one
# based on API_SECURE; confirm against libcloud.common.base.Connection.
API_PORT = (4567, 443)
API_SECURE = True
# When True the password is sent as-is instead of its SHA1 hex digest
# (see OpenNebulaConnection.add_default_headers).
API_PLAIN_AUTH = False
DEFAULT_API_VERSION = '3.2'
+
+
class ACTION(object):
    """
    Enumeration of the lifecycle actions a compute node understands.

    Every action except RESUME is only applicable while the VM is in the
    "Running" state.
    """

    # Checkpoint the VM's memory and ship both the checkpoint file and the
    # disk image back to the front-end.  Resuming afterwards requires the
    # VM instance to be re-scheduled.
    STOP = 'STOPPED'

    # Checkpoint the VM's memory but leave the checkpoint file and disk
    # image on the host, reserving its resources so the VM can later be
    # resumed in place without re-scheduling.
    SUSPEND = 'SUSPENDED'

    # Restart the VM from its saved checkpoint file and disk image; it is
    # either started immediately or re-scheduled depending on how it was
    # suspended.
    RESUME = 'RESUME'

    # Forcibly shut the VM down and delete its memory state.  A persistent
    # disk image is copied back to the front-end; non-persistent disk
    # images are deleted.
    CANCEL = 'CANCEL'

    # Gracefully shut the VM down via the ACPI signal.  If the VM ignores
    # the signal it is considered still running; on success its memory
    # state is deleted, a persistent disk image is copied back to the
    # front-end and non-persistent disk images are deleted.
    SHUTDOWN = 'SHUTDOWN'

    # Gracefully restart the VM via the ACPI signal.
    # Introduced in OpenNebula v3.2.
    REBOOT = 'REBOOT'

    # Forcibly shut the VM down and delete its memory state, with the same
    # disk-image handling as CANCEL.
    DONE = 'DONE'
+
+
class OpenNebulaResponse(XmlResponse):
    """
    XML response handler for the OpenNebula.org driver.
    """

    def success(self):
        """
        Report whether the HTTP status code denotes success.

        :rtype: ``bool``
        :return: True for any 2xx status code, otherwise False.
        """
        return 200 <= int(self.status) <= 299

    def parse_error(self):
        """
        Raise on authentication failures, otherwise hand back the body.

        @raise: :class:`InvalidCredsError`

        :rtype: :class:`ElementTree`
        :return: Contents of HTTP response body.
        """
        # 401 means the Basic-auth credentials were rejected.
        if int(self.status) == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.body)
        return self.body
+
+
class OpenNebulaConnection(ConnectionUserAndKey):
    """
    Connection class for the OpenNebula.org driver, supporting both the
    default hashed-password authentication and plain-text passwords.
    """

    host = API_HOST
    port = API_PORT
    secure = API_SECURE
    plain_auth = API_PLAIN_AUTH
    responseCls = OpenNebulaResponse

    def __init__(self, *args, **kwargs):
        # A caller-supplied plain_auth overrides the class-level default;
        # it must be removed from kwargs before the parent sees it.
        if 'plain_auth' in kwargs:
            self.plain_auth = kwargs.pop('plain_auth')
        super(OpenNebulaConnection, self).__init__(*args, **kwargs)

    def add_default_headers(self, headers):
        """
        Attach the Basic HTTP Authorization header required by the
        OpenNebula.org OCCI interface.

        :type headers: ``dict``
        :param headers: Dictionary containing HTTP headers.

        :rtype: ``dict``
        :return: Dictionary containing updated headers.
        """
        # Unless plain-text auth was requested, OpenNebula expects the
        # SHA1 hex digest of the password rather than the password itself.
        if self.plain_auth:
            secret = self.key
        else:
            secret = hashlib.sha1(b(self.key)).hexdigest()
        credentials = b('%s:%s' % (self.user_id, secret))
        headers['Authorization'] = (
            'Basic %s' % b64encode(credentials).decode('utf-8'))
        return headers
+
+
class OpenNebulaNodeSize(NodeSize):
    """
    NodeSize class for the OpenNebula.org driver.

    Extends the base :class:`NodeSize` with the CPU share (``cpu``) and
    virtual-CPU count (``vcpu``) attributes exposed by OpenNebula
    instance types.
    """

    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 cpu=None, vcpu=None):
        super(OpenNebulaNodeSize, self).__init__(id=id, name=name, ram=ram,
                                                 disk=disk,
                                                 bandwidth=bandwidth,
                                                 price=price, driver=driver)
        self.cpu = cpu
        self.vcpu = vcpu

    def __repr__(self):
        # Bug fix: the format string here was empty (''), so ('' % args)
        # raised TypeError ("not all arguments converted during string
        # formatting") whenever the size was repr()'d.
        return (('<OpenNebulaNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
                 'bandwidth=%s, price=%s, driver=%s, cpu=%s, vcpu=%s ...>')
                % (self.id, self.name, self.ram, self.disk, self.bandwidth,
                   self.price, self.driver.name, self.cpu, self.vcpu))
+
+
class OpenNebulaNetwork(object):
    """
    Provide a common interface for handling networks of all types.

    Network objects are analogous to physical switches connecting two or
    more physical nodes together. The Network object provides the interface
    in libcloud through which we can manipulate networks in different cloud
    providers in the same way. Network objects don't actually do much
    directly themselves; instead the network driver handles the connection
    to the network.

    You don't normally create a network object yourself; instead you use
    a driver and then have that create the network for you.
    """

    def __init__(self, id, name, address, size, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.address = address
        self.size = size
        self.driver = driver
        self.uuid = self.get_uuid()
        self.extra = extra or {}

    def get_uuid(self):
        """
        Unique hash for this network.

        The hash is an SHA1 digest of the network's ID combined with the
        driver type, which means it should be unique between all networks.
        Note that the same inputs will always produce the same UUID, so
        unlike a proper system UUID the value may refer to a different
        install at a different time.

        :rtype: ``str``
        :return: Unique identifier for this instance.
        """
        return hashlib.sha1(b("%s:%s" % (self.id,
                                         self.driver.type))).hexdigest()

    def __repr__(self):
        # Bug fix: the format string here was empty (''), so ('' % args)
        # raised TypeError instead of describing the network.
        return (('<OpenNebulaNetwork: uuid=%s, name=%s, address=%s, '
                 'size=%s, provider=%s ...>')
                % (self.uuid, self.name, self.address, self.size,
                   self.driver.name))
+
+
class OpenNebulaNodeDriver(NodeDriver):
    """
    OpenNebula.org node driver.

    Talks to the OpenNebula OCCI HTTP interface. Instantiating this class
    actually returns a version-specific subclass chosen by ``api_version``
    (see ``__new__`` below).
    """

    connectionCls = OpenNebulaConnection
    name = 'OpenNebula'
    website = 'http://opennebula.org/'
    type = Provider.OPENNEBULA

    # Maps OpenNebula VM lifecycle states onto libcloud NodeState values.
    NODE_STATE_MAP = {
        'INIT': NodeState.PENDING,
        'PENDING': NodeState.PENDING,
        'HOLD': NodeState.PENDING,
        'ACTIVE': NodeState.RUNNING,
        'STOPPED': NodeState.TERMINATED,
        'SUSPENDED': NodeState.PENDING,
        'DONE': NodeState.TERMINATED,
        'FAILED': NodeState.TERMINATED}

    def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION,
                **kwargs):
        # Dispatch to the versioned subclass matching api_version; raises
        # NotImplementedError for any unsupported version string.
        if cls is OpenNebulaNodeDriver:
            if api_version in ['1.4']:
                cls = OpenNebula_1_4_NodeDriver
            elif api_version in ['2.0', '2.2']:
                cls = OpenNebula_2_0_NodeDriver
            elif api_version in ['3.0']:
                cls = OpenNebula_3_0_NodeDriver
            elif api_version in ['3.2']:
                cls = OpenNebula_3_2_NodeDriver
            elif api_version in ['3.6']:
                cls = OpenNebula_3_6_NodeDriver
            elif api_version in ['3.8']:
                cls = OpenNebula_3_8_NodeDriver
                # v3.8 supports plain-text authentication; keep the class
                # attribute and the keyword argument in sync both ways.
                if 'plain_auth' not in kwargs:
                    kwargs['plain_auth'] = cls.plain_auth
                else:
                    cls.plain_auth = kwargs['plain_auth']
            else:
                raise NotImplementedError(
                    "No OpenNebulaNodeDriver found for API version %s" %
                    (api_version))
        return super(OpenNebulaNodeDriver, cls).__new__(cls)

    def create_node(self, **kwargs):
        """
        Create a new OpenNebula node.

        Builds an OCCI ``COMPUTE`` XML document from the ``name``, ``size``,
        ``image`` and optional ``networks`` keywords and POSTs it to
        ``/compute``.

        @inherits: :class:`NodeDriver.create_node`

        :keyword networks: List of virtual networks to which this node should
                           connect. (optional)
        :type networks: :class:`OpenNebulaNetwork` or
                        ``list`` of :class:`OpenNebulaNetwork`
        """
        compute = ET.Element('COMPUTE')

        name = ET.SubElement(compute, 'NAME')
        name.text = kwargs['name']

        instance_type = ET.SubElement(compute, 'INSTANCE_TYPE')
        instance_type.text = kwargs['size'].name

        storage = ET.SubElement(compute, 'STORAGE')
        ET.SubElement(storage,
                      'DISK',
                      {'image': '%s' % (str(kwargs['image'].id))})

        if 'networks' in kwargs:
            # Accept a single network or a list of them.
            if not isinstance(kwargs['networks'], list):
                kwargs['networks'] = [kwargs['networks']]

            networkGroup = ET.SubElement(compute, 'NETWORK')
            for network in kwargs['networks']:
                # Only include an 'ip' attribute when the network carries
                # a specific address request.
                if network.address:
                    ET.SubElement(networkGroup, 'NIC',
                                  {'network': '%s' % (str(network.id)),
                                   'ip': network.address})
                else:
                    ET.SubElement(networkGroup, 'NIC',
                                  {'network': '%s' % (str(network.id))})

        xml = ET.tostring(compute)
        node = self.connection.request('/compute', method='POST',
                                       data=xml).object

        return self._to_node(node)

    def destroy_node(self, node):
        # DELETE the compute resource; only a 200 OK counts as success.
        url = '/compute/%s' % (str(node.id))
        resp = self.connection.request(url, method='DELETE')

        return resp.status == httplib.OK

    def list_nodes(self):
        # Fetches the compute index and then one request per node.
        return self._to_nodes(self.connection.request('/compute').object)

    def list_images(self, location=None):
        # Fetches the storage index and then one request per image.
        return self._to_images(self.connection.request('/storage').object)

    def list_sizes(self, location=None):
        """
        Return list of sizes on a provider.

        @inherits: :class:`NodeDriver.list_sizes`

        :return: List of compute node sizes supported by the cloud provider.
        :rtype: ``list`` of :class:`NodeSize`
        """
        # NOTE(review): unlike the v2.0+ subclasses these are plain
        # NodeSize objects with hard-coded names and no ram/disk details.
        return [
            NodeSize(id=1,
                     name='small',
                     ram=None,
                     disk=None,
                     bandwidth=None,
                     price=None,
                     driver=self),
            NodeSize(id=2,
                     name='medium',
                     ram=None,
                     disk=None,
                     bandwidth=None,
                     price=None,
                     driver=self),
            NodeSize(id=3,
                     name='large',
                     ram=None,
                     disk=None,
                     bandwidth=None,
                     price=None,
                     driver=self),
        ]

    def list_locations(self):
        # OpenNebula has no location concept; return a single placeholder.
        return [NodeLocation(0, '', '', self)]

    def ex_list_networks(self, location=None):
        """
        List virtual networks on a provider.

        :param location: Location from which to request a list of virtual
                         networks. (optional)
        :type location: :class:`NodeLocation`

        :return: List of virtual networks available to be connected to a
                 compute node.
        :rtype: ``list`` of :class:`OpenNebulaNetwork`
        """
        return self._to_networks(self.connection.request('/network').object)

    def ex_node_action(self, node, action):
        """
        Build action representation and instruct node to commit action.

        Build action representation from the compute node ID, and the
        action which should be carried out on that compute node. Then
        instruct the node to carry out that action.

        :param node: Compute node instance.
        :type node: :class:`Node`

        :param action: Action to be carried out on the compute node
                       (one of the :class:`ACTION` values).
        :type action: ``str``

        :return: False if an HTTP Bad Request is received, else, True is
                 returned.
        :rtype: ``bool``
        """
        compute_node_id = str(node.id)

        # The action is expressed as the desired target STATE of the VM.
        compute = ET.Element('COMPUTE')

        compute_id = ET.SubElement(compute, 'ID')
        compute_id.text = compute_node_id

        state = ET.SubElement(compute, 'STATE')
        state.text = action

        xml = ET.tostring(compute)

        url = '/compute/%s' % compute_node_id
        resp = self.connection.request(url, method='PUT',
                                       data=xml)

        if resp.status == httplib.BAD_REQUEST:
            return False
        else:
            return True

    def _to_images(self, object):
        """
        Request a list of images and convert that list to a list of NodeImage
        objects.

        Request a list of images from the OpenNebula web interface, and
        issue a request to convert each XML object representation of an image
        to a NodeImage object.

        :rtype: ``list`` of :class:`NodeImage`
        :return: List of images.
        """
        images = []
        for element in object.findall('DISK'):
            # The index only carries hrefs; fetch each image's full
            # description with a follow-up request.
            image_id = element.attrib['href'].partition('/storage/')[2]
            image = self.connection.request(
                ('/storage/%s' % (image_id))).object
            images.append(self._to_image(image))

        return images

    def _to_image(self, image):
        """
        Take XML object containing an image description and convert to
        NodeImage object.

        :type image: :class:`ElementTree`
        :param image: XML representation of an image.

        :rtype: :class:`NodeImage`
        :return: The newly extracted :class:`NodeImage`.
        """
        return NodeImage(id=image.findtext('ID'),
                         name=image.findtext('NAME'),
                         driver=self.connection.driver,
                         extra={'size': image.findtext('SIZE'),
                                'url': image.findtext('URL')})

    def _to_networks(self, object):
        """
        Request a list of networks and convert that list to a list of
        OpenNebulaNetwork objects.

        Request a list of networks from the OpenNebula web interface, and
        issue a request to convert each XML object representation of a network
        to an OpenNebulaNetwork object.

        :rtype: ``list`` of :class:`OpenNebulaNetwork`
        :return: List of virtual networks.
        """
        networks = []
        for element in object.findall('NETWORK'):
            # One follow-up request per network to get its full description.
            network_id = element.attrib['href'].partition('/network/')[2]
            network_element = self.connection.request(
                ('/network/%s' % (network_id))).object
            networks.append(self._to_network(network_element))

        return networks

    def _to_network(self, element):
        """
        Take XML object containing a network description and convert to
        OpenNebulaNetwork object.

        Take XML representation containing a network description and
        convert to OpenNebulaNetwork object.

        :rtype: :class:`OpenNebulaNetwork`
        :return: The newly extracted :class:`OpenNebulaNetwork`.
        """
        return OpenNebulaNetwork(id=element.findtext('ID'),
                                 name=element.findtext('NAME'),
                                 address=element.findtext('ADDRESS'),
                                 size=element.findtext('SIZE'),
                                 driver=self.connection.driver)

    def _to_nodes(self, object):
        """
        Request a list of compute nodes and convert that list to a list of
        Node objects.

        Request a list of compute nodes from the OpenNebula web interface, and
        issue a request to convert each XML object representation of a node
        to a Node object.

        :rtype: ``list`` of :class:`Node`
        :return: A list of compute nodes.
        """
        computes = []
        for element in object.findall('COMPUTE'):
            # One follow-up request per node to get its full description.
            compute_id = element.attrib['href'].partition('/compute/')[2]
            compute = self.connection.request(
                ('/compute/%s' % (compute_id))).object
            computes.append(self._to_node(compute))

        return computes

    def _to_node(self, compute):
        """
        Take XML object containing a compute node description and convert to
        Node object.

        Take XML representation containing a compute node description and
        convert to Node object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: :class:`Node`
        :return: The newly extracted :class:`Node`.
        """
        try:
            state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()]
        except KeyError:
            # Unrecognized server-side state: report UNKNOWN, don't fail.
            state = NodeState.UNKNOWN

        return Node(id=compute.findtext('ID'),
                    name=compute.findtext('NAME'),
                    state=state,
                    public_ips=self._extract_networks(compute),
                    private_ips=[],
                    driver=self.connection.driver,
                    image=self._extract_images(compute))

    def _extract_networks(self, compute):
        """
        Extract networks from a compute node XML representation.

        Extract network descriptions from a compute node XML representation,
        converting each network to an OpenNebulaNetwork object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: ``list`` of :class:`OpenNebulaNetwork`s.
        :return: List of virtual networks attached to the compute node.
        """
        networks = list()

        # NOTE(review): assumes the COMPUTE document always contains a
        # NETWORK section; find() returning None would raise AttributeError
        # on the next line — confirm against the OCCI schema.
        network_list = compute.find('NETWORK')
        for element in network_list.findall('NIC'):
            networks.append(
                OpenNebulaNetwork(id=element.attrib.get('network', None),
                                  name=None,
                                  address=element.attrib.get('ip', None),
                                  size=1,
                                  driver=self.connection.driver))

        return networks

    def _extract_images(self, compute):
        """
        Extract image disks from a compute node XML representation.

        Extract image disk descriptions from a compute node XML representation,
        converting the disks to an NodeImage object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: :class:`NodeImage`.
        :return: First disk attached to a compute node, or None when the
                 node has no disks.
        """
        disks = list()

        disk_list = compute.find('STORAGE')
        if disk_list is not None:
            for element in disk_list.findall('DISK'):
                disks.append(
                    NodeImage(id=element.attrib.get('image', None),
                              name=None,
                              driver=self.connection.driver,
                              extra={'dev': element.attrib.get('dev', None)}))

        # @TODO: Return all disks when the Node type accepts multiple
        # attached disks per node.
        if len(disks) > 0:
            return disks[0]
        else:
            return None
+
+
class OpenNebula_1_4_NodeDriver(OpenNebulaNodeDriver):
    """
    Node driver for OpenNebula.org v1.4; inherits all behavior from the
    base OCCI driver and only overrides the display name.
    """

    name = 'OpenNebula (v1.4)'
+
+
class OpenNebula_2_0_NodeDriver(OpenNebulaNodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v2.0 through OpenNebula.org
    v2.2.

    Compared to the base driver, the v2.0 OCCI document layout attaches
    NICs and DISKs directly under COMPUTE, supports context variables and
    exposes sized instance types.
    """

    name = 'OpenNebula (v2.0 - v2.2)'

    def create_node(self, **kwargs):
        """
        Create a new OpenNebula node.

        @inherits: :class:`NodeDriver.create_node`

        :keyword networks: List of virtual networks to which this node should
                           connect. (optional)
        :type networks: :class:`OpenNebulaNetwork` or ``list``
                        of :class:`OpenNebulaNetwork`

        :keyword context: Custom (key, value) pairs to be injected into
                          compute node XML description. (optional)
        :type context: ``dict``

        :return: Instance of a newly created node.
        :rtype: :class:`Node`
        """
        compute = ET.Element('COMPUTE')

        name = ET.SubElement(compute, 'NAME')
        name.text = kwargs['name']

        instance_type = ET.SubElement(compute, 'INSTANCE_TYPE')
        instance_type.text = kwargs['size'].name

        # v2.0 layout: DISK elements referencing storage by href, directly
        # under COMPUTE (the base driver nests DISKs inside STORAGE).
        disk = ET.SubElement(compute, 'DISK')
        ET.SubElement(disk,
                      'STORAGE',
                      {'href': '/storage/%s' % (str(kwargs['image'].id))})

        if 'networks' in kwargs:
            # Accept a single network or a list of them.
            if not isinstance(kwargs['networks'], list):
                kwargs['networks'] = [kwargs['networks']]

            for network in kwargs['networks']:
                nic = ET.SubElement(compute, 'NIC')
                ET.SubElement(nic, 'NETWORK',
                              {'href': '/network/%s' % (str(network.id))})
                if network.address:
                    ip_line = ET.SubElement(nic, 'IP')
                    ip_line.text = network.address

        if 'context' in kwargs:
            # Context keys become upper-cased element tags under CONTEXT.
            if isinstance(kwargs['context'], dict):
                contextGroup = ET.SubElement(compute, 'CONTEXT')
                for key, value in list(kwargs['context'].items()):
                    context = ET.SubElement(contextGroup, key.upper())
                    context.text = value

        xml = ET.tostring(compute)
        node = self.connection.request('/compute', method='POST',
                                       data=xml).object

        return self._to_node(node)

    def destroy_node(self, node):
        # v2.0 signals a successful delete with 204 No Content (the base
        # driver expects 200 OK).
        url = '/compute/%s' % (str(node.id))
        resp = self.connection.request(url, method='DELETE')

        return resp.status == httplib.NO_CONTENT

    def list_sizes(self, location=None):
        """
        Return list of sizes on a provider.

        @inherits: :class:`NodeDriver.list_sizes`

        :return: List of compute node sizes supported by the cloud provider.
        :rtype: ``list`` of :class:`OpenNebulaNodeSize`
        """
        return [
            OpenNebulaNodeSize(id=1,
                               name='small',
                               ram=1024,
                               cpu=1,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=2,
                               name='medium',
                               ram=4096,
                               cpu=4,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=3,
                               name='large',
                               ram=8192,
                               cpu=8,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
            OpenNebulaNodeSize(id=4,
                               name='custom',
                               ram=0,
                               cpu=0,
                               disk=None,
                               bandwidth=None,
                               price=None,
                               driver=self),
        ]

    def _to_images(self, object):
        """
        Request a list of images and convert that list to a list of NodeImage
        objects.

        Request a list of images from the OpenNebula web interface, and
        issue a request to convert each XML object representation of an image
        to a NodeImage object.

        :rtype: ``list`` of :class:`NodeImage`
        :return: List of images.
        """
        images = []
        # v2.0 lists images as STORAGE elements (the base driver uses DISK).
        for element in object.findall('STORAGE'):
            image_id = element.attrib["href"].partition("/storage/")[2]
            image = self.connection.request(
                ("/storage/%s" % (image_id))).object
            images.append(self._to_image(image))

        return images

    def _to_image(self, image):
        """
        Take XML object containing an image description and convert to
        NodeImage object.

        :type image: :class:`ElementTree`
        :param image: XML representation of an image.

        :rtype: :class:`NodeImage`
        :return: The newly extracted :class:`NodeImage`.
        """
        return NodeImage(id=image.findtext('ID'),
                         name=image.findtext('NAME'),
                         driver=self.connection.driver,
                         extra={'description': image.findtext('DESCRIPTION'),
                                'type': image.findtext('TYPE'),
                                'size': image.findtext('SIZE'),
                                'fstype': image.findtext('FSTYPE', None)})

    def _to_node(self, compute):
        """
        Take XML object containing a compute node description and convert to
        Node object.

        Take XML representation containing a compute node description and
        convert to Node object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: :class:`Node`
        :return: The newly extracted :class:`Node`.
        """
        try:
            state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()]
        except KeyError:
            # Unrecognized server-side state: report UNKNOWN, don't fail.
            state = NodeState.UNKNOWN

        return Node(id=compute.findtext('ID'),
                    name=compute.findtext('NAME'),
                    state=state,
                    public_ips=self._extract_networks(compute),
                    private_ips=[],
                    driver=self.connection.driver,
                    image=self._extract_images(compute),
                    size=self._extract_size(compute),
                    extra={'context': self._extract_context(compute)})

    def _extract_networks(self, compute):
        """
        Extract networks from a compute node XML representation.

        Extract network descriptions from a compute node XML representation,
        converting each network to an OpenNebulaNetwork object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: ``list`` of :class:`OpenNebulaNetwork`
        :return: List of virtual networks attached to the compute node.
        """
        networks = []

        # v2.0 places NIC elements directly under COMPUTE.
        for element in compute.findall('NIC'):
            network = element.find('NETWORK')
            network_id = network.attrib['href'].partition('/network/')[2]

            networks.append(
                OpenNebulaNetwork(id=network_id,
                                  name=network.attrib.get('name', None),
                                  address=element.findtext('IP'),
                                  size=1,
                                  driver=self.connection.driver,
                                  extra={'mac': element.findtext('MAC')}))

        return networks

    def _extract_images(self, compute):
        """
        Extract image disks from a compute node XML representation.

        Extract image disk descriptions from a compute node XML representation,
        converting the disks to an NodeImage object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: ``list`` of :class:`NodeImage`
        :return: Disks attached to a compute node: a list for several disks,
                 the single NodeImage for one disk, or None for no disks.
        """
        disks = list()

        for element in compute.findall('DISK'):
            disk = element.find('STORAGE')
            image_id = disk.attrib['href'].partition('/storage/')[2]

            if 'id' in element.attrib:
                disk_id = element.attrib['id']
            else:
                disk_id = None

            disks.append(
                NodeImage(id=image_id,
                          name=disk.attrib.get('name', None),
                          driver=self.connection.driver,
                          extra={'type': element.findtext('TYPE'),
                                 'disk_id': disk_id,
                                 'target': element.findtext('TARGET')}))

        # Return all disks when the Node type accepts multiple attached disks
        # per node.
        # NOTE(review): the return type varies (list / NodeImage / None),
        # which callers such as detach_volume depend on.
        if len(disks) > 1:
            return disks
        elif len(disks) == 1:
            return disks[0]
        else:
            return None

    def _extract_size(self, compute):
        """
        Extract size, or node type, from a compute node XML representation.

        Extract node size, or node type, description from a compute node XML
        representation, converting the node size to a NodeSize object.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: :class:`OpenNebulaNodeSize`
        :return: Node type of compute node, or None when the instance type
                 does not match any known size.
        """
        instance_type = compute.find('INSTANCE_TYPE')

        try:
            # Match the reported instance type name against the known sizes.
            return next((node_size for node_size in self.list_sizes()
                         if node_size.name == instance_type.text))
        except StopIteration:
            return None

    def _extract_context(self, compute):
        """
        Extract context variables from a compute node XML representation.

        Each child element of the CONTEXT section becomes a (lower-cased
        tag, text) entry in the returned dictionary.

        :type compute: :class:`ElementTree`
        :param compute: XML representation of a compute node.

        :rtype: ``dict``
        :return: Dictionary containing (key, value) pairs related to
                 compute node context.
        """
        contexts = dict()
        context = compute.find('CONTEXT')

        if context is not None:
            for context_element in list(context):
                contexts[context_element.tag.lower()] = context_element.text

        return contexts
+
+
class OpenNebula_3_0_NodeDriver(OpenNebula_2_0_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.0.
    """

    name = 'OpenNebula (v3.0)'

    def ex_node_set_save_name(self, node, name):
        """
        Instruct a compute node to save its disk image under a new name.

        Builds an OCCI representation from the compute node ID, the disk
        image to be saved, and the name under which the image will be saved
        upon shutting down the compute node, then PUTs it to the server.

        :param node: Compute node instance.
        :type node: :class:`Node`

        :param name: Name under which the image should be saved after
                     shutting down the compute node.
        :type name: ``str``

        :return: False if an HTTP Bad Request is received, else True.
        :rtype: ``bool``
        """
        node_id = str(node.id)
        image_id = str(node.image.id)

        root = ET.Element('COMPUTE')

        id_element = ET.SubElement(root, 'ID')
        id_element.text = node_id

        disk_element = ET.SubElement(root, 'DISK', {'id': image_id})

        ET.SubElement(disk_element, 'STORAGE',
                      {'href': '/storage/%s' % (image_id),
                       'name': node.image.name})

        ET.SubElement(disk_element, 'SAVE_AS', {'name': str(name)})

        response = self.connection.request('/compute/%s' % node_id,
                                           method='PUT',
                                           data=ET.tostring(root))

        return response.status != httplib.BAD_REQUEST

    def _to_network(self, element):
        """
        Convert an XML network description into an OpenNebulaNetwork,
        including the ``PUBLIC`` flag introduced in v3.0.

        :return: The newly extracted :class:`OpenNebulaNetwork`.
        :rtype: :class:`OpenNebulaNetwork`
        """
        is_public = element.findtext('PUBLIC')
        return OpenNebulaNetwork(id=element.findtext('ID'),
                                 name=element.findtext('NAME'),
                                 address=element.findtext('ADDRESS'),
                                 size=element.findtext('SIZE'),
                                 driver=self.connection.driver,
                                 extra={'public': is_public})
+
+
class OpenNebula_3_2_NodeDriver(OpenNebula_3_0_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.2.
    """

    name = 'OpenNebula (v3.2)'

    def reboot_node(self, node):
        # v3.2 introduced the REBOOT action.
        return self.ex_node_action(node, ACTION.REBOOT)

    def list_sizes(self, location=None):
        """
        Return list of sizes on a provider.

        From v3.2 on, sizes are defined server-side as instance types and
        fetched from the ``/instance_type`` resource.

        @inherits: :class:`NodeDriver.list_sizes`

        :return: List of compute node sizes supported by the cloud provider.
        :rtype: ``list`` of :class:`OpenNebulaNodeSize`
        """
        return self._to_sizes(self.connection.request('/instance_type').object)

    def _to_sizes(self, object):
        """
        Convert the instance-type listing returned by the OpenNebula web
        interface into a list of OpenNebulaNodeSize objects.

        :return: List of instance types.
        :rtype: ``list`` of :class:`OpenNebulaNodeSize`
        """
        # (attribute name, conversion callable, XML tag override)
        attributes = [('name', str, None), ('ram', int, 'MEMORY'),
                      ('cpu', float, None), ('vcpu', float, None),
                      ('disk', str, None), ('bandwidth', float, None),
                      ('price', float, None)]

        sizes = []
        for index, element in enumerate(object.findall('INSTANCE_TYPE')):
            size_kwargs = {'id': index + 1, 'driver': self}
            size_kwargs.update(self._get_attributes_values(
                attributes=attributes, element=element))
            sizes.append(OpenNebulaNodeSize(**size_kwargs))

        return sizes

    def _get_attributes_values(self, attributes, element):
        # Pull each attribute out of the XML element, converting values that
        # are present with the supplied callable; missing ones become None.
        values = {}

        for attr_name, convert, alias in attributes:
            raw = element.findtext(alias if alias else attr_name.upper())
            values[attr_name] = convert(raw) if raw is not None else None

        return values
+
+
class OpenNebula_3_6_NodeDriver(OpenNebula_3_2_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.6.

    Adds block-storage (volume) management on top of the v3.2 driver.
    """

    name = 'OpenNebula (v3.6)'

    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a new persistent DATABLOCK storage volume.

        :param size: Size of the volume.  Passed verbatim in the OCCI
                     ``SIZE`` element (presumably megabytes — confirm
                     against the server's OCCI configuration).
        :type size: ``int``

        :param name: Name of the volume.
        :type name: ``str``

        :param location: Ignored by this driver.
        :param snapshot: Ignored by this driver.

        :rtype: :class:`StorageVolume`
        :return: The newly created volume.
        """
        storage = ET.Element('STORAGE')

        vol_name = ET.SubElement(storage, 'NAME')
        vol_name.text = name

        vol_type = ET.SubElement(storage, 'TYPE')
        vol_type.text = 'DATABLOCK'

        description = ET.SubElement(storage, 'DESCRIPTION')
        description.text = 'Attached storage'

        public = ET.SubElement(storage, 'PUBLIC')
        public.text = 'NO'

        persistent = ET.SubElement(storage, 'PERSISTENT')
        persistent.text = 'YES'

        fstype = ET.SubElement(storage, 'FSTYPE')
        fstype.text = 'ext3'

        vol_size = ET.SubElement(storage, 'SIZE')
        vol_size.text = str(size)

        xml = ET.tostring(storage)
        # The OCCI XML document is sent as the 'occixml' request parameter.
        volume = self.connection.request('/storage',
                                         {'occixml': xml},
                                         method='POST').object

        return self._to_volume(volume)

    def destroy_volume(self, volume):
        """
        Delete a storage volume; 204 No Content signals success.

        :rtype: ``bool``
        """
        url = '/storage/%s' % (str(volume.id))
        resp = self.connection.request(url, method='DELETE')

        return resp.status == httplib.NO_CONTENT

    def attach_volume(self, node, volume, device):
        """
        Attach a volume to a node as ``device`` via the ATTACHDISK action.

        :return: True when the server accepts the request (202 Accepted).
        :rtype: ``bool``
        """
        action = ET.Element('ACTION')

        perform = ET.SubElement(action, 'PERFORM')
        perform.text = 'ATTACHDISK'

        params = ET.SubElement(action, 'PARAMS')

        ET.SubElement(params,
                      'STORAGE',
                      {'href': '/storage/%s' % (str(volume.id))})

        target = ET.SubElement(params, 'TARGET')
        target.text = device

        xml = ET.tostring(action)

        url = '/compute/%s/action' % node.id

        resp = self.connection.request(url, method='POST', data=xml)
        return resp.status == httplib.ACCEPTED

    def _do_detach_volume(self, node_id, disk_id):
        # Internal helper: POST a DETACHDISK action for the given disk of
        # the given node; 202 Accepted signals success.
        action = ET.Element('ACTION')

        perform = ET.SubElement(action, 'PERFORM')
        perform.text = 'DETACHDISK'

        params = ET.SubElement(action, 'PARAMS')

        ET.SubElement(params,
                      'DISK',
                      {'id': disk_id})

        xml = ET.tostring(action)

        url = '/compute/%s/action' % node_id

        resp = self.connection.request(url, method='POST', data=xml)
        return resp.status == httplib.ACCEPTED

    def detach_volume(self, volume):
        """
        Detach a volume, locating the owning node by scanning all nodes.

        :return: True on success, False when no node holds the volume.
        :rtype: ``bool``
        """
        # We need to find the node using this volume
        for node in self.list_nodes():
            if type(node.image) is not list:
                # This node has only one associated image. It is not the one we
                # are after.
                # (_extract_images returns a list only for multi-disk nodes,
                # and an attached volume implies at least two disks.)
                continue

            for disk in node.image:
                if disk.id == volume.id:
                    # Node found. We can now detach the volume
                    disk_id = disk.extra['disk_id']
                    return self._do_detach_volume(node.id, disk_id)

        return False

    def list_volumes(self):
        # Fetches the storage index and then one request per volume.
        return self._to_volumes(self.connection.request('/storage').object)

    def _to_volume(self, storage):
        # Convert a STORAGE XML document into a StorageVolume.
        return StorageVolume(id=storage.findtext('ID'),
                             name=storage.findtext('NAME'),
                             size=int(storage.findtext('SIZE')),
                             driver=self.connection.driver)

    def _to_volumes(self, object):
        # The index only carries hrefs; fetch each volume's description.
        volumes = []
        for storage in object.findall('STORAGE'):
            storage_id = storage.attrib['href'].partition('/storage/')[2]

            volumes.append(self._to_volume(
                self.connection.request('/storage/%s' % storage_id).object))

        return volumes
+
+
class OpenNebula_3_8_NodeDriver(OpenNebula_3_6_NodeDriver):
    """
    OpenNebula.org node driver for OpenNebula.org v3.8.
    """

    name = 'OpenNebula (v3.8)'
    plain_auth = API_PLAIN_AUTH

    def _to_sizes(self, object):
        """
        Convert the instance-type listing returned by the OpenNebula web
        interface into a list of OpenNebulaNodeSize objects.

        Unlike v3.2, the listing entries only carry a name, so each
        instance type must be fetched individually before its attributes
        can be read.

        :return: List of instance types.
        :rtype: ``list`` of :class:`OpenNebulaNodeSize`
        """
        # (attribute name, conversion callable, XML tag override)
        attributes = [('name', str, None), ('ram', int, 'MEMORY'),
                      ('cpu', float, None), ('vcpu', float, None),
                      ('disk', str, None), ('bandwidth', float, None),
                      ('price', float, None)]

        sizes = []
        for index, element in enumerate(object.findall('INSTANCE_TYPE')):
            # Fetch the full description for this instance type by name.
            detail = self.connection.request(
                ('/instance_type/%s') % (element.attrib['name'])).object

            size_kwargs = {'id': index + 1, 'driver': self}
            size_kwargs.update(self._get_attributes_values(
                attributes=attributes, element=detail))
            sizes.append(OpenNebulaNodeSize(**size_kwargs))

        return sizes

    def _ex_connection_class_kwargs(self):
        """
        Forward plain_auth to the connection class.

        :return: ``dict`` of extra :class:`OpenNebulaConnection` arguments
        """
        return {'plain_auth': self.plain_auth}
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/openstack.py b/awx/lib/site-packages/libcloud/compute/drivers/openstack.py
new file mode 100644
index 0000000000..c09cee6e0c
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/openstack.py
@@ -0,0 +1,2439 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+OpenStack driver
+"""
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+import warnings
+import base64
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import next
+from libcloud.utils.py3 import urlparse
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.common.openstack import OpenStackBaseConnection
+from libcloud.common.openstack import OpenStackDriverMixin
+from libcloud.common.types import MalformedResponseError, ProviderError
+from libcloud.utils.networking import is_private_subnet
+from libcloud.compute.base import NodeSize, NodeImage
+from libcloud.compute.base import (NodeDriver, Node, NodeLocation,
+ StorageVolume, VolumeSnapshot)
+from libcloud.compute.base import KeyPair
+from libcloud.compute.types import NodeState, Provider
+from libcloud.compute.types import KeyPairDoesNotExistError
+from libcloud.pricing import get_size_price
+from libcloud.common.base import Response
+from libcloud.utils.xml import findall
+
# Explicit public API of this module.
__all__ = [
    'OpenStack_1_0_Response',
    'OpenStack_1_0_Connection',
    'OpenStack_1_0_NodeDriver',
    'OpenStack_1_0_SharedIpGroup',
    'OpenStack_1_0_NodeIpAddresses',
    'OpenStack_1_1_Response',
    'OpenStack_1_1_Connection',
    'OpenStack_1_1_NodeDriver',
    'OpenStack_1_1_FloatingIpPool',
    'OpenStack_1_1_FloatingIpAddress',
    'OpenStackNodeDriver'
]

# XML namespace of Atom elements embedded in API responses.
ATOM_NAMESPACE = "http://www.w3.org/2005/Atom"

# API version the OpenStackNodeDriver factory selects by default
# (see OpenStackNodeDriver.__new__).
DEFAULT_API_VERSION = '1.1'
+
+
class OpenStackException(ProviderError):
    """
    Exception raised by the OpenStack drivers; inherits the HTTP status
    code and message handling from :class:`ProviderError`.
    """
    pass
+
+
class OpenStackResponse(Response):
    """
    Base response class for the OpenStack drivers.

    Dispatches body parsing on the Content-Type header and supports both
    XML (v1.0 API) and JSON (v1.1 API) payloads.
    """
    # Set by subclasses so parse errors can reference the concrete driver.
    node_driver = None

    def success(self):
        # Any 2xx status counts as success.
        i = int(self.status)
        return i >= 200 and i <= 299

    def has_content_type(self, content_type):
        # Substring match so suffixes such as "; charset=UTF-8" are
        # tolerated.
        content_type_value = self.headers.get('content-type') or ''
        content_type_value = content_type_value.lower()
        return content_type_value.find(content_type.lower()) > -1

    def parse_body(self):
        """
        Parse the response body into an ElementTree element (XML) or a
        dict/list (JSON); any other content type is returned verbatim.

        :raises MalformedResponseError: if the body cannot be parsed.
        """
        if self.status == httplib.NO_CONTENT or not self.body:
            return None

        if self.has_content_type('application/xml'):
            try:
                return ET.XML(self.body)
            # Fix: catch Exception instead of a bare except so that
            # SystemExit / KeyboardInterrupt are not swallowed here.
            except Exception:
                raise MalformedResponseError(
                    'Failed to parse XML',
                    body=self.body,
                    driver=self.node_driver)

        elif self.has_content_type('application/json'):
            try:
                return json.loads(self.body)
            except Exception:
                raise MalformedResponseError(
                    'Failed to parse JSON',
                    body=self.body,
                    driver=self.node_driver)
        else:
            return self.body

    def parse_error(self):
        """
        Build a human-readable error string from an error response body.

        :raises KeyPairDoesNotExistError: when the request context names a
            key pair and the API answered with a 404 fault code.
        """
        text = None
        body = self.parse_body()

        if self.has_content_type('application/xml'):
            # Element.iter() replaces the deprecated getiterator().
            text = '; '.join([err.text or '' for err in body.iter()
                              if err.text])
        elif self.has_content_type('application/json'):
            values = list(body.values())

            context = self.connection.context
            driver = self.connection.driver
            key_pair_name = context.get('key_pair_name', None)

            if len(values) > 0 and values[0]['code'] == 404 and key_pair_name:
                raise KeyPairDoesNotExistError(name=key_pair_name,
                                               driver=driver)
            elif len(values) > 0 and 'message' in values[0]:
                text = ';'.join([fault_data['message'] for fault_data
                                 in values])
            else:
                text = body
        else:
            # while we hope a response is always one of xml or json, we have
            # seen html or text in the past, its not clear we can really do
            # something to make it more readable here, so we will just pass
            # it along as the whole response body in the text variable.
            text = body

        return '%s %s %s' % (self.status, self.error, text)
+
+
class OpenStackComputeConnection(OpenStackBaseConnection):
    """
    Connection class for the OpenStack compute (nova) service.
    """
    # default config for http://devstack.org/
    service_type = 'compute'
    service_name = 'nova'
    service_region = 'RegionOne'

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        """
        Issue a request, ensuring a Content-Type header is present on
        bodied (POST/PUT) requests.
        """
        if not headers:
            headers = {}
        if not params:
            params = {}

        if method in ("POST", "PUT"):
            # Fix: previously the whole headers dict was replaced here,
            # silently discarding any caller-supplied headers.  Copy and
            # set the key instead (copy avoids mutating the caller's dict).
            headers = dict(headers)
            headers['Content-Type'] = self.default_content_type

        return super(OpenStackComputeConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)
+
+
class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin):
    """
    Base OpenStack node driver. Should not be used directly.
    """
    api_name = 'openstack'
    name = 'OpenStack'
    website = 'http://openstack.org/'

    # Maps server status strings returned by the API onto libcloud
    # NodeState constants.
    NODE_STATE_MAP = {
        'BUILD': NodeState.PENDING,
        'REBUILD': NodeState.PENDING,
        'ACTIVE': NodeState.RUNNING,
        'SUSPENDED': NodeState.TERMINATED,
        'DELETED': NodeState.TERMINATED,
        'QUEUE_RESIZE': NodeState.PENDING,
        'PREP_RESIZE': NodeState.PENDING,
        'VERIFY_RESIZE': NodeState.RUNNING,
        'PASSWORD': NodeState.PENDING,
        'RESCUE': NodeState.PENDING,
        'REBOOT': NodeState.REBOOTING,
        'HARD_REBOOT': NodeState.REBOOTING,
        'SHARE_IP': NodeState.PENDING,
        'SHARE_IP_NO_CONFIG': NodeState.PENDING,
        'DELETE_IP': NodeState.PENDING,
        'UNKNOWN': NodeState.UNKNOWN
    }

    def __new__(cls, key, secret=None, secure=True, host=None, port=None,
                api_version=DEFAULT_API_VERSION, **kwargs):
        """
        Factory dispatch: when this base class is instantiated directly,
        substitute the concrete driver class matching ``api_version``.
        """
        if cls is OpenStackNodeDriver:
            if api_version == '1.0':
                cls = OpenStack_1_0_NodeDriver
            elif api_version == '1.1':
                cls = OpenStack_1_1_NodeDriver
            else:
                raise NotImplementedError(
                    "No OpenStackNodeDriver found for API version %s" %
                    (api_version))
        return super(OpenStackNodeDriver, cls).__new__(cls)

    def __init__(self, *args, **kwargs):
        # The mixin consumes the ex_force_* connection kwargs before the
        # regular NodeDriver initialisation runs.
        OpenStackDriverMixin.__init__(self, **kwargs)
        super(OpenStackNodeDriver, self).__init__(*args, **kwargs)

    def destroy_node(self, node):
        """
        Destroy the given node.

        :rtype: ``bool``
        """
        uri = '/servers/%s' % (node.id)
        resp = self.connection.request(uri, method='DELETE')
        # The OpenStack and Rackspace documentation both say this API will
        # return a 204, but in-fact, everyone everywhere agrees it actually
        # returns a 202, so we are going to accept either, and someday,
        # someone will fix either the implementation or the documentation to
        # agree.
        return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)

    def reboot_node(self, node):
        # A plain reboot_node is a hard reboot; use ex_soft_reboot_node for
        # a soft one.
        return self._reboot_node(node, reboot_type='HARD')

    def list_nodes(self, ex_all_tenants=False):
        """
        List the nodes in a tenant

        :param ex_all_tenants: List nodes for all the tenants. Note: Your user
                               must have admin privileges for this
                               functionality to work.
        :type ex_all_tenants: ``bool``
        """
        params = {}
        if ex_all_tenants:
            params = {'all_tenants': 1}
        return self._to_nodes(
            self.connection.request('/servers/detail', params=params).object)

    def create_volume(self, size, name, location=None, snapshot=None):
        """
        Create a new volume.

        :param size: Size in GB.
        :param name: Used for both display_name and display_description.
        :param location: Passed through as the availability zone.
        :param snapshot: Not supported; raises if provided.

        :rtype: ``bool``
        """
        if snapshot:
            raise NotImplementedError(
                "create_volume does not yet support create from snapshot")
        return self.connection.request('/os-volumes',
                                       method='POST',
                                       data={
                                           'volume': {
                                               'display_name': name,
                                               'display_description': name,
                                               'size': size,
                                               'volume_type': None,
                                               'metadata': {
                                                   'contents': name,
                                               },
                                               'availability_zone': location,
                                           }
                                       }).success()

    def destroy_volume(self, volume):
        """
        Delete the given volume.

        :rtype: ``bool``
        """
        return self.connection.request('/os-volumes/%s' % volume.id,
                                       method='DELETE').success()

    def attach_volume(self, node, volume, device="auto"):
        """
        Attach ``volume`` to ``node``.

        :rtype: ``bool``
        """
        # when "auto" or None is provided for device, openstack will let
        # the guest OS pick the next available device (fi. /dev/vdb)
        return self.connection.request(
            '/servers/%s/os-volume_attachments' % node.id,
            method='POST',
            data={
                'volumeAttachment': {
                    'volumeId': volume.id,
                    'device': device,
                }
            }).success()

    def detach_volume(self, volume, ex_node=None):
        """
        Detach ``volume``; from ``ex_node`` only if given, otherwise from
        every node it is attached to.

        :raises OpenStackException: if any detach request fails.
        :rtype: ``bool``
        """
        # when ex_node is not provided, volume is detached from all nodes
        failed_nodes = []
        for attachment in volume.extra['attachments']:
            if not ex_node or ex_node.id == attachment['serverId']:
                response = self.connection.request(
                    '/servers/%s/os-volume_attachments/%s' %
                    (attachment['serverId'], attachment['id']),
                    method='DELETE')

                if not response.success():
                    failed_nodes.append(attachment['serverId'])
        if failed_nodes:
            raise OpenStackException(
                'detach_volume failed for nodes with id: %s' %
                ', '.join(failed_nodes), 500, self
            )
        return True

    def list_volumes(self):
        # Returns a list of StorageVolume objects.
        return self._to_volumes(
            self.connection.request('/os-volumes').object)

    def ex_get_volume(self, volumeId):
        """
        Retrieve a single volume by id.

        :rtype: :class:`StorageVolume`
        """
        return self._to_volume(
            self.connection.request('/os-volumes/%s' % volumeId).object)

    def list_images(self, location=None, ex_only_active=True):
        """
        Lists all active images

        @inherits: :class:`NodeDriver.list_images`

        :param ex_only_active: True if list only active
        :type ex_only_active: ``bool``

        """
        return self._to_images(
            self.connection.request('/images/detail').object, ex_only_active)

    def get_image(self, image_id):
        """
        Get an image based on a image_id

        @inherits: :class:`NodeDriver.get_image`

        :param image_id: Image identifier
        :type image_id: ``str``

        :return: A NodeImage object
        :rtype: :class:`NodeImage`

        """
        return self._to_image(self.connection.request(
            '/images/%s' % (image_id,)).object['image'])

    def list_sizes(self, location=None):
        # Flavors map onto libcloud sizes.
        return self._to_sizes(
            self.connection.request('/flavors/detail').object)

    def list_locations(self):
        # OpenStack does not expose locations through this API; return a
        # single empty placeholder location.
        return [NodeLocation(0, '', '', self)]

    def _ex_connection_class_kwargs(self):
        # Forward the ex_force_* values collected by OpenStackDriverMixin
        # to the connection class.
        return self.openstack_connection_kwargs()

    def ex_get_node_details(self, node_id):
        """
        Lists details of the specified server.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :rtype: :class:`Node`
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        uri = '/servers/%s' % (node_id)
        resp = self.connection.request(uri, method='GET')
        if resp.status == httplib.NOT_FOUND:
            return None

        return self._to_node_from_obj(resp.object)

    def ex_soft_reboot_node(self, node):
        """
        Soft reboots the specified server

        :param node: node
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        return self._reboot_node(node, reboot_type='SOFT')

    def ex_hard_reboot_node(self, node):
        """
        Hard reboots the specified server

        :param node: node
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        return self._reboot_node(node, reboot_type='HARD')
+
+
class OpenStackNodeSize(NodeSize):
    """
    NodeSize class for the OpenStack.org driver.

    Following the example of OpenNebula.org driver
    and following guidelines:
    https://issues.apache.org/jira/browse/LIBCLOUD-119
    """

    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 vcpus=None):
        super(OpenStackNodeSize, self).__init__(id=id, name=name, ram=ram,
                                                disk=disk,
                                                bandwidth=bandwidth,
                                                price=price, driver=driver)
        # Number of virtual CPUs exposed by the flavor (may be None).
        self.vcpus = vcpus

    def __repr__(self):
        # Fix: the format string was empty ('' % (...)), which raises
        # "TypeError: not all arguments converted during string formatting"
        # whenever repr() is taken.  Restore a descriptive representation.
        return (('<OpenStackNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
                 'bandwidth=%s, price=%s, driver=%s, vcpus=%s>')
                % (self.id, self.name, self.ram, self.disk, self.bandwidth,
                   self.price, self.driver.name, self.vcpus))
+
+
class OpenStack_1_0_Response(OpenStackResponse):
    """
    XML response class used by the v1.0 driver.
    """
    def __init__(self, *args, **kwargs):
        # done because of a circular reference from
        # NodeDriver -> Connection -> Response
        self.node_driver = OpenStack_1_0_NodeDriver
        super(OpenStack_1_0_Response, self).__init__(*args, **kwargs)
+
+
class OpenStack_1_0_Connection(OpenStackComputeConnection):
    """
    XML-based connection for the v1.0 compute API.
    """
    responseCls = OpenStack_1_0_Response
    default_content_type = 'application/xml; charset=UTF-8'
    accept_format = 'application/xml'
    # Namespace used for all request/response XML documents.
    XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0'
+
+
class OpenStack_1_0_NodeDriver(OpenStackNodeDriver):
    """
    OpenStack node driver.

    Extra node attributes:
        - password: root password, available after create.
        - hostId: represents the host your cloud server runs on
        - imageId: id of image
        - flavorId: id of flavor
    """
    connectionCls = OpenStack_1_0_Connection
    type = Provider.OPENSTACK

    features = {'create_node': ['generates_password']}

    def __init__(self, *args, **kwargs):
        # Cache the XML namespace on the instance for the many helpers
        # that build or parse XML documents.
        self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
                                                    None))
        self.XML_NAMESPACE = self.connectionCls.XML_NAMESPACE
        super(OpenStack_1_0_NodeDriver, self).__init__(*args, **kwargs)

    def _to_images(self, object, ex_only_active):
        # Convert an <images> document to NodeImage objects, optionally
        # skipping images that are not ACTIVE.
        images = []
        for image in findall(object, 'image', self.XML_NAMESPACE):
            if ex_only_active and image.get('status') != 'ACTIVE':
                continue
            images.append(self._to_image(image))

        return images

    def _to_image(self, element):
        # Convert a single <image> element to a NodeImage.
        return NodeImage(id=element.get('id'),
                         name=element.get('name'),
                         driver=self.connection.driver,
                         extra={'updated': element.get('updated'),
                                'created': element.get('created'),
                                'status': element.get('status'),
                                'serverId': element.get('serverId'),
                                'progress': element.get('progress'),
                                'minDisk': element.get('minDisk'),
                                'minRam': element.get('minRam')
                                }
                         )

    def _change_password_or_name(self, node, name=None, password=None):
        # Shared implementation of ex_set_password / ex_set_server_name;
        # both are a PUT of a <server> element.
        uri = '/servers/%s' % (node.id)

        if not name:
            name = node.name

        body = {'xmlns': self.XML_NAMESPACE,
                'name': name}

        if password is not None:
            body['adminPass'] = password

        server_elm = ET.Element('server', body)

        resp = self.connection.request(
            uri, method='PUT', data=ET.tostring(server_elm))

        if resp.status == httplib.NO_CONTENT and password is not None:
            node.extra['password'] = password

        return resp.status == httplib.NO_CONTENT

    def create_node(self, **kwargs):
        """
        Create a new node

        @inherits: :class:`NodeDriver.create_node`

        :keyword    ex_metadata: Key/Value metadata to associate with a node
        :type       ex_metadata: ``dict``

        :keyword    ex_files:   File Path => File contents to create on
                                the node
        :type       ex_files:   ``dict``

        :keyword    ex_shared_ip_group_id: The server is launched into
            that shared IP group
        :type       ex_shared_ip_group_id: ``str``
        """
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        attributes = {'xmlns': self.XML_NAMESPACE,
                      'name': name,
                      'imageId': str(image.id),
                      'flavorId': str(size.id)}

        if 'ex_shared_ip_group' in kwargs:
            # Deprecate this. Be explicit and call the variable
            # ex_shared_ip_group_id since user needs to pass in the id, not the
            # name.
            warnings.warn('ex_shared_ip_group argument is deprecated.'
                          ' Please use ex_shared_ip_group_id')

        if 'ex_shared_ip_group_id' in kwargs:
            shared_ip_group_id = kwargs['ex_shared_ip_group_id']
            attributes['sharedIpGroupId'] = shared_ip_group_id

        server_elm = ET.Element('server', attributes)

        metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {}))
        if metadata_elm:
            server_elm.append(metadata_elm)

        files_elm = self._files_to_xml(kwargs.get("ex_files", {}))
        if files_elm:
            server_elm.append(files_elm)

        resp = self.connection.request("/servers",
                                       method='POST',
                                       data=ET.tostring(server_elm))
        return self._to_node(resp.object)

    def ex_set_password(self, node, password):
        """
        Sets the Node's root password.

        This will reboot the instance to complete the operation.

        :class:`Node.extra['password']` will be set to the new value if the
        operation was successful.

        :param node: node to set password
        :type node: :class:`Node`

        :param password: new password.
        :type password: ``str``

        :rtype: ``bool``
        """
        return self._change_password_or_name(node, password=password)

    def ex_set_server_name(self, node, name):
        """
        Sets the Node's name.

        This will reboot the instance to complete the operation.

        :param node: node to set name
        :type node: :class:`Node`

        :param name: new name
        :type name: ``str``

        :rtype: ``bool``
        """
        return self._change_password_or_name(node, name=name)

    def ex_resize(self, node, size):
        """
        Change an existing server flavor / scale the server up or down.

        :param node: node to resize.
        :type node: :class:`Node`

        :param size: new size.
        :type size: :class:`NodeSize`

        :rtype: ``bool``
        """
        elm = ET.Element(
            'resize',
            {'xmlns': self.XML_NAMESPACE,
             'flavorId': str(size.id)}
        )

        resp = self.connection.request("/servers/%s/action" % (node.id),
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.ACCEPTED

    def ex_confirm_resize(self, node):
        """
        Confirm a resize request which is currently in progress. If a resize
        request is not explicitly confirmed or reverted it's automatically
        confirmed after 24 hours.

        For more info refer to the API documentation: http://goo.gl/zjFI1

        :param node: node for which the resize request will be confirmed.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        elm = ET.Element(
            'confirmResize',
            {'xmlns': self.XML_NAMESPACE},
        )

        resp = self.connection.request("/servers/%s/action" % (node.id),
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.NO_CONTENT

    def ex_revert_resize(self, node):
        """
        Revert a resize request which is currently in progress.
        All resizes are automatically confirmed after 24 hours if they have
        not already been confirmed explicitly or reverted.

        For more info refer to the API documentation: http://goo.gl/AizBu

        :param node: node for which the resize request will be reverted.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        elm = ET.Element(
            'revertResize',
            {'xmlns': self.XML_NAMESPACE}
        )

        resp = self.connection.request("/servers/%s/action" % (node.id),
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.NO_CONTENT

    def ex_rebuild(self, node_id, image_id):
        """
        Rebuilds the specified server.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :param image_id: ID of the image which should be used
        :type image_id: ``str``

        :rtype: ``bool``
        """
        # @TODO: Remove those ifs in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        if isinstance(image_id, NodeImage):
            image_id = image_id.id

        elm = ET.Element(
            'rebuild',
            {'xmlns': self.XML_NAMESPACE,
             'imageId': image_id}
        )

        resp = self.connection.request("/servers/%s/action" % node_id,
                                       method='POST',
                                       data=ET.tostring(elm))
        return resp.status == httplib.ACCEPTED

    def ex_create_ip_group(self, group_name, node_id=None):
        """
        Creates a shared IP group.

        :param group_name: group name which should be used
        :type group_name: ``str``

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :rtype: :class:`OpenStack_1_0_SharedIpGroup`
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        group_elm = ET.Element(
            'sharedIpGroup',
            {'xmlns': self.XML_NAMESPACE,
             'name': group_name}
        )

        if node_id:
            ET.SubElement(
                group_elm,
                'server',
                {'id': node_id}
            )

        resp = self.connection.request('/shared_ip_groups',
                                       method='POST',
                                       data=ET.tostring(group_elm))
        return self._to_shared_ip_group(resp.object)

    def ex_list_ip_groups(self, details=False):
        """
        Lists IDs and names for shared IP groups.
        If details lists all details for shared IP groups.

        :param details: True if details is required
        :type details: ``bool``

        :rtype: ``list`` of :class:`OpenStack_1_0_SharedIpGroup`
        """
        uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups'
        resp = self.connection.request(uri,
                                       method='GET')
        groups = findall(resp.object, 'sharedIpGroup',
                         self.XML_NAMESPACE)
        return [self._to_shared_ip_group(el) for el in groups]

    def ex_delete_ip_group(self, group_id):
        """
        Deletes the specified shared IP group.

        :param group_id: group id which should be used
        :type group_id: ``str``

        :rtype: ``bool``
        """
        uri = '/shared_ip_groups/%s' % group_id
        resp = self.connection.request(uri, method='DELETE')
        return resp.status == httplib.NO_CONTENT

    def ex_share_ip(self, group_id, node_id, ip, configure_node=True):
        """
        Shares an IP address to the specified server.

        :param group_id: group id which should be used
        :type group_id: ``str``

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :param ip: ip which should be used
        :type ip: ``str``

        :param configure_node: configure node
        :type configure_node: ``bool``

        :rtype: ``bool``
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        if configure_node:
            str_configure = 'true'
        else:
            str_configure = 'false'

        elm = ET.Element(
            'shareIp',
            {'xmlns': self.XML_NAMESPACE,
             'sharedIpGroupId': group_id,
             'configureServer': str_configure},
        )

        uri = '/servers/%s/ips/public/%s' % (node_id, ip)

        resp = self.connection.request(uri,
                                       method='PUT',
                                       data=ET.tostring(elm))
        return resp.status == httplib.ACCEPTED

    def ex_unshare_ip(self, node_id, ip):
        """
        Removes a shared IP address from the specified server.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :param ip: ip which should be used
        :type ip: ``str``

        :rtype: ``bool``
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        uri = '/servers/%s/ips/public/%s' % (node_id, ip)

        resp = self.connection.request(uri,
                                       method='DELETE')
        return resp.status == httplib.ACCEPTED

    def ex_list_ip_addresses(self, node_id):
        """
        List all server addresses.

        :param node_id: ID of the node which should be used
        :type node_id: ``str``

        :rtype: :class:`OpenStack_1_0_NodeIpAddresses`
        """
        # @TODO: Remove this if in 0.6
        if isinstance(node_id, Node):
            node_id = node_id.id

        uri = '/servers/%s/ips' % node_id
        resp = self.connection.request(uri,
                                       method='GET')
        return self._to_ip_addresses(resp.object)

    def _metadata_to_xml(self, metadata):
        # Build a <metadata> element from a dict, or None if it is empty.
        if len(metadata) == 0:
            return None

        metadata_elm = ET.Element('metadata')
        for k, v in list(metadata.items()):
            meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k)})
            meta_elm.text = str(v)

        return metadata_elm

    def _files_to_xml(self, files):
        # Build a <personality> element from a path -> contents dict, or
        # None if it is empty.
        if len(files) == 0:
            return None

        personality_elm = ET.Element('personality')
        for k, v in list(files.items()):
            file_elm = ET.SubElement(personality_elm,
                                     'file',
                                     {'path': str(k)})
            # Fix: b64encode returns bytes on Python 3 and ElementTree
            # cannot serialize a bytes .text value; decode to a str.
            # Base64 output is always plain ASCII, so this is lossless.
            file_elm.text = base64.b64encode(b(v)).decode('ascii')

        return personality_elm

    def _reboot_node(self, node, reboot_type='SOFT'):
        # Issue a reboot action; reboot_type is 'SOFT' or 'HARD'.
        resp = self._node_action(node, ['reboot', ('type', reboot_type)])
        return resp.status == httplib.ACCEPTED

    def _node_action(self, node, body):
        # POST an action document to the server's /action endpoint.  A
        # list body is rendered as a single XML element whose tag is the
        # first item and whose attributes are the remaining (name, value)
        # pairs.
        if isinstance(body, list):
            attr = ' '.join(['%s="%s"' % (item[0], item[1])
                             for item in body[1:]])
            body = '<%s xmlns="%s" %s/>' % (body[0], self.XML_NAMESPACE, attr)
        uri = '/servers/%s/action' % (node.id)
        resp = self.connection.request(uri, method='POST', data=body)
        return resp

    def _to_nodes(self, object):
        # Convert a <servers> document to a list of Node objects.
        node_elements = findall(object, 'server', self.XML_NAMESPACE)
        return [self._to_node(el) for el in node_elements]

    def _to_node_from_obj(self, obj):
        # Convert a document containing a single <server> to a Node.
        return self._to_node(findall(obj, 'server', self.XML_NAMESPACE)[0])

    def _to_node(self, el):
        # Convert a <server> element to a Node.
        def get_ips(el):
            return [ip.get('addr') for ip in el]

        def get_meta_dict(el):
            d = {}
            for meta in el:
                d[meta.get('key')] = meta.text
            return d

        public_ip = get_ips(findall(el, 'addresses/public/ip',
                                    self.XML_NAMESPACE))
        private_ip = get_ips(findall(el, 'addresses/private/ip',
                                     self.XML_NAMESPACE))
        metadata = get_meta_dict(findall(el, 'metadata/meta',
                                         self.XML_NAMESPACE))

        n = Node(id=el.get('id'),
                 name=el.get('name'),
                 state=self.NODE_STATE_MAP.get(
                     el.get('status'), NodeState.UNKNOWN),
                 public_ips=public_ip,
                 private_ips=private_ip,
                 driver=self.connection.driver,
                 extra={
                     'password': el.get('adminPass'),
                     'hostId': el.get('hostId'),
                     'imageId': el.get('imageId'),
                     'flavorId': el.get('flavorId'),
                     'uri': "https://%s%s/servers/%s" % (
                         self.connection.host,
                         self.connection.request_path, el.get('id')),
                     'metadata': metadata})
        return n

    def _to_sizes(self, object):
        # Convert a <flavors> document to OpenStackNodeSize objects.
        elements = findall(object, 'flavor', self.XML_NAMESPACE)
        return [self._to_size(el) for el in elements]

    def _to_size(self, el):
        # Convert a single <flavor> element to an OpenStackNodeSize.
        vcpus = int(el.get('vcpus')) if el.get('vcpus', None) else None
        return OpenStackNodeSize(id=el.get('id'),
                                 name=el.get('name'),
                                 ram=int(el.get('ram')),
                                 disk=int(el.get('disk')),
                                 # XXX: needs hardcode
                                 vcpus=vcpus,
                                 bandwidth=None,
                                 # Hardcoded
                                 price=self._get_size_price(el.get('id')),
                                 driver=self.connection.driver)

    def ex_limits(self):
        """
        Extra call to get account's limits, such as
        rates (for example amount of POST requests per day)
        and absolute limits like total amount of available
        RAM to be used by servers.

        :return: dict with keys 'rate' and 'absolute'
        :rtype: ``dict``
        """

        def _to_rate(el):
            rate = {}
            for item in list(el.items()):
                rate[item[0]] = item[1]

            return rate

        def _to_absolute(el):
            return {el.get('name'): el.get('value')}

        limits = self.connection.request("/limits").object
        rate = [_to_rate(el) for el in findall(limits, 'rate/limit',
                                               self.XML_NAMESPACE)]
        absolute = {}
        for item in findall(limits, 'absolute/limit',
                            self.XML_NAMESPACE):
            absolute.update(_to_absolute(item))

        return {"rate": rate, "absolute": absolute}

    def create_image(self, node, name, description=None, reboot=True):
        """Create an image for node.

        @inherits: :class:`NodeDriver.create_image`

        :param node: node to use as a base for image
        :type node: :class:`Node`

        :param name: name for new image
        :type name: ``str``

        :rtype: :class:`NodeImage`
        """

        image_elm = ET.Element(
            'image',
            {'xmlns': self.XML_NAMESPACE,
             'name': name,
             'serverId': node.id}
        )

        return self._to_image(
            self.connection.request("/images", method="POST",
                                    data=ET.tostring(image_elm)).object)

    def delete_image(self, image):
        """Delete an image for node.

        @inherits: :class:`NodeDriver.delete_image`

        :param image: the image to be deleted
        :type image: :class:`NodeImage`

        :rtype: ``bool``
        """
        uri = '/images/%s' % image.id
        resp = self.connection.request(uri, method='DELETE')
        return resp.status == httplib.NO_CONTENT

    def _to_shared_ip_group(self, el):
        # Convert a <sharedIpGroup> element; servers is None when the
        # response carries no <servers> element (non-detail listing).
        servers_el = findall(el, 'servers', self.XML_NAMESPACE)
        if servers_el:
            servers = [s.get('id')
                       for s in findall(servers_el[0], 'server',
                                        self.XML_NAMESPACE)]
        else:
            servers = None
        return OpenStack_1_0_SharedIpGroup(id=el.get('id'),
                                           name=el.get('name'),
                                           servers=servers)

    def _to_ip_addresses(self, el):
        # Convert an <addresses> document into the public/private pair.
        public_ips = [ip.get('addr') for ip in findall(
            findall(el, 'public', self.XML_NAMESPACE)[0],
            'ip', self.XML_NAMESPACE)]
        private_ips = [ip.get('addr') for ip in findall(
            findall(el, 'private', self.XML_NAMESPACE)[0],
            'ip', self.XML_NAMESPACE)]

        return OpenStack_1_0_NodeIpAddresses(public_ips, private_ips)

    def _get_size_price(self, size_id):
        # Look up the price for a flavor id; unknown flavors are free.
        try:
            return get_size_price(driver_type='compute',
                                  driver_name=self.api_name,
                                  size_id=size_id)
        except KeyError:
            return 0.0
+
+
class OpenStack_1_0_SharedIpGroup(object):
    """
    Information describing a shared IP group.
    """

    def __init__(self, id, name, servers=None):
        # The API may return numeric ids; normalise to a string.
        self.name = name
        self.servers = servers
        self.id = str(id)
+
+
class OpenStack_1_0_NodeIpAddresses(object):
    """
    Container pairing a node's public IP list with its private IP list.
    """

    def __init__(self, public_addresses, private_addresses):
        # Both attributes hold plain lists of address strings.
        self.private_addresses = private_addresses
        self.public_addresses = public_addresses
+
+
class OpenStack_1_1_Response(OpenStackResponse):
    """
    JSON response class used by the v1.1 driver.
    """
    def __init__(self, *args, **kwargs):
        # done because of a circular reference from
        # NodeDriver -> Connection -> Response
        self.node_driver = OpenStack_1_1_NodeDriver
        super(OpenStack_1_1_Response, self).__init__(*args, **kwargs)
+
+
class OpenStackNetwork(object):
    """
    A Virtual Network.
    """

    def __init__(self, id, name, cidr, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.cidr = cidr
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        # Fix: the format string was empty ('' % (...)), which raises
        # TypeError; restore a descriptive representation.
        return '<OpenStackNetwork id="%s" name="%s" cidr="%s">' % (
            self.id, self.name, self.cidr)
+
+
class OpenStackSecurityGroup(object):
    """
    A Security Group.
    """

    def __init__(self, id, tenant_id, name, description, driver, rules=None,
                 extra=None):
        """
        Constructor.

        :keyword    id: Group id.
        :type       id: ``str``

        :keyword    tenant_id: Owner of the security group.
        :type       tenant_id: ``str``

        :keyword    name: Human-readable name for the security group. Might
                          not be unique.
        :type       name: ``str``

        :keyword    description: Human-readable description of a security
                                 group.
        :type       description: ``str``

        :keyword    rules: Rules associated with this group.
        :type       rules: ``list`` of
                           :class:`OpenStackSecurityGroupRule`

        :keyword    extra: Extra attributes associated with this group.
        :type       extra: ``dict``
        """
        self.id = id
        self.tenant_id = tenant_id
        self.name = name
        self.description = description
        self.driver = driver
        self.rules = rules or []
        self.extra = extra or {}

    def __repr__(self):
        # Fix: the format string was empty ('' % (...)), which raises
        # TypeError; restore a descriptive representation.
        return ('<OpenStackSecurityGroup id=%s tenant_id=%s name=%s '
                'description=%s>' % (self.id, self.tenant_id, self.name,
                                     self.description))
+
+
class OpenStackSecurityGroupRule(object):
    """
    A Rule of a Security Group.
    """

    def __init__(self, id, parent_group_id, ip_protocol, from_port, to_port,
                 driver, ip_range=None, group=None, tenant_id=None,
                 extra=None):
        """
        Constructor.

        :keyword    id: Rule id.
        :type       id: ``str``

        :keyword    parent_group_id: ID of the parent security group.
        :type       parent_group_id: ``str``

        :keyword    ip_protocol: IP Protocol (icmp, tcp, udp, etc).
        :type       ip_protocol: ``str``

        :keyword    from_port: Port at start of range.
        :type       from_port: ``int``

        :keyword    to_port: Port at end of range.
        :type       to_port: ``int``

        :keyword    ip_range: CIDR for address range.
        :type       ip_range: ``str``

        :keyword    group: Name of a source security group to apply to rule.
        :type       group: ``str``

        :keyword    tenant_id: Owner of the security group.
        :type       tenant_id: ``str``

        :keyword    extra: Extra attributes associated with this rule.
        :type       extra: ``dict``
        """
        self.id = id
        self.parent_group_id = parent_group_id
        self.ip_protocol = ip_protocol
        self.from_port = from_port
        self.to_port = to_port
        self.driver = driver
        # A rule targets either a CIDR range or a source group, never both;
        # the unused one keeps its empty default.
        self.ip_range = ''
        self.group = {}

        if group is None:
            self.ip_range = ip_range
        else:
            self.group = {'name': group, 'tenant_id': tenant_id}

        self.tenant_id = tenant_id
        self.extra = extra or {}

    def __repr__(self):
        # Fix: the format string was empty ('' % (...)), which raises
        # TypeError; restore a descriptive representation.
        return ('<OpenStackSecurityGroupRule id=%s parent_group_id=%s '
                'ip_protocol=%s from_port=%s to_port=%s>'
                % (self.id, self.parent_group_id, self.ip_protocol,
                   self.from_port, self.to_port))
+
+
class OpenStackKeyPair(object):
    """
    A KeyPair.
    """

    def __init__(self, name, fingerprint, public_key, driver, private_key=None,
                 extra=None):
        """
        Constructor.

        :keyword    name: Name of the KeyPair.
        :type       name: ``str``

        :keyword    fingerprint: Fingerprint of the KeyPair
        :type       fingerprint: ``str``

        :keyword    public_key: Public key in OpenSSH format.
        :type       public_key: ``str``

        :keyword    private_key: Private key in PEM format.
        :type       private_key: ``str``

        :keyword    extra: Extra attributes associated with this KeyPair.
        :type       extra: ``dict``
        """
        self.name = name
        self.fingerprint = fingerprint
        self.public_key = public_key
        self.private_key = private_key
        self.driver = driver
        self.extra = extra or {}

    def __repr__(self):
        # Fix: the format string was empty ('' % (...)), which raises
        # TypeError; restore a descriptive representation.
        return ('<OpenStackKeyPair name=%s fingerprint=%s public_key=%s>'
                % (self.name, self.fingerprint, self.public_key))
+
+
class OpenStack_1_1_Connection(OpenStackComputeConnection):
    """
    JSON-based connection for the v1.1 compute API.
    """
    responseCls = OpenStack_1_1_Response
    accept_format = 'application/json'
    default_content_type = 'application/json; charset=UTF-8'

    def encode_data(self, data):
        # Request bodies are plain JSON documents.
        return json.dumps(data)
+
+
class OpenStack_1_1_NodeDriver(OpenStackNodeDriver):
    """
    OpenStack node driver.
    """
    connectionCls = OpenStack_1_1_Connection
    type = Provider.OPENSTACK

    # create_node relies on the API generating a root password (adminPass).
    features = {"create_node": ["generates_password"]}
    # URL prefix of the os-networks extension; subclasses may override it.
    _networks_url_prefix = '/os-networks'
+
    def __init__(self, *args, **kwargs):
        # Stash any forced API version as a string (becomes 'None' when the
        # kwarg is absent) before delegating setup to the base driver.
        self._ex_force_api_version = str(kwargs.pop('ex_force_api_version',
                                                    None))
        super(OpenStack_1_1_NodeDriver, self).__init__(*args, **kwargs)
+
    def create_node(self, **kwargs):
        """Create a new node

        @inherits:  :class:`NodeDriver.create_node`

        :keyword    ex_keyname:  The name of the key pair
        :type       ex_keyname:  ``str``

        :keyword    ex_userdata: String containing user data
                                 see
                                 https://help.ubuntu.com/community/CloudInit
        :type       ex_userdata: ``str``

        :keyword    ex_security_groups: List of security groups to assign to
                                        the node
        :type       ex_security_groups: ``list`` of
                                        :class:`OpenStackSecurityGroup`

        :keyword    ex_metadata: Key/Value metadata to associate with a node
        :type       ex_metadata: ``dict``

        :keyword    ex_files:   File Path => File contents to create on
                                the node
        :type       ex_files:   ``dict``


        :keyword    networks: The server is launched into a set of Networks.
        :type       networks: :class:`OpenStackNetwork`

        :keyword    ex_disk_config: Name of the disk configuration.
                                    Can be either ``AUTO`` or ``MANUAL``.
        :type       ex_disk_config: ``str``

        :keyword    ex_admin_pass: The root password for the node
        :type       ex_admin_pass: ``str``

        :keyword    ex_availability_zone: Nova availability zone for the node
        :type       ex_availability_zone: ``str``
        """
        server_params = self._create_args_to_params(None, **kwargs)

        resp = self.connection.request("/servers",
                                       method='POST',
                                       data={'server': server_params})

        create_response = resp.object['server']
        # The create response is partial; fetch the full server record.
        server_resp = self.connection.request(
            '/servers/%s' % create_response['id'])
        server_object = server_resp.object['server']

        # adminPass is not always present
        # http://docs.openstack.org/essex/openstack-compute/admin/
        # content/configuring-compute-API.html#d6e1833
        server_object['adminPass'] = create_response.get('adminPass', None)

        return self._to_node(server_object)
+
+ def _to_images(self, obj, ex_only_active):
+ images = []
+ for image in obj['images']:
+ if ex_only_active and image.get('status') != 'ACTIVE':
+ continue
+ images.append(self._to_image(image))
+
+ return images
+
    def _to_image(self, api_image):
        """Convert a raw API image dict into a ``NodeImage``."""
        # 'server' may be missing from the response; default to {} so the
        # .get() for serverId below is always safe.
        server = api_image.get('server', {})
        return NodeImage(
            id=api_image['id'],
            name=api_image['name'],
            driver=self,
            extra=dict(
                updated=api_image['updated'],
                created=api_image['created'],
                status=api_image['status'],
                progress=api_image.get('progress'),
                metadata=api_image.get('metadata'),
                serverId=server.get('id'),
                minDisk=api_image.get('minDisk'),
                minRam=api_image.get('minRam'),
            )
        )
+
+ def _to_nodes(self, obj):
+ servers = obj['servers']
+ return [self._to_node(server) for server in servers]
+
+ def _to_volumes(self, obj):
+ volumes = obj['volumes']
+ return [self._to_volume(volume) for volume in volumes]
+
+ def _to_snapshots(self, obj):
+ snapshots = obj['snapshots']
+ return [self._to_snapshot(snapshot) for snapshot in snapshots]
+
+ def _to_sizes(self, obj):
+ flavors = obj['flavors']
+ return [self._to_size(flavor) for flavor in flavors]
+
    def _create_args_to_params(self, node, **kwargs):
        """
        Translate create_node/ex_rebuild/ex_resize keyword arguments into the
        'server' payload expected by the API. ``node`` supplies fallback
        values for name/image/size when the kwargs omit them (and may be
        ``None`` when all of them are provided, as in create_node).
        """
        server_params = {
            'name': kwargs.get('name'),
            'metadata': kwargs.get('ex_metadata', {}),
            'personality': self._files_to_personality(kwargs.get("ex_files",
                                                                 {}))
        }

        if 'ex_availability_zone' in kwargs:
            server_params['availability_zone'] = kwargs['ex_availability_zone']

        if 'ex_keyname' in kwargs:
            server_params['key_name'] = kwargs['ex_keyname']

        if 'ex_userdata' in kwargs:
            # User data is transmitted base64-encoded.
            server_params['user_data'] = base64.b64encode(
                b(kwargs['ex_userdata'])).decode('ascii')

        if 'ex_disk_config' in kwargs:
            server_params['OS-DCF:diskConfig'] = kwargs['ex_disk_config']

        if 'ex_admin_pass' in kwargs:
            server_params['adminPass'] = kwargs['ex_admin_pass']

        if 'networks' in kwargs:
            # The API only needs the network UUIDs.
            networks = kwargs['networks']
            networks = [{'uuid': network.id} for network in networks]
            server_params['networks'] = networks

        if 'ex_security_groups' in kwargs:
            # The API identifies security groups by name.
            server_params['security_groups'] = []
            for security_group in kwargs['ex_security_groups']:
                name = security_group.name
                server_params['security_groups'].append({'name': name})

        if 'name' in kwargs:
            server_params['name'] = kwargs.get('name')
        else:
            server_params['name'] = node.name

        if 'image' in kwargs:
            server_params['imageRef'] = kwargs.get('image').id
        else:
            server_params['imageRef'] = node.extra.get('imageId')

        if 'size' in kwargs:
            server_params['flavorRef'] = kwargs.get('size').id
        else:
            server_params['flavorRef'] = node.extra.get('flavorId')

        return server_params
+
+ def _files_to_personality(self, files):
+ rv = []
+
+ for k, v in list(files.items()):
+ rv.append({'path': k, 'contents': base64.b64encode(b(v))})
+
+ return rv
+
+ def _reboot_node(self, node, reboot_type='SOFT'):
+ resp = self._node_action(node, 'reboot', type=reboot_type)
+ return resp.status == httplib.ACCEPTED
+
+ def ex_set_password(self, node, password):
+ """
+ Changes the administrator password for a specified server.
+
+ :param node: Node to rebuild.
+ :type node: :class:`Node`
+
+ :param password: The administrator password.
+ :type password: ``str``
+
+ :rtype: ``bool``
+ """
+ resp = self._node_action(node, 'changePassword', adminPass=password)
+ node.extra['password'] = password
+ return resp.status == httplib.ACCEPTED
+
    def ex_rebuild(self, node, image, **kwargs):
        """
        Rebuild a Node.

        :param node: Node to rebuild.
        :type node: :class:`Node`

        :param image: New image to use.
        :type image: :class:`NodeImage`

        :keyword ex_metadata: Key/Value metadata to associate with a node
        :type ex_metadata: ``dict``

        :keyword ex_files: File Path => File contents to create on
                           the node
        :type ex_files: ``dict``

        :keyword ex_keyname:  Name of existing public key to inject into
                              instance
        :type ex_keyname:  ``str``

        :keyword ex_userdata: String containing user data
                              see
                              https://help.ubuntu.com/community/CloudInit
        :type ex_userdata: ``str``

        :keyword ex_security_groups: List of security groups to assign to
                                     the node
        :type ex_security_groups: ``list`` of
                                  :class:`OpenStackSecurityGroup`

        :keyword ex_disk_config: Name of the disk configuration.
                                 Can be either ``AUTO`` or ``MANUAL``.
        :type ex_disk_config: ``str``

        :rtype: ``bool``
        """
        # Reuse the create_node parameter translation; ``node`` supplies the
        # name/flavor fallbacks while ``image`` forces the new imageRef.
        server_params = self._create_args_to_params(node, image=image,
                                                    **kwargs)
        resp = self._node_action(node, 'rebuild', **server_params)
        return resp.status == httplib.ACCEPTED
+
+ def ex_resize(self, node, size):
+ """
+ Change a node size.
+
+ :param node: Node to resize.
+ :type node: :class:`Node`
+
+ :type size: :class:`NodeSize`
+ :param size: New size to use.
+
+ :rtype: ``bool``
+ """
+ server_params = self._create_args_to_params(node, size=size)
+ resp = self._node_action(node, 'resize', **server_params)
+ return resp.status == httplib.ACCEPTED
+
+ def ex_confirm_resize(self, node):
+ """
+ Confirms a pending resize action.
+
+ :param node: Node to resize.
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ resp = self._node_action(node, 'confirmResize')
+ return resp.status == httplib.NO_CONTENT
+
+ def ex_revert_resize(self, node):
+ """
+ Cancels and reverts a pending resize action.
+
+ :param node: Node to resize.
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ resp = self._node_action(node, 'revertResize')
+ return resp.status == httplib.ACCEPTED
+
+ def create_image(self, node, name, metadata=None):
+ """
+ Creates a new image.
+
+ :param node: Node
+ :type node: :class:`Node`
+
+ :param name: The name for the new image.
+ :type name: ``str``
+
+ :param metadata: Key and value pairs for metadata.
+ :type metadata: ``dict``
+
+ :rtype: :class:`NodeImage`
+ """
+ optional_params = {}
+ if metadata:
+ optional_params['metadata'] = metadata
+ resp = self._node_action(node, 'createImage', name=name,
+ **optional_params)
+ image_id = self._extract_image_id_from_url(resp.headers['location'])
+ return self.get_image(image_id=image_id)
+
+ def ex_set_server_name(self, node, name):
+ """
+ Sets the Node's name.
+
+ :param node: Node
+ :type node: :class:`Node`
+
+ :param name: The name of the server.
+ :type name: ``str``
+
+ :rtype: :class:`Node`
+ """
+ return self._update_node(node, name=name)
+
+ def ex_get_metadata(self, node):
+ """
+ Get a Node's metadata.
+
+ :param node: Node
+ :type node: :class:`Node`
+
+ :return: Key/Value metadata associated with node.
+ :rtype: ``dict``
+ """
+ return self.connection.request(
+ '/servers/%s/metadata' % (node.id,),
+ method='GET',).object['metadata']
+
+ def ex_set_metadata(self, node, metadata):
+ """
+ Sets the Node's metadata.
+
+ :param node: Node
+ :type node: :class:`Node`
+
+ :param metadata: Key/Value metadata to associate with a node
+ :type metadata: ``dict``
+
+ :rtype: ``dict``
+ """
+ return self.connection.request(
+ '/servers/%s/metadata' % (node.id,), method='PUT',
+ data={'metadata': metadata}
+ ).object['metadata']
+
+ def ex_update_node(self, node, **node_updates):
+ """
+ Update the Node's editable attributes. The OpenStack API currently
+ supports editing name and IPv4/IPv6 access addresses.
+
+ The driver currently only supports updating the node name.
+
+ :param node: Node
+ :type node: :class:`Node`
+
+ :keyword name: New name for the server
+ :type name: ``str``
+
+ :rtype: :class:`Node`
+ """
+ potential_data = self._create_args_to_params(node, **node_updates)
+ updates = {'name': potential_data['name']}
+ return self._update_node(node, **updates)
+
+ def _to_networks(self, obj):
+ networks = obj['networks']
+ return [self._to_network(network) for network in networks]
+
+ def _to_network(self, obj):
+ return OpenStackNetwork(id=obj['id'],
+ name=obj['label'],
+ cidr=obj.get('cidr', None),
+ driver=self)
+
+ def ex_list_networks(self):
+ """
+ Get a list of Networks that are available.
+
+ :rtype: ``list`` of :class:`OpenStackNetwork`
+ """
+ response = self.connection.request(self._networks_url_prefix).object
+ return self._to_networks(response)
+
+ def ex_create_network(self, name, cidr):
+ """
+ Create a new Network
+
+ :param name: Name of network which should be used
+ :type name: ``str``
+
+ :param cidr: cidr of network which should be used
+ :type cidr: ``str``
+
+ :rtype: :class:`OpenStackNetwork`
+ """
+ data = {'network': {'cidr': cidr, 'label': name}}
+ response = self.connection.request(self._networks_url_prefix,
+ method='POST', data=data).object
+ return self._to_network(response['network'])
+
+ def ex_delete_network(self, network):
+ """
+ Get a list of NodeNetorks that are available.
+
+ :param network: Network which should be used
+ :type network: :class:`OpenStackNetwork`
+
+ :rtype: ``bool``
+ """
+ resp = self.connection.request('%s/%s' % (self._networks_url_prefix,
+ network.id),
+ method='DELETE')
+ return resp.status == httplib.ACCEPTED
+
+ def ex_get_console_output(self, node, length=None):
+ """
+ Get console output
+
+ :param node: node
+ :type node: :class:`Node`
+
+ :param length: Optional number of lines to fetch from the
+ console log
+ :type length: ``int``
+
+ :return: Dictionary with the output
+ :rtype: ``dict``
+ """
+
+ data = {
+ "os-getConsoleOutput": {
+ "length": length
+ }
+ }
+
+ resp = self.connection.request('/servers/%s/action' % node.id,
+ method='POST', data=data).object
+ return resp
+
    def ex_list_snapshots(self):
        # List all volume snapshots via the os-snapshots extension.
        return self._to_snapshots(
            self.connection.request('/os-snapshots').object)

    def ex_create_snapshot(self, volume, name, description=None, force=False):
        """
        Create a snapshot based off of a volume.

        :param volume: volume
        :type volume: :class:`StorageVolume`

        :keyword name: New name for the volume snapshot
        :type name: ``str``

        :keyword description: Description of the snapshot (optional)
        :type description: ``str``

        :keyword force: Whether to force creation (optional)
        :type force: ``bool``

        :rtype: :class:`VolumeSnapshot`
        """
        data = {'snapshot': {'display_name': name,
                             'display_description': description,
                             'volume_id': volume.id,
                             'force': force}}

        return self._to_snapshot(self.connection.request('/os-snapshots',
                                                         method='POST',
                                                         data=data).object)

    def ex_delete_snapshot(self, snapshot):
        """
        Delete a VolumeSnapshot

        :param snapshot: snapshot
        :type snapshot: :class:`VolumeSnapshot`

        :rtype: ``bool``
        """
        resp = self.connection.request('/os-snapshots/%s' % snapshot.id,
                                       method='DELETE')
        return resp.status == httplib.NO_CONTENT
+
    def _to_security_group_rules(self, obj):
        # Convert a list of raw rule dicts.
        return [self._to_security_group_rule(security_group_rule) for
                security_group_rule in obj]

    def _to_security_group_rule(self, obj):
        """Convert a raw API rule dict to an OpenStackSecurityGroupRule."""
        ip_range = group = tenant_id = None
        # A rule targets either a CIDR range or another security group;
        # the API signals the latter with a non-empty 'group' dict.
        if obj['group'] == {}:
            ip_range = obj['ip_range'].get('cidr', None)
        else:
            group = obj['group'].get('name', None)
            tenant_id = obj['group'].get('tenant_id', None)

        return OpenStackSecurityGroupRule(
            id=obj['id'], parent_group_id=obj['parent_group_id'],
            ip_protocol=obj['ip_protocol'], from_port=obj['from_port'],
            to_port=obj['to_port'], driver=self, ip_range=ip_range,
            group=group, tenant_id=tenant_id)

    def _to_security_groups(self, obj):
        # Convert the 'security_groups' listing.
        security_groups = obj['security_groups']
        return [self._to_security_group(security_group) for security_group in
                security_groups]

    def _to_security_group(self, obj):
        """Convert a raw API security group dict, including its rules."""
        rules = self._to_security_group_rules(obj.get('rules', []))
        return OpenStackSecurityGroup(id=obj['id'],
                                      tenant_id=obj['tenant_id'],
                                      name=obj['name'],
                                      description=obj.get('description', ''),
                                      rules=rules,
                                      driver=self)
+
+ def ex_list_security_groups(self):
+ """
+ Get a list of Security Groups that are available.
+
+ :rtype: ``list`` of :class:`OpenStackSecurityGroup`
+ """
+ return self._to_security_groups(
+ self.connection.request('/os-security-groups').object)
+
+ def ex_get_node_security_groups(self, node):
+ """
+ Get Security Groups of the specified server.
+
+ :rtype: ``list`` of :class:`OpenStackSecurityGroup`
+ """
+ return self._to_security_groups(
+ self.connection.request('/servers/%s/os-security-groups' %
+ (node.id)).object)
+
+ def ex_create_security_group(self, name, description):
+ """
+ Create a new Security Group
+
+ :param name: Name of the new Security Group
+ :type name: ``str``
+
+ :param description: Description of the new Security Group
+ :type description: ``str``
+
+ :rtype: :class:`OpenStackSecurityGroup`
+ """
+ return self._to_security_group(self.connection.request(
+ '/os-security-groups', method='POST',
+ data={'security_group': {'name': name, 'description': description}}
+ ).object['security_group'])
+
+ def ex_delete_security_group(self, security_group):
+ """
+ Delete a Security Group.
+
+ :param security_group: Security Group should be deleted
+ :type security_group: :class:`OpenStackSecurityGroup`
+
+ :rtype: ``bool``
+ """
+ resp = self.connection.request('/os-security-groups/%s' %
+ (security_group.id),
+ method='DELETE')
+ return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
+
+ def ex_create_security_group_rule(self, security_group, ip_protocol,
+ from_port, to_port, cidr=None,
+ source_security_group=None):
+ """
+ Create a new Rule in a Security Group
+
+ :param security_group: Security Group in which to add the rule
+ :type security_group: :class:`OpenStackSecurityGroup`
+
+ :param ip_protocol: Protocol to which this rule applies
+ Examples: tcp, udp, ...
+ :type ip_protocol: ``str``
+
+ :param from_port: First port of the port range
+ :type from_port: ``int``
+
+ :param to_port: Last port of the port range
+ :type to_port: ``int``
+
+ :param cidr: CIDR notation of the source IP range for this rule
+ :type cidr: ``str``
+
+ :param source_security_group: Existing Security Group to use as the
+ source (instead of CIDR)
+ :type source_security_group: L{OpenStackSecurityGroup
+
+ :rtype: :class:`OpenStackSecurityGroupRule`
+ """
+ source_security_group_id = None
+ if type(source_security_group) == OpenStackSecurityGroup:
+ source_security_group_id = source_security_group.id
+
+ return self._to_security_group_rule(self.connection.request(
+ '/os-security-group-rules', method='POST',
+ data={'security_group_rule': {
+ 'ip_protocol': ip_protocol,
+ 'from_port': from_port,
+ 'to_port': to_port,
+ 'cidr': cidr,
+ 'group_id': source_security_group_id,
+ 'parent_group_id': security_group.id}}
+ ).object['security_group_rule'])
+
+ def ex_delete_security_group_rule(self, rule):
+ """
+ Delete a Rule from a Security Group.
+
+ :param rule: Rule should be deleted
+ :type rule: :class:`OpenStackSecurityGroupRule`
+
+ :rtype: ``bool``
+ """
+ resp = self.connection.request('/os-security-group-rules/%s' %
+ (rule.id), method='DELETE')
+ return resp.status == httplib.NO_CONTENT
+
+ def _to_key_pairs(self, obj):
+ key_pairs = obj['keypairs']
+ key_pairs = [self._to_key_pair(key_pair['keypair']) for key_pair in
+ key_pairs]
+ return key_pairs
+
+ def _to_key_pair(self, obj):
+ key_pair = KeyPair(name=obj['name'],
+ fingerprint=obj['fingerprint'],
+ public_key=obj['public_key'],
+ private_key=obj.get('private_key', None),
+ driver=self)
+ return key_pair
+
+ def list_key_pairs(self):
+ response = self.connection.request('/os-keypairs')
+ key_pairs = self._to_key_pairs(response.object)
+ return key_pairs
+
+ def get_key_pair(self, name):
+ self.connection.set_context({'key_pair_name': name})
+
+ response = self.connection.request('/os-keypairs/%s' % (name))
+ key_pair = self._to_key_pair(response.object['keypair'])
+ return key_pair
+
+ def create_key_pair(self, name):
+ data = {'keypair': {'name': name}}
+ response = self.connection.request('/os-keypairs', method='POST',
+ data=data)
+ key_pair = self._to_key_pair(response.object['keypair'])
+ return key_pair
+
+ def import_key_pair_from_string(self, name, key_material):
+ data = {'keypair': {'name': name, 'public_key': key_material}}
+ response = self.connection.request('/os-keypairs', method='POST',
+ data=data)
+ key_pair = self._to_key_pair(response.object['keypair'])
+ return key_pair
+
    def delete_key_pair(self, key_pair):
        """
        Delete a KeyPair.

        :param key_pair: KeyPair to delete
        :type key_pair: :class:`KeyPair`

        :rtype: ``bool``
        """
        response = self.connection.request('/os-keypairs/%s' % (key_pair.name),
                                           method='DELETE')
        return response.status == httplib.ACCEPTED
+
    def ex_list_keypairs(self):
        """
        Get a list of KeyPairs that are available.

        :rtype: ``list`` of :class:`OpenStackKeyPair`
        """
        # Deprecated alias kept for backwards compatibility.
        warnings.warn('This method has been deprecated in favor of '
                      'list_key_pairs method')

        return self.list_key_pairs()

    def ex_create_keypair(self, name):
        """
        Create a new KeyPair

        :param name: Name of the new KeyPair
        :type name: ``str``

        :rtype: :class:`OpenStackKeyPair`
        """
        # Deprecated alias kept for backwards compatibility.
        warnings.warn('This method has been deprecated in favor of '
                      'create_key_pair method')

        return self.create_key_pair(name=name)

    def ex_import_keypair(self, name, keyfile):
        """
        Import a KeyPair from a file

        :param name: Name of the new KeyPair
        :type name: ``str``

        :param keyfile: Path to the public key file (in OpenSSH format)
        :type keyfile: ``str``

        :rtype: :class:`OpenStackKeyPair`
        """
        # Deprecated alias kept for backwards compatibility.
        warnings.warn('This method has been deprecated in favor of '
                      'import_key_pair_from_file method')

        return self.import_key_pair_from_file(name=name, key_file_path=keyfile)

    def ex_import_keypair_from_string(self, name, key_material):
        """
        Import a KeyPair from a string

        :param name: Name of the new KeyPair
        :type name: ``str``

        :param key_material: Public key (in OpenSSH format)
        :type key_material: ``str``

        :rtype: :class:`OpenStackKeyPair`
        """
        # Deprecated alias kept for backwards compatibility.
        warnings.warn('This method has been deprecated in favor of '
                      'import_key_pair_from_string method')

        return self.import_key_pair_from_string(name=name,
                                                key_material=key_material)

    def ex_delete_keypair(self, keypair):
        """
        Delete a KeyPair.

        :param keypair: KeyPair to delete
        :type keypair: :class:`OpenStackKeyPair`

        :rtype: ``bool``
        """
        # Deprecated alias kept for backwards compatibility.
        warnings.warn('This method has been deprecated in favor of '
                      'delete_key_pair method')

        return self.delete_key_pair(key_pair=keypair)
+
+ def ex_get_size(self, size_id):
+ """
+ Get a NodeSize
+
+ :param size_id: ID of the size which should be used
+ :type size_id: ``str``
+
+ :rtype: :class:`NodeSize`
+ """
+ return self._to_size(self.connection.request(
+ '/flavors/%s' % (size_id,)) .object['flavor'])
+
+ def get_image(self, image_id):
+ """
+ Get a NodeImage
+
+ @inherits: :class:`NodeDriver.get_image`
+
+ :param image_id: ID of the image which should be used
+ :type image_id: ``str``
+
+ :rtype: :class:`NodeImage`
+ """
+ return self._to_image(self.connection.request(
+ '/images/%s' % (image_id,)).object['image'])
+
+ def delete_image(self, image):
+ """
+ Delete a NodeImage
+
+ @inherits: :class:`NodeDriver.delete_image`
+
+ :param image: image witch should be used
+ :type image: :class:`NodeImage`
+
+ :rtype: ``bool``
+ """
+ resp = self.connection.request('/images/%s' % (image.id,),
+ method='DELETE')
+ return resp.status == httplib.NO_CONTENT
+
    def _node_action(self, node, action, **params):
        """POST a server action; the body maps the action name to its args."""
        # An empty kwargs dict is sent as None (JSON null) instead of {}.
        params = params or None
        return self.connection.request('/servers/%s/action' % (node.id,),
                                       method='POST', data={action: params})

    def _update_node(self, node, **node_updates):
        """
        Updates the editable attributes of a server, which currently include
        its name and IPv4/IPv6 access addresses.
        """
        return self._to_node(
            self.connection.request(
                '/servers/%s' % (node.id,), method='PUT',
                data={'server': node_updates}
            ).object['server']
        )
+
+ def _to_node_from_obj(self, obj):
+ return self._to_node(obj['server'])
+
+ def _to_node(self, api_node):
+ public_networks_labels = ['public', 'internet']
+
+ public_ips, private_ips = [], []
+
+ for label, values in api_node['addresses'].items():
+ ips = [v['addr'] for v in values]
+
+ if label in public_networks_labels:
+ public_ips.extend(ips)
+ else:
+ for ip in ips:
+ # is_private_subnet does not check for ipv6
+ try:
+ if is_private_subnet(ip):
+ private_ips.append(ip)
+ else:
+ public_ips.append(ip)
+ except:
+ private_ips.append(ip)
+
+ # Sometimes 'image' attribute is not present if the node is in an error
+ # state
+ image = api_node.get('image', None)
+ image_id = image.get('id', None) if image else None
+
+ return Node(
+ id=api_node['id'],
+ name=api_node['name'],
+ state=self.NODE_STATE_MAP.get(api_node['status'],
+ NodeState.UNKNOWN),
+ public_ips=public_ips,
+ private_ips=private_ips,
+ driver=self,
+ extra=dict(
+ hostId=api_node['hostId'],
+ access_ip=api_node.get('accessIPv4'),
+ # Docs says "tenantId", but actual is "tenant_id". *sigh*
+ # Best handle both.
+ tenantId=api_node.get('tenant_id') or api_node['tenantId'],
+ imageId=image_id,
+ flavorId=api_node['flavor']['id'],
+ uri=next(link['href'] for link in api_node['links'] if
+ link['rel'] == 'self'),
+ metadata=api_node['metadata'],
+ password=api_node.get('adminPass', None),
+ created=api_node['created'],
+ updated=api_node['updated'],
+ key_name=api_node.get('key_name', None),
+ disk_config=api_node.get('OS-DCF:diskConfig', None),
+ availability_zone=api_node.get('OS-EXT-AZ:availability_zone',
+ None),
+ ),
+ )
+
    def _to_volume(self, api_node):
        """Convert a raw API volume dict into a ``StorageVolume``."""
        # Accept either the wrapped ({'volume': {...}}) or unwrapped form.
        if 'volume' in api_node:
            api_node = api_node['volume']
        return StorageVolume(
            id=api_node['id'],
            name=api_node['displayName'],
            size=api_node['size'],
            driver=self,
            extra={
                'description': api_node['displayDescription'],
                # The API pads 'attachments' with falsy placeholders;
                # keep only real entries.
                'attachments': [att for att in api_node['attachments'] if att],
            }
        )

    def _to_snapshot(self, data):
        """Convert a raw API snapshot dict into a ``VolumeSnapshot``."""
        # Accept either the wrapped ({'snapshot': {...}}) or unwrapped form.
        if 'snapshot' in data:
            data = data['snapshot']

        # Field names differ between deployments (snake_case vs camelCase);
        # handle both spellings.
        volume_id = data.get('volume_id', data.get('volumeId', None))
        display_name = data.get('display_name', data.get('displayName', None))
        created_at = data.get('created_at', data.get('createdAt', None))
        description = data.get('display_description',
                               data.get('displayDescription', None))
        status = data.get('status', None)

        extra = {'volume_id': volume_id,
                 'name': display_name,
                 'created': created_at,
                 'description': description,
                 'status': status}

        snapshot = VolumeSnapshot(id=data['id'], driver=self,
                                  size=data['size'], extra=extra)
        return snapshot
+
+ def _to_size(self, api_flavor, price=None, bandwidth=None):
+ # if provider-specific subclasses can get better values for
+ # price/bandwidth, then can pass them in when they super().
+ if not price:
+ price = self._get_size_price(str(api_flavor['id']))
+
+ return OpenStackNodeSize(
+ id=api_flavor['id'],
+ name=api_flavor['name'],
+ ram=api_flavor['ram'],
+ disk=api_flavor['disk'],
+ vcpus=api_flavor['vcpus'],
+ bandwidth=bandwidth,
+ price=price,
+ driver=self,
+ )
+
+ def _get_size_price(self, size_id):
+ try:
+ return get_size_price(
+ driver_type='compute',
+ driver_name=self.api_name,
+ size_id=size_id,
+ )
+ except KeyError:
+ return(0.0)
+
+ def _extract_image_id_from_url(self, location_header):
+ path = urlparse.urlparse(location_header).path
+ image_id = path.split('/')[-1]
+ return image_id
+
    def ex_rescue(self, node, password=None):
        # Requires Rescue Mode extension
        """
        Rescue a node

        :param node: node
        :type node: :class:`Node`

        :param password: password
        :type password: ``str``

        :rtype: :class:`Node`
        """
        if password:
            resp = self._node_action(node, 'rescue', adminPass=password)
        else:
            # When no password is supplied, the API generates one and
            # returns it in the response body.
            resp = self._node_action(node, 'rescue')
            password = json.loads(resp.body)['adminPass']
        node.extra['password'] = password
        return node

    def ex_unrescue(self, node):
        """
        Unrescue a node

        :param node: node
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        resp = self._node_action(node, 'unrescue')
        return resp.status == httplib.ACCEPTED
+
+ def _to_floating_ip_pools(self, obj):
+ pool_elements = obj['floating_ip_pools']
+ return [self._to_floating_ip_pool(pool) for pool in pool_elements]
+
+ def _to_floating_ip_pool(self, obj):
+ return OpenStack_1_1_FloatingIpPool(obj['name'], self.connection)
+
+ def ex_list_floating_ip_pools(self):
+ """
+ List available floating IP pools
+
+ :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpPool`
+ """
+ return self._to_floating_ip_pools(
+ self.connection.request('/os-floating-ip-pools').object)
+
+ def _to_floating_ips(self, obj):
+ ip_elements = obj['floating_ips']
+ return [self._to_floating_ip(ip) for ip in ip_elements]
+
+ def _to_floating_ip(self, obj):
+ return OpenStack_1_1_FloatingIpAddress(id=obj['id'],
+ ip_address=obj['ip'],
+ pool=None,
+ node_id=obj['instance_id'],
+ driver=self)
+
+ def ex_list_floating_ips(self):
+ """
+ List floating IPs
+
+ :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress`
+ """
+ return self._to_floating_ips(
+ self.connection.request('/os-floating-ips').object)
+
+ def ex_get_floating_ip(self, ip):
+ """
+ Get specified floating IP
+
+ :param ip: floating IP to get
+ :type ip: ``str``
+
+ :rtype: :class:`OpenStack_1_1_FloatingIpAddress`
+ """
+ floating_ips = self.ex_list_floating_ips()
+ ip_obj, = [x for x in floating_ips if x.ip_address == ip]
+ return ip_obj
+
+ def ex_create_floating_ip(self):
+ """
+ Create new floating IP
+
+ :rtype: :class:`OpenStack_1_1_FloatingIpAddress`
+ """
+ resp = self.connection.request('/os-floating-ips',
+ method='POST',
+ data={})
+ data = resp.object['floating_ip']
+ id = data['id']
+ ip_address = data['ip']
+ return OpenStack_1_1_FloatingIpAddress(id=id,
+ ip_address=ip_address,
+ pool=None,
+ node_id=None,
+ driver=self)
+
+ def ex_delete_floating_ip(self, ip):
+ """
+ Delete specified floating IP
+
+ :param ip: floating IP to remove
+ :type ip: :class:`OpenStack_1_1_FloatingIpAddress`
+
+ :rtype: ``bool``
+ """
+ resp = self.connection.request('/os-floating-ips/%s' % ip.id,
+ method='DELETE')
+ return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)
+
+ def ex_attach_floating_ip_to_node(self, node, ip):
+ """
+ Attach the floating IP to the node
+
+ :param node: node
+ :type node: :class:`Node`
+
+ :param ip: floating IP to attach
+ :type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress`
+
+ :rtype: ``bool``
+ """
+ address = ip.ip_address if hasattr(ip, 'ip_address') else ip
+ data = {
+ 'addFloatingIp': {'address': address}
+ }
+ resp = self.connection.request('/servers/%s/action' % node.id,
+ method='POST', data=data)
+ return resp.status == httplib.ACCEPTED
+
+ def ex_detach_floating_ip_from_node(self, node, ip):
+ """
+ Detach the floating IP from the node
+
+ :param node: node
+ :type node: :class:`Node`
+
+ :param ip: floating IP to remove
+ :type ip: ``str`` or :class:`OpenStack_1_1_FloatingIpAddress`
+
+ :rtype: ``bool``
+ """
+ address = ip.ip_address if hasattr(ip, 'ip_address') else ip
+ data = {
+ 'removeFloatingIp': {'address': address}
+ }
+ resp = self.connection.request('/servers/%s/action' % node.id,
+ method='POST', data=data)
+ return resp.status == httplib.ACCEPTED
+
    def ex_get_metadata_for_node(self, node):
        """
        Return the metadata associated with the node.

        :param node: Node instance
        :type node: :class:`Node`

        :return: A dictionary or other mapping of strings to strings,
                 associating tag names with tag values.
        :rtype: ``dict``
        """
        # Served from the locally cached node.extra; no API call is made.
        return node.extra['metadata']
+
+ def ex_pause_node(self, node):
+ uri = '/servers/%s/action' % (node.id)
+ data = {'pause': None}
+ resp = self.connection.request(uri, method='POST', data=data)
+ return resp.status == httplib.ACCEPTED
+
+ def ex_unpause_node(self, node):
+ uri = '/servers/%s/action' % (node.id)
+ data = {'unpause': None}
+ resp = self.connection.request(uri, method='POST', data=data)
+ return resp.status == httplib.ACCEPTED
+
+ def ex_suspend_node(self, node):
+ uri = '/servers/%s/action' % (node.id)
+ data = {'suspend': None}
+ resp = self.connection.request(uri, method='POST', data=data)
+ return resp.status == httplib.ACCEPTED
+
+ def ex_resume_node(self, node):
+ uri = '/servers/%s/action' % (node.id)
+ data = {'resume': None}
+ resp = self.connection.request(uri, method='POST', data=data)
+ return resp.status == httplib.ACCEPTED
+
+
class OpenStack_1_1_FloatingIpPool(object):
    """
    Floating IP Pool info.
    """

    def __init__(self, name, connection):
        self.name = name
        self.connection = connection

    def list_floating_ips(self):
        """
        List floating IPs in the pool

        :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress`
        """
        return self._to_floating_ips(
            self.connection.request('/os-floating-ips').object)

    def _to_floating_ips(self, obj):
        # Convert each entry of the 'floating_ips' listing.
        ip_elements = obj['floating_ips']
        return [self._to_floating_ip(ip) for ip in ip_elements]

    def _to_floating_ip(self, obj):
        return OpenStack_1_1_FloatingIpAddress(id=obj['id'],
                                               ip_address=obj['ip'],
                                               pool=self,
                                               node_id=obj['instance_id'],
                                               driver=self.connection.driver)

    def get_floating_ip(self, ip):
        """
        Get specified floating IP from the pool

        :param ip: floating IP to get
        :type ip: ``str``

        :rtype: :class:`OpenStack_1_1_FloatingIpAddress`
        """
        # Single-element unpacking raises ValueError for unknown addresses.
        ip_obj, = [x for x in self.list_floating_ips() if x.ip_address == ip]
        return ip_obj

    def create_floating_ip(self):
        """
        Create new floating IP in the pool

        :rtype: :class:`OpenStack_1_1_FloatingIpAddress`
        """
        resp = self.connection.request('/os-floating-ips',
                                       method='POST',
                                       data={'pool': self.name})
        data = resp.object['floating_ip']
        id = data['id']
        ip_address = data['ip']
        return OpenStack_1_1_FloatingIpAddress(id=id,
                                               ip_address=ip_address,
                                               pool=self,
                                               node_id=None,
                                               driver=self.connection.driver)

    def delete_floating_ip(self, ip):
        """
        Delete specified floating IP from the pool

        :param ip: floating IP to remove
        :type ip: :class:`OpenStack_1_1_FloatingIpAddress`

        :rtype: ``bool``
        """
        resp = self.connection.request('/os-floating-ips/%s' % ip.id,
                                       method='DELETE')
        return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED)

    def __repr__(self):
        # The format string had been emptied out to '', which makes the '%'
        # operator raise TypeError at runtime; restore the informative repr.
        return ('<OpenStack_1_1_FloatingIpPool: name=%s>' % self.name)
+
+
class OpenStack_1_1_FloatingIpAddress(object):
    """
    Floating IP info.
    """

    def __init__(self, id, ip_address, pool, node_id=None, driver=None):
        # The id is normalized to a string for consistent comparisons.
        self.id = str(id)
        self.ip_address = ip_address
        self.pool = pool
        self.node_id = node_id
        self.driver = driver

    def delete(self):
        """
        Delete this floating IP

        :rtype: ``bool``
        """
        # Prefer the pool's delete when the IP belongs to one; otherwise
        # fall back to the driver. Returns None when neither is set.
        if self.pool is not None:
            return self.pool.delete_floating_ip(self)
        elif self.driver is not None:
            return self.driver.ex_delete_floating_ip(self)

    def __repr__(self):
        # The format string had been emptied out to '', which makes the '%'
        # operator raise TypeError at runtime; restore the informative repr.
        return ('<OpenStack_1_1_FloatingIpAddress: id=%s, ip_addr=%s, '
                'pool=%s, driver=%s>'
                % (self.id, self.ip_address, self.pool, self.driver))
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/opsource.py b/awx/lib/site-packages/libcloud/compute/drivers/opsource.py
new file mode 100644
index 0000000000..d40155c0e2
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/opsource.py
@@ -0,0 +1,620 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Opsource Driver
+"""
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from base64 import b64encode
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+
+from libcloud.compute.base import NodeDriver, Node
+from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
+from libcloud.common.types import LibcloudError, InvalidCredsError
+from libcloud.common.base import ConnectionUserAndKey, XmlResponse
+from libcloud.utils.xml import fixxpath, findtext, findall
+from libcloud.compute.types import NodeState, Provider
+
+# Roadmap / TODO:
+#
+# 0.1 - Basic functionality: create, delete, start, stop, reboot - servers
+# (base OS images only, no customer images supported yet)
+# x implement list_nodes()
+# x implement create_node() (only support Base OS images,
+# no customer images yet)
+# x implement reboot()
+# x implement destroy_node()
+# x implement list_sizes()
+# x implement list_images() (only support Base OS images,
+# no customer images yet)
+# x implement list_locations()
+# x implement ex_* extension functions for opsource-specific featurebody
+# x ex_graceful_shutdown
+# x ex_start_node
+# x ex_power_off
+# x ex_list_networks (needed for create_node())
+# x refactor: switch to using fixxpath() from the vcloud driver for
+# dealing with xml namespace tags
+# x refactor: move some functionality from OpsourceConnection.request()
+# method into new .request_with_orgId() method
+# x add OpsourceStatus object support to:
+# x _to_node()
+# x _to_network()
+# x implement test cases
+#
+# 0.2 - Support customer images (snapshots) and server modification functions
+# - support customer-created images:
+# - list deployed customer images (in list_images() ?)
+# - list pending customer images (in list_images() ?)
+# - delete customer images
+# - modify customer images
+# - add "pending-servers" in list_nodes()
+# - implement various ex_* extension functions for opsource-specific
+# features
+# - ex_modify_server()
+# - ex_add_storage_to_server()
+# - ex_snapshot_server() (create's customer image)
+#
+# 0.3 - support Network API
+# 0.4 - Support VIP/Load-balancing API
+# 0.5 - support Files Account API
+# 0.6 - support Reports API
+# 1.0 - Opsource 0.9 API feature complete, tested
+
+# setup a few variables to represent all of the opsource cloud namespaces
+NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"
+ORGANIZATION_NS = NAMESPACE_BASE + "/organization"
+SERVER_NS = NAMESPACE_BASE + "/server"
+NETWORK_NS = NAMESPACE_BASE + "/network"
+DIRECTORY_NS = NAMESPACE_BASE + "/directory"
+RESET_NS = NAMESPACE_BASE + "/reset"
+VIP_NS = NAMESPACE_BASE + "/vip"
+IMAGEIMPORTEXPORT_NS = NAMESPACE_BASE + "/imageimportexport"
+DATACENTER_NS = NAMESPACE_BASE + "/datacenter"
+SUPPORT_NS = NAMESPACE_BASE + "/support"
+GENERAL_NS = NAMESPACE_BASE + "/general"
+IPPLAN_NS = NAMESPACE_BASE + "/ipplan"
+WHITELABEL_NS = NAMESPACE_BASE + "/whitelabel"
+
+
+class OpsourceResponse(XmlResponse):
+
+ def parse_error(self):
+ if self.status == httplib.UNAUTHORIZED:
+ raise InvalidCredsError(self.body)
+ elif self.status == httplib.FORBIDDEN:
+ raise InvalidCredsError(self.body)
+
+ body = self.parse_body()
+
+ if self.status == httplib.BAD_REQUEST:
+ code = findtext(body, 'resultCode', SERVER_NS)
+ message = findtext(body, 'resultDetail', SERVER_NS)
+ raise OpsourceAPIException(code,
+ message,
+ driver=OpsourceNodeDriver)
+
+ return self.body
+
+
+class OpsourceAPIException(LibcloudError):
+ def __init__(self, code, msg, driver):
+ self.code = code
+ self.msg = msg
+ self.driver = driver
+
+ def __str__(self):
+ return "%s: %s" % (self.code, self.msg)
+
+ def __repr__(self):
+ return ("<OpsourceAPIException: code='%s', msg='%s'>" %
+ (self.code, self.msg))
+
+
+class OpsourceConnection(ConnectionUserAndKey):
+ """
+ Connection class for the Opsource driver
+ """
+
+ host = 'api.opsourcecloud.net'
+ api_path = '/oec'
+ api_version = '0.9'
+ _orgId = None
+ responseCls = OpsourceResponse
+
+ allow_insecure = False
+
+ def add_default_headers(self, headers):
+ headers['Authorization'] = \
+ ('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
+ self.key))).decode('utf-8'))
+ return headers
+
+ def request(self, action, params=None, data='',
+ headers=None, method='GET'):
+ action = "%s/%s/%s" % (self.api_path, self.api_version, action)
+
+ return super(OpsourceConnection, self).request(
+ action=action,
+ params=params, data=data,
+ method=method, headers=headers)
+
+ def request_with_orgId(self, action, params=None, data='',
+ headers=None, method='GET'):
+ action = "%s/%s" % (self.get_resource_path(), action)
+
+ return super(OpsourceConnection, self).request(
+ action=action,
+ params=params, data=data,
+ method=method, headers=headers)
+
+ def get_resource_path(self):
+ """
+ This method returns a resource path which is necessary for referencing
+ resources that require a full path instead of just an ID, such as
+ networks, and customer snapshots.
+ """
+ return ("%s/%s/%s" % (self.api_path, self.api_version,
+ self._get_orgId()))
+
+ def _get_orgId(self):
+ """
+ Send the /myaccount API request to opsource cloud and parse the
+ 'orgId' from the XML response object. We need the orgId to use most
+ of the other API functions
+ """
+ if self._orgId is None:
+ body = self.request('myaccount').object
+ self._orgId = findtext(body, 'orgId', DIRECTORY_NS)
+ return self._orgId
+
+
+class OpsourceStatus(object):
+ """
+ Opsource API pending operation status class
+ action, requestTime, username, numberOfSteps, updateTime,
+ step.name, step.number, step.percentComplete, failureReason,
+ """
+ def __init__(self, action=None, requestTime=None, userName=None,
+ numberOfSteps=None, updateTime=None, step_name=None,
+ step_number=None, step_percentComplete=None,
+ failureReason=None):
+ self.action = action
+ self.requestTime = requestTime
+ self.userName = userName
+ self.numberOfSteps = numberOfSteps
+ self.updateTime = updateTime
+ self.step_name = step_name
+ self.step_number = step_number
+ self.step_percentComplete = step_percentComplete
+ self.failureReason = failureReason
+
+ def __repr__(self):
+ return (('<OpsourceStatus: action=%s, requestTime=%s, '
+ 'userName=%s, numberOfSteps=%s, updateTime=%s, '
+ 'step_name=%s, step_number=%s, '
+ 'step_percentComplete=%s, failureReason=%s>')
+ % (self.action, self.requestTime, self.userName,
+ self.numberOfSteps, self.updateTime, self.step_name,
+ self.step_number, self.step_percentComplete,
+ self.failureReason))
+
+
+class OpsourceNetwork(object):
+ """
+ Opsource network with location.
+ """
+
+ def __init__(self, id, name, description, location, privateNet,
+ multicast, status):
+ self.id = str(id)
+ self.name = name
+ self.description = description
+ self.location = location
+ self.privateNet = privateNet
+ self.multicast = multicast
+ self.status = status
+
+ def __repr__(self):
+ return (('<OpsourceNetwork: id=%s, name=%s, description=%s, '
+ 'location=%s, privateNet=%s, multicast=%s>')
+ % (self.id, self.name, self.description, self.location,
+ self.privateNet, self.multicast))
+
+
+class OpsourceNodeDriver(NodeDriver):
+ """
+ Opsource node driver.
+ """
+
+ connectionCls = OpsourceConnection
+ name = 'Opsource'
+ website = 'http://www.opsource.net/'
+ type = Provider.OPSOURCE
+ features = {'create_node': ['password']}
+
+ def create_node(self, **kwargs):
+ """
+ Create a new opsource node
+
+ :keyword name: String with a name for this new node (required)
+ :type name: ``str``
+
+ :keyword image: OS Image to boot on node. (required)
+ :type image: :class:`NodeImage`
+
+ :keyword auth: Initial authentication information for the
+ node (required)
+ :type auth: :class:`NodeAuthPassword`
+
+ :keyword ex_description: description for this node (required)
+ :type ex_description: ``str``
+
+ :keyword ex_network: Network to create the node within (required)
+ :type ex_network: :class:`OpsourceNetwork`
+
+ :keyword ex_isStarted: Start server after creation? default
+ true (required)
+ :type ex_isStarted: ``bool``
+
+ :return: The newly created :class:`Node`. NOTE: Opsource does not
+ provide a
+ way to determine the ID of the server that was just created,
+ so the returned :class:`Node` is not guaranteed to be the same
+ one that was created. This is only the case when multiple
+ nodes with the same name exist.
+ :rtype: :class:`Node`
+ """
+ name = kwargs['name']
+ image = kwargs['image']
+
+ # XXX: Node sizes can be adjusted after a node is created, but
+ # cannot be set at create time because size is part of the
+ # image definition.
+ password = None
+ auth = self._get_and_check_auth(kwargs.get('auth'))
+ password = auth.password
+
+ ex_description = kwargs.get('ex_description', '')
+ ex_isStarted = kwargs.get('ex_isStarted', True)
+
+ ex_network = kwargs.get('ex_network')
+ if not isinstance(ex_network, OpsourceNetwork):
+ raise ValueError('ex_network must be of OpsourceNetwork type')
+ vlanResourcePath = "%s/%s" % (self.connection.get_resource_path(),
+ ex_network.id)
+
+ imageResourcePath = None
+ if 'resourcePath' in image.extra:
+ imageResourcePath = image.extra['resourcePath']
+ else:
+ imageResourcePath = "%s/%s" % (self.connection.get_resource_path(),
+ image.id)
+
+ server_elm = ET.Element('Server', {'xmlns': SERVER_NS})
+ ET.SubElement(server_elm, "name").text = name
+ ET.SubElement(server_elm, "description").text = ex_description
+ ET.SubElement(server_elm, "vlanResourcePath").text = vlanResourcePath
+ ET.SubElement(server_elm, "imageResourcePath").text = imageResourcePath
+ ET.SubElement(server_elm, "administratorPassword").text = password
+ ET.SubElement(server_elm, "isStarted").text = str(ex_isStarted)
+
+ self.connection.request_with_orgId('server',
+ method='POST',
+ data=ET.tostring(server_elm)).object
+
+ # XXX: return the last node in the list that has a matching name. this
+ # is likely but not guaranteed to be the node we just created
+ # because opsource allows multiple nodes to have the same name
+ node = list(filter(lambda x: x.name == name, self.list_nodes()))[-1]
+
+ if getattr(auth, "generated", False):
+ node.extra['password'] = auth.password
+
+ return node
+
+ def destroy_node(self, node):
+ body = self.connection.request_with_orgId(
+ 'server/%s?delete' % (node.id)).object
+
+ result = findtext(body, 'result', GENERAL_NS)
+ return result == 'SUCCESS'
+
+ def reboot_node(self, node):
+ body = self.connection.request_with_orgId(
+ 'server/%s?restart' % (node.id)).object
+ result = findtext(body, 'result', GENERAL_NS)
+ return result == 'SUCCESS'
+
+ def list_nodes(self):
+ nodes = self._to_nodes(
+ self.connection.request_with_orgId('server/deployed').object)
+ nodes.extend(self._to_nodes(
+ self.connection.request_with_orgId('server/pendingDeploy').object))
+ return nodes
+
+ def list_images(self, location=None):
+ """
+ return a list of available images
+ Currently only returns the default 'base OS images' provided by
+ opsource. Customer images (snapshots) are not yet supported.
+
+ @inherits: :class:`NodeDriver.list_images`
+ """
+ return self._to_base_images(
+ self.connection.request('base/image').object)
+
+ def list_sizes(self, location=None):
+ return [
+ NodeSize(id=1,
+ name="default",
+ ram=0,
+ disk=0,
+ bandwidth=0,
+ price=0,
+ driver=self.connection.driver),
+ ]
+
+ def list_locations(self):
+ """
+ list locations (datacenters) available for instantiating servers and
+ networks.
+
+ @inherits: :class:`NodeDriver.list_locations`
+ """
+ return self._to_locations(
+ self.connection.request_with_orgId('datacenter').object)
+
+ def list_networks(self, location=None):
+ """
+ List networks deployed across all data center locations for your
+ organization. The response includes the location of each network.
+
+
+ :keyword location: The location
+ :type location: :class:`NodeLocation`
+
+ :return: a list of OpsourceNetwork objects
+ :rtype: ``list`` of :class:`OpsourceNetwork`
+ """
+ return self._to_networks(
+ self.connection.request_with_orgId('networkWithLocation').object)
+
+ def _to_base_images(self, object):
+ images = []
+ for element in object.findall(fixxpath("ServerImage", SERVER_NS)):
+ images.append(self._to_base_image(element))
+
+ return images
+
+ def _to_base_image(self, element):
+ # Eventually we will probably need multiple _to_image() functions
+ # that parse <ServerImage> differently than <DeployedImage>.
+ # DeployedImages are customer snapshot images, and ServerImages are
+ # 'base' images provided by opsource
+ location_id = findtext(element, 'location', SERVER_NS)
+ location = self.ex_get_location_by_id(location_id)
+
+ extra = {
+ 'description': findtext(element, 'description', SERVER_NS),
+ 'OS_type': findtext(element, 'operatingSystem/type', SERVER_NS),
+ 'OS_displayName': findtext(element, 'operatingSystem/displayName',
+ SERVER_NS),
+ 'cpuCount': findtext(element, 'cpuCount', SERVER_NS),
+ 'resourcePath': findtext(element, 'resourcePath', SERVER_NS),
+ 'memory': findtext(element, 'memory', SERVER_NS),
+ 'osStorage': findtext(element, 'osStorage', SERVER_NS),
+ 'additionalStorage': findtext(element, 'additionalStorage',
+ SERVER_NS),
+ 'created': findtext(element, 'created', SERVER_NS),
+ 'location': location,
+ }
+
+ return NodeImage(id=str(findtext(element, 'id', SERVER_NS)),
+ name=str(findtext(element, 'name', SERVER_NS)),
+ extra=extra,
+ driver=self.connection.driver)
+
+ def ex_start_node(self, node):
+ """
+ Powers on an existing deployed server
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ body = self.connection.request_with_orgId(
+ 'server/%s?start' % node.id).object
+ result = findtext(body, 'result', GENERAL_NS)
+ return result == 'SUCCESS'
+
+ def ex_shutdown_graceful(self, node):
+ """
+ This function will attempt to "gracefully" stop a server by
+ initiating a shutdown sequence within the guest operating system.
+ A successful response on this function means the system has
+ successfully passed the request into the operating system.
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ body = self.connection.request_with_orgId(
+ 'server/%s?shutdown' % (node.id)).object
+ result = findtext(body, 'result', GENERAL_NS)
+ return result == 'SUCCESS'
+
+ def ex_power_off(self, node):
+ """
+ This function will abruptly power-off a server. Unlike
+ ex_shutdown_graceful, success ensures the node will stop but some OS
+ and application configurations may be adversely affected by the
+ equivalent of pulling the power plug out of the machine.
+
+ :param node: Node which should be used
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ body = self.connection.request_with_orgId(
+ 'server/%s?poweroff' % node.id).object
+ result = findtext(body, 'result', GENERAL_NS)
+ return result == 'SUCCESS'
+
+ def ex_list_networks(self):
+ """
+ List networks deployed across all data center locations for your
+ organization. The response includes the location of each network.
+
+ :return: a list of OpsourceNetwork objects
+ :rtype: ``list`` of :class:`OpsourceNetwork`
+ """
+ response = self.connection.request_with_orgId('networkWithLocation') \
+ .object
+ return self._to_networks(response)
+
+ def ex_get_location_by_id(self, id):
+ """
+ Get location by ID.
+
+ :param id: ID of the node location which should be used
+ :type id: ``str``
+
+ :rtype: :class:`NodeLocation`
+ """
+ location = None
+ if id is not None:
+ location = list(
+ filter(lambda x: x.id == id, self.list_locations()))[0]
+ return location
+
+ def _to_networks(self, object):
+ networks = []
+ for element in findall(object, 'network', NETWORK_NS):
+ networks.append(self._to_network(element))
+
+ return networks
+
+ def _to_network(self, element):
+ multicast = False
+ if findtext(element, 'multicast', NETWORK_NS) == 'true':
+ multicast = True
+
+ status = self._to_status(element.find(fixxpath('status', NETWORK_NS)))
+
+ location_id = findtext(element, 'location', NETWORK_NS)
+ location = self.ex_get_location_by_id(location_id)
+
+ return OpsourceNetwork(id=findtext(element, 'id', NETWORK_NS),
+ name=findtext(element, 'name', NETWORK_NS),
+ description=findtext(element, 'description',
+ NETWORK_NS),
+ location=location,
+ privateNet=findtext(element, 'privateNet',
+ NETWORK_NS),
+ multicast=multicast,
+ status=status)
+
+ def _to_locations(self, object):
+ locations = []
+ for element in object.findall(fixxpath('datacenter', DATACENTER_NS)):
+ locations.append(self._to_location(element))
+
+ return locations
+
+ def _to_location(self, element):
+ l = NodeLocation(id=findtext(element, 'location', DATACENTER_NS),
+ name=findtext(element, 'displayName', DATACENTER_NS),
+ country=findtext(element, 'country', DATACENTER_NS),
+ driver=self)
+ return l
+
+ def _to_nodes(self, object):
+ node_elements = object.findall(fixxpath('DeployedServer', SERVER_NS))
+ node_elements.extend(object.findall(
+ fixxpath('PendingDeployServer', SERVER_NS)))
+ return [self._to_node(el) for el in node_elements]
+
+ def _to_node(self, element):
+ if findtext(element, 'isStarted', SERVER_NS) == 'true':
+ state = NodeState.RUNNING
+ else:
+ state = NodeState.TERMINATED
+
+ status = self._to_status(element.find(fixxpath('status', SERVER_NS)))
+
+ extra = {
+ 'description': findtext(element, 'description', SERVER_NS),
+ 'sourceImageId': findtext(element, 'sourceImageId', SERVER_NS),
+ 'networkId': findtext(element, 'networkId', SERVER_NS),
+ 'machineName': findtext(element, 'machineName', SERVER_NS),
+ 'deployedTime': findtext(element, 'deployedTime', SERVER_NS),
+ 'cpuCount': findtext(element, 'machineSpecification/cpuCount',
+ SERVER_NS),
+ 'memoryMb': findtext(element, 'machineSpecification/memoryMb',
+ SERVER_NS),
+ 'osStorageGb': findtext(element,
+ 'machineSpecification/osStorageGb',
+ SERVER_NS),
+ 'additionalLocalStorageGb': findtext(
+ element, 'machineSpecification/additionalLocalStorageGb',
+ SERVER_NS),
+ 'OS_type': findtext(element,
+ 'machineSpecification/operatingSystem/type',
+ SERVER_NS),
+ 'OS_displayName': findtext(
+ element, 'machineSpecification/operatingSystem/displayName',
+ SERVER_NS),
+ 'status': status,
+ }
+
+ public_ip = findtext(element, 'publicIpAddress', SERVER_NS)
+
+ n = Node(id=findtext(element, 'id', SERVER_NS),
+ name=findtext(element, 'name', SERVER_NS),
+ state=state,
+ public_ips=[public_ip] if public_ip is not None else [],
+ private_ips=findtext(element, 'privateIpAddress', SERVER_NS),
+ driver=self.connection.driver,
+ extra=extra)
+ return n
+
+ def _to_status(self, element):
+ if element is None:
+ return OpsourceStatus()
+ s = OpsourceStatus(action=findtext(element, 'action', SERVER_NS),
+ requestTime=findtext(element, 'requestTime',
+ SERVER_NS),
+ userName=findtext(element, 'userName',
+ SERVER_NS),
+ numberOfSteps=findtext(element, 'numberOfSteps',
+ SERVER_NS),
+ step_name=findtext(element, 'step/name',
+ SERVER_NS),
+ step_number=findtext(element, 'step_number',
+ SERVER_NS),
+ step_percentComplete=findtext(
+ element, 'step/percentComplete', SERVER_NS),
+ failureReason=findtext(element, 'failureReason',
+ SERVER_NS))
+ return s
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/rackspace.py b/awx/lib/site-packages/libcloud/compute/drivers/rackspace.py
new file mode 100644
index 0000000000..367facd024
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/rackspace.py
@@ -0,0 +1,230 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Rackspace driver
+"""
+from libcloud.compute.types import Provider, LibcloudError
+from libcloud.compute.base import NodeLocation, VolumeSnapshot
+from libcloud.compute.drivers.openstack import OpenStack_1_0_Connection,\
+ OpenStack_1_0_NodeDriver, OpenStack_1_0_Response
+from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection,\
+ OpenStack_1_1_NodeDriver
+
+from libcloud.common.rackspace import AUTH_URL
+
+
+ENDPOINT_ARGS_MAP = {
+ 'dfw': {'service_type': 'compute',
+ 'name': 'cloudServersOpenStack',
+ 'region': 'DFW'},
+ 'ord': {'service_type': 'compute',
+ 'name': 'cloudServersOpenStack',
+ 'region': 'ORD'},
+ 'iad': {'service_type': 'compute',
+ 'name': 'cloudServersOpenStack',
+ 'region': 'IAD'},
+ 'lon': {'service_type': 'compute',
+ 'name': 'cloudServersOpenStack',
+ 'region': 'LON'},
+ 'syd': {'service_type': 'compute',
+ 'name': 'cloudServersOpenStack',
+ 'region': 'SYD'},
+ 'hkg': {'service_type': 'compute',
+ 'name': 'cloudServersOpenStack',
+ 'region': 'HKG'},
+
+}
+
+
+class RackspaceFirstGenConnection(OpenStack_1_0_Connection):
+ """
+ Connection class for the Rackspace first-gen driver.
+ """
+ responseCls = OpenStack_1_0_Response
+ XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0'
+ auth_url = AUTH_URL
+ _auth_version = '2.0'
+ cache_busting = True
+
+ def __init__(self, *args, **kwargs):
+ self.region = kwargs.pop('region', None)
+ super(RackspaceFirstGenConnection, self).__init__(*args, **kwargs)
+
+ def get_endpoint(self):
+ ep = {}
+
+ if '2.0' in self._auth_version:
+ ep = self.service_catalog.get_endpoint(service_type='compute',
+ name='cloudServers')
+ else:
+ raise LibcloudError(
+ 'Auth version "%s" not supported' % (self._auth_version))
+
+ public_url = ep.get('publicURL', None)
+
+ if not public_url:
+ raise LibcloudError('Could not find specified endpoint')
+
+ # This is a nasty hack, but it's required because of how the
+ # auth system works.
+ # Old US accounts can access UK API endpoint, but they don't
+ # have this endpoint in the service catalog. Same goes for the
+ # old UK accounts and US endpoint.
+ if self.region == 'us':
+ # Old UK account, which only have uk endpoint in the catalog
+ public_url = public_url.replace('https://lon.servers.api',
+ 'https://servers.api')
+ elif self.region == 'uk':
+ # Old US account, which only has us endpoints in the catalog
+ public_url = public_url.replace('https://servers.api',
+ 'https://lon.servers.api')
+
+ return public_url
+
+
+class RackspaceFirstGenNodeDriver(OpenStack_1_0_NodeDriver):
+ name = 'Rackspace Cloud (First Gen)'
+ website = 'http://www.rackspace.com'
+ connectionCls = RackspaceFirstGenConnection
+ type = Provider.RACKSPACE_FIRST_GEN
+ api_name = 'rackspace'
+
+ def __init__(self, key, secret=None, secure=True, host=None, port=None,
+ region='us', **kwargs):
+ """
+ @inherits: :class:`NodeDriver.__init__`
+
+ :param region: Region ID which should be used
+ :type region: ``str``
+ """
+ if region not in ['us', 'uk']:
+ raise ValueError('Invalid region: %s' % (region))
+
+ super(RackspaceFirstGenNodeDriver, self).__init__(key=key,
+ secret=secret,
+ secure=secure,
+ host=host,
+ port=port,
+ region=region,
+ **kwargs)
+
+ def list_locations(self):
+ """
+ Lists available locations
+
+ Locations cannot be set or retrieved via the API, but currently
+ there are two locations, DFW and ORD.
+
+ @inherits: :class:`OpenStack_1_0_NodeDriver.list_locations`
+ """
+ if self.region == 'us':
+ locations = [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)]
+ elif self.region == 'uk':
+ locations = [NodeLocation(0, 'Rackspace UK London', 'UK', self)]
+
+ return locations
+
+ def _ex_connection_class_kwargs(self):
+ kwargs = self.openstack_connection_kwargs()
+ kwargs['region'] = self.region
+ return kwargs
+
+
+class RackspaceConnection(OpenStack_1_1_Connection):
+ """
+ Connection class for the Rackspace next-gen OpenStack base driver.
+ """
+
+ auth_url = AUTH_URL
+ _auth_version = '2.0'
+
+ def __init__(self, *args, **kwargs):
+ self.region = kwargs.pop('region', None)
+ self.get_endpoint_args = kwargs.pop('get_endpoint_args', None)
+ super(RackspaceConnection, self).__init__(*args, **kwargs)
+
+ def get_endpoint(self):
+ if not self.get_endpoint_args:
+ raise LibcloudError(
+ 'RackspaceConnection must have get_endpoint_args set')
+
+ if '2.0' in self._auth_version:
+ ep = self.service_catalog.get_endpoint(**self.get_endpoint_args)
+ else:
+ raise LibcloudError(
+ 'Auth version "%s" not supported' % (self._auth_version))
+
+ public_url = ep.get('publicURL', None)
+
+ if not public_url:
+ raise LibcloudError('Could not find specified endpoint')
+
+ return public_url
+
+
+class RackspaceNodeDriver(OpenStack_1_1_NodeDriver):
+ name = 'Rackspace Cloud (Next Gen)'
+ website = 'http://www.rackspace.com'
+ connectionCls = RackspaceConnection
+ type = Provider.RACKSPACE
+
+ _networks_url_prefix = '/os-networksv2'
+
+ def __init__(self, key, secret=None, secure=True, host=None, port=None,
+ region='dfw', **kwargs):
+ """
+ @inherits: :class:`NodeDriver.__init__`
+
+ :param region: ID of the region which should be used.
+ :type region: ``str``
+ """
+ valid_regions = ENDPOINT_ARGS_MAP.keys()
+
+ if region not in valid_regions:
+ raise ValueError('Invalid region: %s' % (region))
+
+ if region == 'lon':
+ self.api_name = 'rackspacenovalon'
+ elif region == 'syd':
+ self.api_name = 'rackspacenovasyd'
+ else:
+ self.api_name = 'rackspacenovaus'
+
+ super(RackspaceNodeDriver, self).__init__(key=key, secret=secret,
+ secure=secure, host=host,
+ port=port,
+ region=region,
+ **kwargs)
+
+ def _to_snapshot(self, api_node):
+ if 'snapshot' in api_node:
+ api_node = api_node['snapshot']
+
+ extra = {'volume_id': api_node['volumeId'],
+ 'name': api_node['displayName'],
+ 'created': api_node['createdAt'],
+ 'description': api_node['displayDescription'],
+ 'status': api_node['status']}
+
+ snapshot = VolumeSnapshot(id=api_node['id'], driver=self,
+ size=api_node['size'],
+ extra=extra)
+ return snapshot
+
+ def _ex_connection_class_kwargs(self):
+ endpoint_args = ENDPOINT_ARGS_MAP[self.region]
+ kwargs = self.openstack_connection_kwargs()
+ kwargs['region'] = self.region
+ kwargs['get_endpoint_args'] = endpoint_args
+ return kwargs
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/rimuhosting.py b/awx/lib/site-packages/libcloud/compute/drivers/rimuhosting.py
new file mode 100644
index 0000000000..acde574336
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/rimuhosting.py
@@ -0,0 +1,337 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+RimuHosting Driver
+"""
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.common.base import ConnectionKey, JsonResponse
+from libcloud.common.types import InvalidCredsError
+from libcloud.compute.types import Provider, NodeState
+from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
+from libcloud.compute.base import NodeImage
+
+API_CONTEXT = '/r'
+API_HOST = 'rimuhosting.com'
+
+
+class RimuHostingException(Exception):
+ """
+ Exception class for RimuHosting driver
+ """
+
+ def __str__(self):
+ return self.args[0]
+
+ def __repr__(self):
+ return "<RimuHostingException '%s'>" % (self.args[0])
+
+
+class RimuHostingResponse(JsonResponse):
+ """
+ Response Class for RimuHosting driver
+ """
+ def success(self):
+ if self.status == 403:
+ raise InvalidCredsError()
+ return True
+
+ def parse_body(self):
+ try:
+ js = super(RimuHostingResponse, self).parse_body()
+ keys = list(js.keys())
+ if js[keys[0]]['response_type'] == "ERROR":
+ raise RimuHostingException(
+ js[keys[0]]['human_readable_message']
+ )
+ return js[keys[0]]
+ except KeyError:
+ raise RimuHostingException('Could not parse body: %s'
+ % (self.body))
+
+
+class RimuHostingConnection(ConnectionKey):
+ """
+ Connection class for the RimuHosting driver
+ """
+
+ api_context = API_CONTEXT
+ host = API_HOST
+ port = 443
+ responseCls = RimuHostingResponse
+
+ def __init__(self, key, secure=True):
+ # override __init__ so that we can set secure of False for testing
+ ConnectionKey.__init__(self, key, secure)
+
+ def add_default_headers(self, headers):
+ # We want JSON back from the server. Could be application/xml
+ # (but JSON is better).
+ headers['Accept'] = 'application/json'
+ # Must encode all data as json, or override this header.
+ headers['Content-Type'] = 'application/json'
+
+ headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key)
+ return headers
+
+ def request(self, action, params=None, data='', headers=None,
+ method='GET'):
+ if not headers:
+ headers = {}
+ if not params:
+ params = {}
+ # Override this method to prepend the api_context
+ return ConnectionKey.request(self, self.api_context + action,
+ params, data, headers, method)
+
+
+class RimuHostingNodeDriver(NodeDriver):
+ """
+ RimuHosting node driver
+ """
+
+ type = Provider.RIMUHOSTING
+ name = 'RimuHosting'
+ website = 'http://rimuhosting.com/'
+ connectionCls = RimuHostingConnection
+ features = {'create_node': ['password']}
+
+ def __init__(self, key, host=API_HOST, port=443,
+ api_context=API_CONTEXT, secure=True):
+ """
+ :param key: API key (required)
+ :type key: ``str``
+
+ :param host: hostname for connection
+ :type host: ``str``
+
+ :param port: Override port used for connections.
+ :type port: ``int``
+
+ :param api_context: Optional API context.
+ :type api_context: ``str``
+
+ :param secure: Whether to use HTTPS or HTTP.
+ :type secure: ``bool``
+
+ :rtype: ``None``
+ """
+ # Pass in some extra vars so that
+ self.key = key
+ self.secure = secure
+ self.connection = self.connectionCls(key, secure)
+ self.connection.host = host
+ self.connection.api_context = api_context
+ self.connection.port = port
+ self.connection.driver = self
+ self.connection.connect()
+
+ def _order_uri(self, node, resource):
+ # Returns the order uri with its resourse appended.
+ return "/orders/%s/%s" % (node.id, resource)
+
+ # TODO: Get the node state.
+ def _to_node(self, order):
+ n = Node(id=order['slug'],
+ name=order['domain_name'],
+ state=NodeState.RUNNING,
+ public_ips=(
+ [order['allocated_ips']['primary_ip']]
+ + order['allocated_ips']['secondary_ips']),
+ private_ips=[],
+ driver=self.connection.driver,
+ extra={
+ 'order_oid': order['order_oid'],
+ 'monthly_recurring_fee': order.get(
+ 'billing_info').get('monthly_recurring_fee')})
+ return n
+
+ def _to_size(self, plan):
+ return NodeSize(
+ id=plan['pricing_plan_code'],
+ name=plan['pricing_plan_description'],
+ ram=plan['minimum_memory_mb'],
+ disk=plan['minimum_disk_gb'],
+ bandwidth=plan['minimum_data_transfer_allowance_gb'],
+ price=plan['monthly_recurring_amt']['amt_usd'],
+ driver=self.connection.driver
+ )
+
+ def _to_image(self, image):
+ return NodeImage(id=image['distro_code'],
+ name=image['distro_description'],
+ driver=self.connection.driver)
+
+ def list_sizes(self, location=None):
+ # Returns a list of sizes (aka plans)
+ # Get plans. Note this is really just for libcloud.
+ # We are happy with any size.
+ if location is None:
+ location = ''
+ else:
+ location = ";dc_location=%s" % (location.id)
+
+ res = self.connection.request(
+ '/pricing-plans;server-type=VPS%s' % (location)).object
+ return list(map(lambda x: self._to_size(x), res['pricing_plan_infos']))
+
+ def list_nodes(self):
+ # Returns a list of Nodes
+ # Will only include active ones.
+ res = self.connection.request('/orders;include_inactive=N').object
+ return list(map(lambda x: self._to_node(x), res['about_orders']))
+
+ def list_images(self, location=None):
+ # Get all base images.
+ # TODO: add other image sources. (Such as a backup of a VPS)
+ # All Images are available for use at all locations
+ res = self.connection.request('/distributions').object
+ return list(map(lambda x: self._to_image(x), res['distro_infos']))
+
+ def reboot_node(self, node):
+ # Reboot
+ # PUT the state of RESTARTING to restart a VPS.
+ # All data is encoded as JSON
+ data = {'reboot_request': {'running_state': 'RESTARTING'}}
+ uri = self._order_uri(node, 'vps/running-state')
+ self.connection.request(uri, data=json.dumps(data), method='PUT')
+ # XXX check that the response was actually successful
+ return True
+
+ def destroy_node(self, node):
+ # Shutdown a VPS.
+ uri = self._order_uri(node, 'vps')
+ self.connection.request(uri, method='DELETE')
+ # XXX check that the response was actually successful
+ return True
+
+ def create_node(self, **kwargs):
+ """Creates a RimuHosting instance
+
+ @inherits: :class:`NodeDriver.create_node`
+
+ :keyword name: Must be a FQDN. e.g example.com.
+ :type name: ``str``
+
+ :keyword ex_billing_oid: If not set,
+ a billing method is automatically picked.
+ :type ex_billing_oid: ``str``
+
+ :keyword ex_host_server_oid: The host server to set the VPS up on.
+ :type ex_host_server_oid: ``str``
+
+ :keyword ex_vps_order_oid_to_clone: Clone another VPS to use as
+ the image for the new VPS.
+ :type ex_vps_order_oid_to_clone: ``str``
+
+ :keyword ex_num_ips: Number of IPs to allocate. Defaults to 1.
+ :type ex_num_ips: ``int``
+
+ :keyword ex_extra_ip_reason: Reason for needing the extra IPs.
+ :type ex_extra_ip_reason: ``str``
+
+ :keyword ex_memory_mb: Memory to allocate to the VPS.
+ :type ex_memory_mb: ``int``
+
+ :keyword ex_disk_space_mb: Diskspace to allocate to the VPS.
+ Defaults to 4096 (4GB).
+ :type ex_disk_space_mb: ``int``
+
+ :keyword ex_disk_space_2_mb: Secondary disk size allocation.
+ Disabled by default.
+ :type ex_disk_space_2_mb: ``int``
+
+ :keyword ex_control_panel: Control panel to install on the VPS.
+ :type ex_control_panel: ``str``
+ """
+ # Note we don't do much error checking in this because we
+ # expect the API to error out if there is a problem.
+ name = kwargs['name']
+ image = kwargs['image']
+ size = kwargs['size']
+
+ data = {
+ 'instantiation_options': {
+ 'domain_name': name,
+ 'distro': image.id
+ },
+ 'pricing_plan_code': size.id,
+ 'vps_parameters': {}
+ }
+
+ if 'ex_control_panel' in kwargs:
+ data['instantiation_options']['control_panel'] = \
+ kwargs['ex_control_panel']
+
+ auth = self._get_and_check_auth(kwargs.get('auth'))
+ data['instantiation_options']['password'] = auth.password
+
+ if 'ex_billing_oid' in kwargs:
+ # TODO check for valid oid.
+ data['billing_oid'] = kwargs['ex_billing_oid']
+
+ if 'ex_host_server_oid' in kwargs:
+ data['host_server_oid'] = kwargs['ex_host_server_oid']
+
+ if 'ex_vps_order_oid_to_clone' in kwargs:
+ data['vps_order_oid_to_clone'] = \
+ kwargs['ex_vps_order_oid_to_clone']
+
+ if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1:
+ if 'ex_extra_ip_reason' not in kwargs:
+ raise RimuHostingException(
+ 'Need an reason for having an extra IP')
+ else:
+ if 'ip_request' not in data:
+ data['ip_request'] = {}
+ data['ip_request']['num_ips'] = int(kwargs['ex_num_ips'])
+ data['ip_request']['extra_ip_reason'] = \
+ kwargs['ex_extra_ip_reason']
+
+ if 'ex_memory_mb' in kwargs:
+ data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb']
+
+ if 'ex_disk_space_mb' in kwargs:
+ data['vps_parameters']['disk_space_mb'] = \
+ kwargs['ex_disk_space_mb']
+
+ if 'ex_disk_space_2_mb' in kwargs:
+ data['vps_parameters']['disk_space_2_mb'] =\
+ kwargs['ex_disk_space_2_mb']
+
+ # Don't send empty 'vps_parameters' attribute
+ if not data['vps_parameters']:
+ del data['vps_parameters']
+
+ res = self.connection.request(
+ '/orders/new-vps',
+ method='POST',
+ data=json.dumps({"new-vps": data})
+ ).object
+ node = self._to_node(res['about_order'])
+ node.extra['password'] = \
+ res['new_order_request']['instantiation_options']['password']
+ return node
+
+ def list_locations(self):
+ return [
+ NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self),
+ NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self),
+ NodeLocation('DCLONDON', "RimuHosting London", 'GB', self),
+ NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self),
+ ]
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/serverlove.py b/awx/lib/site-packages/libcloud/compute/drivers/serverlove.py
new file mode 100644
index 0000000000..2ba92a9560
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/serverlove.py
@@ -0,0 +1,83 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ServerLove Driver
+"""
+
+from libcloud.compute.types import Provider
+from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver
+from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection
+
+
# API end-points
API_ENDPOINTS = {
    'uk-1': {
        'name': 'United Kingdom, Manchester',
        'country': 'United Kingdom',
        'host': 'api.z1-man.serverlove.com'
    }
}

# Default API end-point for the base connection class.
DEFAULT_ENDPOINT = 'uk-1'

# Retrieved from http://www.serverlove.com/cloud-server-faqs/api-questions/
# Pre-installed drive images, keyed by UUID.
# NOTE(review): supports_deployment is False only for the Windows drives;
# presumably it gates script-based deployment — confirm against the
# elasticstack base driver.
STANDARD_DRIVES = {
    '679f5f44-0be7-4745-a658-cccd4334c1aa': {
        'uuid': '679f5f44-0be7-4745-a658-cccd4334c1aa',
        'description': 'CentOS 5.5',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    '5f2e0e29-2937-42b9-b362-d2d07eddbdeb': {
        'uuid': '5f2e0e29-2937-42b9-b362-d2d07eddbdeb',
        'description': 'Ubuntu Linux 10.04',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    '5795b68f-ed26-4639-b41d-c93235062b6b': {
        'uuid': '5795b68f-ed26-4639-b41d-c93235062b6b',
        'description': 'Debian Linux 5',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    '41993a02-0b22-4e49-bb47-0aa8975217e4': {
        'uuid': '41993a02-0b22-4e49-bb47-0aa8975217e4',
        'description': 'Windows Server 2008 R2 Standard',
        'size_gunzipped': '15GB',
        'supports_deployment': False,
    },
    '85623ca1-9c2a-4398-a771-9a43c347e86b': {
        'uuid': '85623ca1-9c2a-4398-a771-9a43c347e86b',
        'description': 'Windows Web Server 2008 R2',
        'size_gunzipped': '15GB',
        'supports_deployment': False,
    }
}
+
+
class ServerLoveConnection(ElasticStackBaseConnection):
    """Connection to the ServerLove API, pinned to the default endpoint."""
    host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host']
+
+
class ServerLoveNodeDriver(ElasticStackBaseNodeDriver):
    """
    ServerLove node driver.

    Thin specialization of the ElasticStack base driver: all behavior
    comes from the base class; only provider identity, connection class
    and the standard drive catalogue are set here.
    """
    type = Provider.SERVERLOVE
    api_name = 'serverlove'
    website = 'http://www.serverlove.com/'
    name = 'ServerLove'
    connectionCls = ServerLoveConnection
    features = {'create_node': ['generates_password']}
    _standard_drives = STANDARD_DRIVES
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/skalicloud.py b/awx/lib/site-packages/libcloud/compute/drivers/skalicloud.py
new file mode 100644
index 0000000000..c0b0d79404
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/skalicloud.py
@@ -0,0 +1,83 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+skalicloud Driver
+"""
+
+from libcloud.compute.types import Provider
+from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver
+from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection
+
+
# API end-points
API_ENDPOINTS = {
    'my-1': {
        'name': 'Malaysia, Kuala Lumpur',
        'country': 'Malaysia',
        'host': 'api.sdg-my.skalicloud.com'
    }
}

# Default API end-point for the base connection class.
DEFAULT_ENDPOINT = 'my-1'

# Retrieved from http://www.skalicloud.com/cloud-api/
# Pre-installed drive images, keyed by UUID.
# NOTE(review): supports_deployment is False only for the Windows drives;
# presumably it gates script-based deployment — confirm against the
# elasticstack base driver.
STANDARD_DRIVES = {
    '90aa51f2-15c0-4cff-81ee-e93aa20b9468': {
        'uuid': '90aa51f2-15c0-4cff-81ee-e93aa20b9468',
        'description': 'CentOS 5.5 -64bit',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    'c144d7a7-e24b-48ab-954b-6b6ec514ed6f': {
        'uuid': 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f',
        'description': 'Debian 5 -64bit',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    '3051699a-a536-4220-aeb5-67f2ec101a09': {
        'uuid': '3051699a-a536-4220-aeb5-67f2ec101a09',
        'description': 'Ubuntu Server 10.10 -64bit',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9': {
        'uuid': '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9',
        'description': 'Windows 2008R2 Web Edition',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '93bf390e-4f46-4252-a8bc-9d6d80e3f955': {
        'uuid': '93bf390e-4f46-4252-a8bc-9d6d80e3f955',
        'description': 'Windows Server 2008R2 Standard',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    }
}
+
+
class SkaliCloudConnection(ElasticStackBaseConnection):
    """Connection to the skalicloud API, pinned to the default endpoint."""
    host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host']
+
+
class SkaliCloudNodeDriver(ElasticStackBaseNodeDriver):
    """
    skalicloud node driver.

    Thin specialization of the ElasticStack base driver: all behavior
    comes from the base class; only provider identity, connection class
    and the standard drive catalogue are set here.
    """
    type = Provider.SKALICLOUD
    api_name = 'skalicloud'
    name = 'skalicloud'
    website = 'http://www.skalicloud.com/'
    connectionCls = SkaliCloudConnection
    features = {"create_node": ["generates_password"]}
    _standard_drives = STANDARD_DRIVES
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/softlayer.py b/awx/lib/site-packages/libcloud/compute/drivers/softlayer.py
new file mode 100644
index 0000000000..4fe8cdd2e3
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/softlayer.py
@@ -0,0 +1,474 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Softlayer driver
+"""
+
+import time
+
+from libcloud.common.base import ConnectionUserAndKey
+from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.compute.types import Provider, NodeState
+from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, \
+ NodeImage
+
# Fallbacks used by create_node when the caller supplies no explicit
# domain / cpu / ram / disk values.
DEFAULT_DOMAIN = 'example.com'
DEFAULT_CPU_SIZE = 1
DEFAULT_RAM_SIZE = 2048
DEFAULT_DISK_SIZE = 100

# Known datacenters; entries without a 'name' fall back to their id in
# _to_loc.
DATACENTERS = {
    'hou02': {'country': 'US'},
    'sea01': {'country': 'US', 'name': 'Seattle - West Coast U.S.'},
    'wdc01': {'country': 'US', 'name': 'Washington, DC - East Coast U.S.'},
    'dal01': {'country': 'US'},
    'dal02': {'country': 'US'},
    'dal04': {'country': 'US'},
    'dal05': {'country': 'US', 'name': 'Dallas - Central U.S.'},
    'dal06': {'country': 'US'},
    'dal07': {'country': 'US'},
    'sjc01': {'country': 'US', 'name': 'San Jose - West Coast U.S.'},
    'sng01': {'country': 'SG', 'name': 'Singapore - Southeast Asia'},
    'ams01': {'country': 'NL', 'name': 'Amsterdam - Western Europe'},
}

# SoftLayer power-state keyName -> libcloud NodeState. HALTED/PAUSED map
# to UNKNOWN; _to_node promotes UNKNOWN to the INITIATING (pending) state
# for guests that do not yet have a password.
NODE_STATE_MAP = {
    'RUNNING': NodeState.RUNNING,
    'HALTED': NodeState.UNKNOWN,
    'PAUSED': NodeState.UNKNOWN,
    'INITIATING': NodeState.PENDING
}
+
# (cpus, ram_gb, disk_gb) combinations offered as standard templates.
# Generating the dicts from this table replaces 60+ lines of duplicated
# literals while producing identical values.
_SL_TEMPLATE_SPECS = [
    (1, 1, 25), (1, 1, 100), (1, 2, 100), (1, 4, 100),
    (2, 2, 100), (2, 4, 100), (2, 8, 100),
    (4, 4, 100), (4, 8, 100),
    (6, 4, 100), (6, 8, 100),
    (8, 8, 100), (8, 16, 100),
]

SL_BASE_TEMPLATES = [
    {
        'name': '%d CPU, %dGB ram, %dGB' % (cpus, ram_gb, disk),
        'ram': ram_gb * 1024,
        'disk': disk,
        'cpus': cpus,
    } for (cpus, ram_gb, disk) in _SL_TEMPLATE_SPECS]

SL_TEMPLATES = {}
for i, template in enumerate(SL_BASE_TEMPLATES):
    # Add local disk templates
    local = template.copy()
    local['local_disk'] = True
    SL_TEMPLATES[i] = local
+
+
class SoftLayerException(LibcloudError):
    """
    Exception class for SoftLayer driver errors (also used as the default
    exception type for API faults via SoftLayerResponse).
    """
    pass
+
+
class SoftLayerResponse(XMLRPCResponse):
    """
    XML-RPC response wrapper: faults from the SoftLayer_Account service
    are raised as InvalidCredsError, everything else as SoftLayerException.
    """
    defaultExceptionCls = SoftLayerException
    exceptions = {
        'SoftLayer_Account': InvalidCredsError,
    }
+
+
class SoftLayerConnection(XMLRPCConnection, ConnectionUserAndKey):
    """
    Connection class for the SoftLayer XML-RPC v3 API.

    Authentication, per-object init parameters and an optional object
    mask travel in a 'headers' struct prepended to the positional
    arguments, as the SoftLayer API expects.
    """
    responseCls = SoftLayerResponse
    host = 'api.softlayer.com'
    endpoint = '/xmlrpc/v3'

    def request(self, service, method, *args, **kwargs):
        """
        Issue *method* on *service*.

        :keyword id: optional object id for <service>InitParameters
        :keyword object_mask: optional mask for <service>ObjectMask
        """
        headers = {}
        headers.update(self._get_auth_headers())
        headers.update(self._get_init_params(service, kwargs.get('id')))
        # The object mask only needs to be merged once; the original code
        # updated the dict twice with the identical mapping.
        headers.update(
            self._get_object_mask(service, kwargs.get('object_mask')))

        args = ({'headers': headers}, ) + args
        endpoint = '%s/%s' % (self.endpoint, service)

        return super(SoftLayerConnection, self).request(method, *args,
                                                        endpoint=endpoint)

    def _get_auth_headers(self):
        # Credentials are carried in the request body, not HTTP headers.
        return {
            'authenticate': {
                'username': self.user_id,
                'apiKey': self.key
            }
        }

    def _get_init_params(self, service, id):
        # Scope the call to a specific object (e.g. one virtual guest).
        if id is not None:
            return {
                '%sInitParameters' % service: {'id': id}
            }
        else:
            return {}

    def _get_object_mask(self, service, mask):
        # Restrict which properties the API returns.
        if mask is not None:
            return {
                '%sObjectMask' % service: {'mask': mask}
            }
        else:
            return {}
+
+
class SoftLayerNodeDriver(NodeDriver):
    """
    SoftLayer node driver

    Extra node attributes:
        - password: root password
        - hourlyRecurringFee: hourly price (if applicable)
        - recurringFee      : flat rate    (if applicable)
        - recurringMonths   : The number of months in which the recurringFee
          will be incurred.
    """
    connectionCls = SoftLayerConnection
    name = 'SoftLayer'
    website = 'http://www.softlayer.com/'
    type = Provider.SOFTLAYER

    # Root passwords are generated by SoftLayer and reported back on the
    # node's extra dict.
    features = {'create_node': ['generates_password']}
+
    def _to_node(self, host):
        """
        Convert a SoftLayer virtual-guest dict into a :class:`Node`.

        Password, billing figures, datacenter and image description are
        optional in the API response and default to ``None`` / ``0``
        when absent.
        """
        try:
            password = \
                host['operatingSystem']['passwords'][0]['password']
        except (IndexError, KeyError):
            password = None

        hourlyRecurringFee = host.get('billingItem', {}).get(
            'hourlyRecurringFee', 0)
        recurringFee = host.get('billingItem', {}).get('recurringFee', 0)
        recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0)
        createDate = host.get('createDate', None)

        # When machine is launching it gets state halted
        # we change this to pending
        state = NODE_STATE_MAP.get(host['powerState']['keyName'],
                                   NodeState.UNKNOWN)

        # No password + unknown power state: assume the guest is still
        # provisioning, so report it as pending.
        if not password and state == NodeState.UNKNOWN:
            state = NODE_STATE_MAP['INITIATING']

        public_ips = []
        private_ips = []

        if 'primaryIpAddress' in host:
            public_ips.append(host['primaryIpAddress'])

        if 'primaryBackendIpAddress' in host:
            private_ips.append(host['primaryBackendIpAddress'])

        # Human-readable OS description, when license info was returned.
        image = host.get('operatingSystem', {}).get('softwareLicense', {}) \
            .get('softwareDescription', {}) \
            .get('longDescription', None)

        return Node(
            id=host['id'],
            name=host['fullyQualifiedDomainName'],
            state=state,
            public_ips=public_ips,
            private_ips=private_ips,
            driver=self,
            extra={
                'hostname': host['hostname'],
                'fullyQualifiedDomainName': host['fullyQualifiedDomainName'],
                'password': password,
                'maxCpu': host.get('maxCpu', None),
                'datacenter': host.get('datacenter', {}).get('longName', None),
                'maxMemory': host.get('maxMemory', None),
                'image': image,
                'hourlyRecurringFee': hourlyRecurringFee,
                'recurringFee': recurringFee,
                'recurringMonths': recurringMonths,
                'created': createDate,
            }
        )
+
+ def destroy_node(self, node):
+ self.connection.request(
+ 'SoftLayer_Virtual_Guest', 'deleteObject', id=node.id
+ )
+ return True
+
+ def reboot_node(self, node):
+ self.connection.request(
+ 'SoftLayer_Virtual_Guest', 'rebootSoft', id=node.id
+ )
+ return True
+
+ def ex_stop_node(self, node):
+ self.connection.request(
+ 'SoftLayer_Virtual_Guest', 'powerOff', id=node.id
+ )
+ return True
+
+ def ex_start_node(self, node):
+ self.connection.request(
+ 'SoftLayer_Virtual_Guest', 'powerOn', id=node.id
+ )
+ return True
+
+ def _get_order_information(self, node_id, timeout=1200, check_interval=5):
+ mask = {
+ 'billingItem': '',
+ 'powerState': '',
+ 'operatingSystem': {'passwords': ''},
+ 'provisionDate': '',
+ }
+
+ for i in range(0, timeout, check_interval):
+ res = self.connection.request(
+ 'SoftLayer_Virtual_Guest',
+ 'getObject',
+ id=node_id,
+ object_mask=mask
+ ).object
+
+ if res.get('provisionDate', None):
+ return res
+
+ time.sleep(check_interval)
+
+ raise SoftLayerException('Timeout on getting node details')
+
    def create_node(self, **kwargs):
        """Create a new SoftLayer node

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_domain: e.g. libcloud.org
        :type ex_domain: ``str``
        :keyword ex_cpus: e.g. 2
        :type ex_cpus: ``int``
        :keyword ex_disk: e.g. 100
        :type ex_disk: ``int``
        :keyword ex_ram: e.g. 2048
        :type ex_ram: ``int``
        :keyword ex_bandwidth: e.g. 100
        :type ex_bandwidth: ``int``
        :keyword ex_local_disk: e.g. True
        :type ex_local_disk: ``bool``
        :keyword ex_datacenter: e.g. Dal05
        :type ex_datacenter: ``str``
        :keyword ex_os: e.g. UBUNTU_LATEST
        :type ex_os: ``str``
        """
        name = kwargs['name']
        # OS precedence: explicit ex_os, then image id, then Debian.
        os = 'DEBIAN_LATEST'
        if 'ex_os' in kwargs:
            os = kwargs['ex_os']
        elif 'image' in kwargs:
            os = kwargs['image'].id

        size = kwargs.get('size', NodeSize(id=123, name='Custom', ram=None,
                                           disk=None, bandwidth=None,
                                           price=None,
                                           driver=self.connection.driver))
        ex_size_data = SL_TEMPLATES.get(int(size.id)) or {}
        # plan keys are ints
        # Each value falls back from ex_* keyword -> template -> default.
        cpu_count = kwargs.get('ex_cpus') or ex_size_data.get('cpus') or \
            DEFAULT_CPU_SIZE
        ram = kwargs.get('ex_ram') or ex_size_data.get('ram') or \
            DEFAULT_RAM_SIZE
        bandwidth = kwargs.get('ex_bandwidth') or size.bandwidth or 10
        # The API expects string booleans in the request body.
        hourly = 'true' if kwargs.get('ex_hourly', True) else 'false'

        local_disk = 'true'
        if ex_size_data.get('local_disk') is False:
            local_disk = 'false'

        if kwargs.get('ex_local_disk') is False:
            local_disk = 'false'

        disk_size = DEFAULT_DISK_SIZE
        if size.disk:
            disk_size = size.disk
        if kwargs.get('ex_disk'):
            disk_size = kwargs.get('ex_disk')

        datacenter = ''
        if 'ex_datacenter' in kwargs:
            datacenter = kwargs['ex_datacenter']
        elif 'location' in kwargs:
            datacenter = kwargs['location'].id

        # Derive the domain from an FQDN-style name when none was given.
        domain = kwargs.get('ex_domain')
        if domain is None:
            if name.find('.') != -1:
                domain = name[name.find('.') + 1:]
        if domain is None:
            # TODO: domain is a required argument for the SoftLayer API,
            # but it shouldn't be.
            domain = DEFAULT_DOMAIN

        newCCI = {
            'hostname': name,
            'domain': domain,
            'startCpus': cpu_count,
            'maxMemory': ram,
            'networkComponents': [{'maxSpeed': bandwidth}],
            'hourlyBillingFlag': hourly,
            'operatingSystemReferenceCode': os,
            'localDiskFlag': local_disk,
            'blockDevices': [
                {
                    'device': '0',
                    'diskImage': {
                        'capacity': disk_size,
                    }
                }
            ]

        }

        if datacenter:
            newCCI['datacenter'] = {'name': datacenter}

        res = self.connection.request(
            'SoftLayer_Virtual_Guest', 'createObject', newCCI
        ).object

        node_id = res['id']
        # Block until provisioning finishes so the password is available.
        raw_node = self._get_order_information(node_id)

        return self._to_node(raw_node)
+
+ def _to_image(self, img):
+ return NodeImage(
+ id=img['template']['operatingSystemReferenceCode'],
+ name=img['itemPrice']['item']['description'],
+ driver=self.connection.driver
+ )
+
+ def list_images(self, location=None):
+ result = self.connection.request(
+ 'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
+ ).object
+ return [self._to_image(i) for i in result['operatingSystems']]
+
+ def _to_size(self, id, size):
+ return NodeSize(
+ id=id,
+ name=size['name'],
+ ram=size['ram'],
+ disk=size['disk'],
+ bandwidth=size.get('bandwidth'),
+ price=None,
+ driver=self.connection.driver,
+ )
+
+ def list_sizes(self, location=None):
+ return [self._to_size(id, s) for id, s in SL_TEMPLATES.items()]
+
+ def _to_loc(self, loc):
+ country = 'UNKNOWN'
+ loc_id = loc['template']['datacenter']['name']
+ name = loc_id
+
+ if loc_id in DATACENTERS:
+ country = DATACENTERS[loc_id]['country']
+ name = DATACENTERS[loc_id].get('name', loc_id)
+ return NodeLocation(id=loc_id, name=name,
+ country=country, driver=self)
+
+ def list_locations(self):
+ res = self.connection.request(
+ 'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
+ ).object
+ return [self._to_loc(l) for l in res['datacenters']]
+
+ def list_nodes(self):
+ mask = {
+ 'virtualGuests': {
+ 'powerState': '',
+ 'hostname': '',
+ 'maxMemory': '',
+ 'datacenter': '',
+ 'operatingSystem': {'passwords': ''},
+ 'billingItem': '',
+ },
+ }
+ res = self.connection.request(
+ "SoftLayer_Account",
+ "getVirtualGuests",
+ object_mask=mask
+ ).object
+ return [self._to_node(h) for h in res]
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/vcl.py b/awx/lib/site-packages/libcloud/compute/drivers/vcl.py
new file mode 100644
index 0000000000..a5ec4644d7
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/vcl.py
@@ -0,0 +1,302 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+VCL driver
+"""
+
+import time
+
+from libcloud.common.base import ConnectionUserAndKey
+from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.compute.types import Provider, NodeState
+from libcloud.compute.base import NodeDriver, Node
+from libcloud.compute.base import NodeSize, NodeImage
+
+
class VCLResponse(XMLRPCResponse):
    """
    XML-RPC response wrapper for VCL; faults from 'VCL_Account' are
    raised as InvalidCredsError.
    """
    exceptions = {
        'VCL_Account': InvalidCredsError,
    }
+
+
class VCLConnection(XMLRPCConnection, ConnectionUserAndKey):
    # All XML-RPC calls go through this single endpoint on the user-supplied
    # host.
    endpoint = '/index.php?mode=xmlrpccall'

    def add_default_headers(self, headers):
        """Attach API version and user credentials as HTTP headers."""
        headers['X-APIVERSION'] = '2'
        headers['X-User'] = self.user_id
        headers['X-Pass'] = self.key
        return headers
+
+
class VCLNodeDriver(NodeDriver):
    """
    VCL node driver

    :keyword host: The VCL host to which you make requests (required)
    :type host: ``str``
    """

    # VCL request status strings -> libcloud node states.
    NODE_STATE_MAP = {
        'ready': NodeState.RUNNING,
        'failed': NodeState.TERMINATED,
        'timedout': NodeState.TERMINATED,
        'loading': NodeState.PENDING,
        'time': NodeState.PENDING,
        'future': NodeState.PENDING,
        'error': NodeState.UNKNOWN,
        'notready': NodeState.PENDING,
        'notavailable': NodeState.TERMINATED,
        'success': NodeState.PENDING
    }

    connectionCls = VCLConnection
    name = 'VCL'
    website = 'http://incubator.apache.org/vcl/'
    type = Provider.VCL
+
+ def __init__(self, key, secret, secure=True, host=None, port=None, *args,
+ **kwargs):
+ """
+ :param key: API key or username to used (required)
+ :type key: ``str``
+
+ :param secret: Secret password to be used (required)
+ :type secret: ``str``
+
+ :param secure: Weither to use HTTPS or HTTP.
+ :type secure: ``bool``
+
+ :param host: Override hostname used for connections. (required)
+ :type host: ``str``
+
+ :param port: Override port used for connections.
+ :type port: ``int``
+
+ :rtype: ``None``
+ """
+ if not host:
+ raise Exception('When instantiating VCL driver directly ' +
+ 'you also need to provide host')
+
+ super(VCLNodeDriver, self).__init__(key, secret, secure=True,
+ host=None, port=None, *args,
+ **kwargs)
+
+ def _vcl_request(self, method, *args):
+ res = self.connection.request(
+ method,
+ *args
+ ).object
+ if(res['status'] == 'error'):
+ raise LibcloudError(res['errormsg'], driver=self)
+ return res
+
    def create_node(self, **kwargs):
        """Create a new VCL reservation
        size and name ignored, image is the id from list_image

        @inherits: :class:`NodeDriver.create_node`

        :keyword image: image is the id from list_image
        :type image: ``str``

        :keyword start: start time as unix timestamp
        :type start: ``str``

        :keyword length: length of time in minutes
        :type length: ``str``
        """

        image = kwargs["image"]
        # Defaults: start now, reserve for 60 minutes.
        start = kwargs.get('start', int(time.time()))
        length = kwargs.get('length', '60')

        res = self._vcl_request(
            "XMLRPCaddRequest",
            image.id,
            start,
            length
        )

        # The reservation has no IP until it is ready; see _to_status.
        return Node(
            id=res['requestid'],
            name=image.name,
            state=self.NODE_STATE_MAP[res['status']],
            public_ips=[],
            private_ips=[],
            driver=self,
            image=image.name
        )
+
+ def destroy_node(self, node):
+ """
+ End VCL reservation for the node passed in.
+ Throws error if request fails.
+
+ :param node: The node to be destroyed
+ :type node: :class:`Node`
+
+ :rtype: ``bool``
+ """
+ try:
+ self._vcl_request(
+ 'XMLRPCendRequest',
+ node.id
+ )
+ except LibcloudError:
+ return False
+ return True
+
+ def _to_image(self, img):
+ return NodeImage(
+ id=img['id'],
+ name=img['name'],
+ driver=self.connection.driver
+ )
+
+ def list_images(self, location=None):
+ """
+ List images available to the user provided credentials
+
+ @inherits: :class:`NodeDriver.list_images`
+ """
+ res = self.connection.request(
+ "XMLRPCgetImages"
+ ).object
+ return [self._to_image(i) for i in res]
+
+ def list_sizes(self, location=None):
+ """
+ VCL does not choosing sizes for node creation.
+ Size of images are statically set by administrators.
+
+ @inherits: :class:`NodeDriver.list_sizes`
+ """
+ return [NodeSize(
+ 't1.micro',
+ 'none',
+ '512',
+ 0, 0, 0, self)
+ ]
+
+ def _to_connect_data(self, request_id, ipaddr):
+ res = self._vcl_request(
+ "XMLRPCgetRequestConnectData",
+ request_id,
+ ipaddr
+ )
+ return res
+
+ def _to_status(self, requestid, imagename, ipaddr):
+ res = self._vcl_request(
+ "XMLRPCgetRequestStatus",
+ requestid
+ )
+
+ public_ips = []
+ extra = []
+ if(res['status'] == 'ready'):
+ cdata = self._to_connect_data(requestid, ipaddr)
+ public_ips = [cdata['serverIP']]
+ extra = {
+ 'user': cdata['user'],
+ 'pass': cdata['password']
+ }
+ return Node(
+ id=requestid,
+ name=imagename,
+ state=self.NODE_STATE_MAP[res['status']],
+ public_ips=public_ips,
+ private_ips=[],
+ driver=self,
+ image=imagename,
+ extra=extra
+ )
+
+ def _to_nodes(self, res, ipaddr):
+ return [self._to_status(
+ h['requestid'],
+ h['imagename'],
+ ipaddr
+ ) for h in res]
+
+ def list_nodes(self, ipaddr):
+ """
+ List nodes
+
+ :param ipaddr: IP address which should be used
+ :type ipaddr: ``str``
+
+ :rtype: ``list`` of :class:`Node`
+ """
+ res = self._vcl_request(
+ "XMLRPCgetRequestIds"
+ )
+ return self._to_nodes(res['requests'], ipaddr)
+
    def ex_update_node_access(self, node, ipaddr):
        """
        Update the remote ip accessing the node.

        :param node: the reservation node to update
        :type node: :class:`Node`

        :param ipaddr: the ipaddr used to access the node
        :type ipaddr: ``str``

        :return: node with updated information
        :rtype: :class:`Node`
        """
        return self._to_status(node.id, node.image, ipaddr)

    def ex_extend_request_time(self, node, minutes):
        """
        Time in minutes to extend the requested node's reservation time

        :param node: the reservation node to update
        :type node: :class:`Node`

        :param minutes: the number of minutes to extend by
        :type minutes: ``str``

        :return: true on success, throws error on failure
        :rtype: ``bool``
        """
        return self._vcl_request(
            "XMLRPCextendRequest",
            node.id,
            minutes
        )
+
+ def ex_get_request_end_time(self, node):
+ """
+ Get the ending time of the node reservation.
+
+ :param node: the reservation node to update
+ :type node: :class:`Node`
+
+ :return: unix timestamp
+ :rtype: ``int``
+ """
+ res = self._vcl_request(
+ "XMLRPCgetRequestIds"
+ )
+ time = 0
+ for i in res['requests']:
+ if i['requestid'] == node.id:
+ time = i['end']
+ return time
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/vcloud.py b/awx/lib/site-packages/libcloud/compute/drivers/vcloud.py
new file mode 100644
index 0000000000..d18d390d4a
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/vcloud.py
@@ -0,0 +1,2090 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+VMware vCloud driver.
+"""
+import copy
+import sys
+import re
+import base64
+import os
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import urlparse
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import next
+
+urlparse = urlparse.urlparse
+
+import time
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from xml.parsers.expat import ExpatError
+
+from libcloud.common.base import XmlResponse, ConnectionUserAndKey
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.compute.providers import Provider
+from libcloud.compute.types import NodeState
+from libcloud.compute.base import Node, NodeDriver, NodeLocation
+from libcloud.compute.base import NodeSize, NodeImage
+
+"""
+From vcloud api "The VirtualQuantity element defines the number of MB
+of memory. This should be either 512 or a multiple of 1024 (1 GB)."
+"""
+VIRTUAL_MEMORY_VALS = [512] + [1024 * i for i in range(1, 9)]
+
+# Default timeout (in seconds) for long running tasks
+DEFAULT_TASK_COMPLETION_TIMEOUT = 600
+
+DEFAULT_API_VERSION = '0.8'
+
+"""
+Valid vCloud API v1.5 input values.
+"""
+VIRTUAL_CPU_VALS_1_5 = [i for i in range(1, 9)]
+FENCE_MODE_VALS_1_5 = ['bridged', 'isolated', 'natRouted']
+IP_MODE_VALS_1_5 = ['POOL', 'DHCP', 'MANUAL', 'NONE']
+
+
def fixxpath(root, xpath):
    """
    Namespace-qualify every component of *xpath*.

    ElementTree requires fully qualified ("{namespace}tag") names in its
    xpath expressions; callers write plain "a/b" paths and this helper
    expands them using the namespace of the given root element.

    :param root: element whose tag has the "{namespace}tag" form
    :param xpath: slash separated path of local tag names
    :return: xpath with every component prefixed by root's namespace
    """
    # root.tag looks like "{namespace}tag"; keep only the namespace part.
    # (The original also unpacked the unused tag name.)
    namespace = root.tag[1:].split("}", 1)[0]
    fixed_xpath = "/".join(["{%s}%s" % (namespace, e)
                            for e in xpath.split("/")])
    return fixed_xpath
+
+
def get_url_path(url):
    """Return only the path component of *url*, ignoring surrounding
    whitespace."""
    parsed = urlparse(url.strip())
    return parsed.path
+
+
class Vdc(object):
    """
    Virtual datacenter (vDC) representation
    """
    def __init__(self, id, name, driver, allocation_model=None, cpu=None,
                 memory=None, storage=None):
        # cpu/memory/storage are Capacity instances when supplied.
        self.id = id
        self.name = name
        self.driver = driver
        self.allocation_model = allocation_model
        self.cpu = cpu
        self.memory = memory
        self.storage = storage

    def __repr__(self):
        # Bug fix: the format string was empty (''), which made repr()
        # raise "TypeError: not all arguments converted".
        return ('<Vdc: id=%s, name=%s, driver=%s>'
                % (self.id, self.name, self.driver.name))
+
+
class Capacity(object):
    """
    Represents CPU, Memory or Storage capacity of vDC.
    """
    def __init__(self, limit, used, units):
        self.limit = limit
        self.used = used
        self.units = units

    def __repr__(self):
        # Bug fix: the format string was empty (''), which made repr()
        # raise "TypeError: not all arguments converted".
        return ('<Capacity: limit=%s, used=%s, units=%s>'
                % (self.limit, self.used, self.units))
+
+
class ControlAccess(object):
    """
    Represents control access settings of a node
    """
    class AccessLevel(object):
        # The three access levels understood by the vCloud API.
        READ_ONLY = 'ReadOnly'
        CHANGE = 'Change'
        FULL_CONTROL = 'FullControl'

    def __init__(self, node, everyone_access_level, subjects=None):
        self.node = node
        self.everyone_access_level = everyone_access_level
        # Avoid a shared mutable default: default to a fresh list.
        if not subjects:
            subjects = []
        self.subjects = subjects

    def __repr__(self):
        # Bug fix: the format string was empty (''), which made repr()
        # raise "TypeError: not all arguments converted".
        return ('<ControlAccess: node=%s, everyone_access_level=%s, '
                'subjects=%s>'
                % (self.node, self.everyone_access_level, self.subjects))
+
+
class Subject(object):
    """
    User or group subject
    """
    def __init__(self, type, name, access_level, id=None):
        self.type = type
        self.name = name
        self.access_level = access_level
        self.id = id

    def __repr__(self):
        # Bug fix: the format string was empty (''), which made repr()
        # raise "TypeError: not all arguments converted".
        return ('<Subject: type=%s, name=%s, access_level=%s>'
                % (self.type, self.name, self.access_level))
+
+
class InstantiateVAppXML(object):
    """
    Builds the InstantiateVAppTemplateParams request body for the
    vCloud v0.8 API.

    The document embeds the vApp template reference, an optional OVF
    product section (password/row/group properties), the virtual
    hardware (CPU count and memory) and a network association.
    NOTE(review): element creation order determines serialization order,
    so the tree is built strictly top-down.
    """

    def __init__(self, name, template, net_href, cpus, memory,
                 password=None, row=None, group=None):
        # cpus and memory must already be strings: they become XML text.
        self.name = name
        self.template = template
        self.net_href = net_href
        self.cpus = cpus
        self.memory = memory
        self.password = password
        self.row = row
        self.group = group

        self._build_xmltree()

    def tostring(self):
        """Serialize the built tree to an XML byte string."""
        return ET.tostring(self.root)

    def _build_xmltree(self):
        """Assemble the full request document under self.root."""
        self.root = self._make_instantiation_root()

        self._add_vapp_template(self.root)
        instantiation_params = ET.SubElement(self.root,
                                             "InstantiationParams")

        # product and virtual hardware
        self._make_product_section(instantiation_params)
        self._make_virtual_hardware(instantiation_params)

        network_config_section = ET.SubElement(instantiation_params,
                                               "NetworkConfigSection")

        network_config = ET.SubElement(network_config_section,
                                       "NetworkConfig")
        self._add_network_association(network_config)

    def _make_instantiation_root(self):
        """Root element carrying the vApp name and the v0.8 namespaces."""
        return ET.Element(
            "InstantiateVAppTemplateParams",
            {'name': self.name,
             'xml:lang': 'en',
             'xmlns': "http://www.vmware.com/vcloud/v0.8",
             'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
        )

    def _add_vapp_template(self, parent):
        """Reference the template this vApp is instantiated from."""
        return ET.SubElement(
            parent,
            "VAppTemplate",
            {'href': self.template}
        )

    def _make_product_section(self, parent):
        """OVF ProductSection holding optional provisioning properties."""
        prod_section = ET.SubElement(
            parent,
            "ProductSection",
            {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8",
             'xmlns:ovf': "http://schemas.dmtf.org/ovf/envelope/1"}
        )

        if self.password:
            self._add_property(prod_section, 'password', self.password)

        if self.row:
            self._add_property(prod_section, 'row', self.row)

        if self.group:
            self._add_property(prod_section, 'group', self.group)

        return prod_section

    def _add_property(self, parent, ovfkey, ovfvalue):
        """Append one ovf:key/ovf:value Property element."""
        return ET.SubElement(
            parent,
            "Property",
            {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1',
             'ovf:key': ovfkey,
             'ovf:value': ovfvalue}
        )

    def _make_virtual_hardware(self, parent):
        """VirtualHardwareSection containing the CPU and memory items."""
        vh = ET.SubElement(
            parent,
            "VirtualHardwareSection",
            {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8"}
        )

        self._add_cpu(vh)
        self._add_memory(vh)

        return vh

    def _add_cpu(self, parent):
        """CIM resource item for CPUs (ResourceType 3)."""
        cpu_item = ET.SubElement(
            parent,
            "Item",
            {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"}
        )
        self._add_instance_id(cpu_item, '1')
        self._add_resource_type(cpu_item, '3')
        self._add_virtual_quantity(cpu_item, self.cpus)

        return cpu_item

    def _add_memory(self, parent):
        """CIM resource item for memory in MB (ResourceType 4)."""
        mem_item = ET.SubElement(
            parent,
            'Item',
            {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"}
        )
        self._add_instance_id(mem_item, '2')
        self._add_resource_type(mem_item, '4')
        self._add_virtual_quantity(mem_item, self.memory)

        return mem_item

    def _add_instance_id(self, parent, id):
        """CIM InstanceID child; *id* is the item's ordinal as a string."""
        elm = ET.SubElement(
            parent,
            'InstanceID',
            {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData'}
        )
        elm.text = id
        return elm

    def _add_resource_type(self, parent, type):
        """CIM ResourceType child ('3' = CPU, '4' = memory)."""
        elm = ET.SubElement(
            parent,
            'ResourceType',
            {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData'}
        )
        elm.text = type
        return elm

    def _add_virtual_quantity(self, parent, amount):
        """CIM VirtualQuantity child; *amount* is a string."""
        elm = ET.SubElement(
            parent,
            'VirtualQuantity',
            {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                      'CIM_ResourceAllocationSettingData'}
        )
        elm.text = amount
        return elm

    def _add_network_association(self, parent):
        """Associate the vApp with the network referenced by net_href."""
        return ET.SubElement(
            parent,
            'NetworkAssociation',
            {'href': self.net_href}
        )
+
+
class VCloudResponse(XmlResponse):
    """XML response that treats every 2xx vCloud status as success."""

    def success(self):
        # 200/201/202/204 cover synchronous results, created resources,
        # accepted async tasks and empty bodies.
        ok_statuses = (httplib.OK, httplib.CREATED,
                       httplib.NO_CONTENT, httplib.ACCEPTED)
        return self.status in ok_statuses
+
+
class VCloudConnection(ConnectionUserAndKey):

    """
    Connection class for the vCloud driver
    """

    responseCls = VCloudResponse
    token = None  # session cookie returned by the login endpoint
    host = None

    def request(self, *args, **kwargs):
        # Authenticate lazily; _get_auth_token is a no-op once a token
        # has been obtained.
        self._get_auth_token()
        return super(VCloudConnection, self).request(*args, **kwargs)

    def check_org(self):
        # the only way to get our org is by logging in.
        self._get_auth_token()

    def _get_auth_headers(self):
        """Some providers need different headers than others"""
        return {
            'Authorization': "Basic %s" % base64.b64encode(
                b('%s:%s' % (self.user_id, self.key))).decode('utf-8'),
            'Content-Length': '0',
            'Accept': 'application/*+xml'
        }

    def _get_auth_token(self):
        """Log in (v0.8 API) and cache the session cookie.

        Side effect: stores the organisation URL path on the driver.
        Raises InvalidCredsError when no session cookie is returned.
        """
        if not self.token:
            conn = self.conn_classes[self.secure](self.host,
                                                  self.port)
            conn.request(method='POST', url='/api/v0.8/login',
                         headers=self._get_auth_headers())

            resp = conn.getresponse()
            headers = dict(resp.getheaders())
            body = ET.XML(resp.read())

            try:
                # The session token is delivered as a cookie.
                self.token = headers['set-cookie']
            except KeyError:
                raise InvalidCredsError()

            # Remember the org href so driver requests can start from it.
            self.driver.org = get_url_path(
                body.find(fixxpath(body, 'Org')).get('href')
            )

    def add_default_headers(self, headers):
        # Every authenticated request carries the session cookie.
        headers['Cookie'] = self.token
        headers['Accept'] = 'application/*+xml'
        return headers
+
+
class VCloudNodeDriver(NodeDriver):

    """
    vCloud node driver
    """

    type = Provider.VCLOUD
    name = 'vCloud'
    website = 'http://www.vmware.com/products/vcloud/'
    connectionCls = VCloudConnection
    org = None  # organisation URL path, filled in at login
    _vdcs = None  # cached Vdc list, populated by the 'vdcs' property

    # vApp status codes (API v0.8) mapped onto libcloud node states.
    NODE_STATE_MAP = {'0': NodeState.PENDING,
                      '1': NodeState.PENDING,
                      '2': NodeState.PENDING,
                      '3': NodeState.PENDING,
                      '4': NodeState.RUNNING}

    features = {'create_node': ['password']}
+
+ def __new__(cls, key, secret=None, secure=True, host=None, port=None,
+ api_version=DEFAULT_API_VERSION, **kwargs):
+ if cls is VCloudNodeDriver:
+ if api_version == '0.8':
+ cls = VCloudNodeDriver
+ elif api_version == '1.5':
+ cls = VCloud_1_5_NodeDriver
+ elif api_version == '5.1':
+ cls = VCloud_5_1_NodeDriver
+ else:
+ raise NotImplementedError(
+ "No VCloudNodeDriver found for API version %s" %
+ (api_version))
+ return super(VCloudNodeDriver, cls).__new__(cls)
+
    @property
    def vdcs(self):
        """
        vCloud virtual data centers (vDCs).

        Fetched from the organisation entity on first access and cached
        in ``self._vdcs`` afterwards.

        :return: list of vDC objects
        :rtype: ``list`` of :class:`Vdc`
        """
        if not self._vdcs:
            self.connection.check_org()  # make sure the org is set.
            res = self.connection.request(self.org)
            # Follow each 'vdc' typed link on the org document and parse
            # the linked entity into a Vdc object.
            self._vdcs = [
                self._to_vdc(
                    self.connection.request(get_url_path(i.get('href'))).object
                )
                for i in res.object.findall(fixxpath(res.object, "Link"))
                if i.get('type') == 'application/vnd.vmware.vcloud.vdc+xml'
            ]
        return self._vdcs
+
+ def _to_vdc(self, vdc_elm):
+ return Vdc(vdc_elm.get('href'), vdc_elm.get('name'), self)
+
+ def _get_vdc(self, vdc_name):
+ vdc = None
+ if not vdc_name:
+ # Return the first organisation VDC found
+ vdc = self.vdcs[0]
+ else:
+ for v in self.vdcs:
+ if v.name == vdc_name:
+ vdc = v
+ if vdc is None:
+ raise ValueError('%s virtual data centre could not be found',
+ vdc_name)
+ return vdc
+
+ @property
+ def networks(self):
+ networks = []
+ for vdc in self.vdcs:
+ res = self.connection.request(get_url_path(vdc.id)).object
+ networks.extend(
+ [network
+ for network in res.findall(
+ fixxpath(res, 'AvailableNetworks/Network')
+
+ )]
+ )
+
+ return networks
+
+ def _to_image(self, image):
+ image = NodeImage(id=image.get('href'),
+ name=image.get('name'),
+ driver=self.connection.driver)
+ return image
+
    def _to_node(self, elm):
        """
        Convert a vApp XML element into a :class:`Node`.

        IP addresses are harvested from the network connection sections;
        addresses on the 'Internal' network are treated as private,
        everything else as public.
        """
        state = self.NODE_STATE_MAP[elm.get('status')]
        name = elm.get('name')
        public_ips = []
        private_ips = []

        # Following code to find private IPs works for Terremark
        connections = elm.findall('%s/%s' % (
            '{http://schemas.dmtf.org/ovf/envelope/1}NetworkConnectionSection',
            fixxpath(elm, 'NetworkConnection'))
        )
        if not connections:
            # Fall back to the standard vCloud layout (vApp -> Vm).
            connections = elm.findall(
                fixxpath(
                    elm,
                    'Children/Vm/NetworkConnectionSection/NetworkConnection'))

        for connection in connections:
            ips = [ip.text
                   for ip
                   in connection.findall(fixxpath(elm, "IpAddress"))]
            if connection.get('Network') == 'Internal':
                private_ips.extend(ips)
            else:
                public_ips.extend(ips)

        node = Node(id=elm.get('href'),
                    name=name,
                    state=state,
                    public_ips=public_ips,
                    private_ips=private_ips,
                    driver=self.connection.driver)

        return node
+
+ def _get_catalog_hrefs(self):
+ res = self.connection.request(self.org)
+ catalogs = [
+ i.get('href')
+ for i in res.object.findall(fixxpath(res.object, "Link"))
+ if i.get('type') == 'application/vnd.vmware.vcloud.catalog+xml'
+ ]
+
+ return catalogs
+
    def _wait_for_task_completion(self, task_href,
                                  timeout=DEFAULT_TASK_COMPLETION_TIMEOUT):
        """
        Poll a task href every 5 seconds until it reports 'success'.

        :param task_href: href of the task to poll
        :type task_href: ``str``

        :param timeout: maximum time in seconds to wait
        :type timeout: ``int``

        :raises Exception: when the task reports 'error' or 'canceled',
            or when the timeout elapses
        """
        start_time = time.time()
        res = self.connection.request(get_url_path(task_href))
        status = res.object.get('status')
        while status != 'success':
            if status == 'error':
                # Get error reason from the response body
                error_elem = res.object.find(fixxpath(res.object, 'Error'))
                error_msg = "Unknown error"
                if error_elem is not None:
                    error_msg = error_elem.get('message')
                raise Exception("Error status returned by task %s.: %s"
                                % (task_href, error_msg))
            if status == 'canceled':
                raise Exception("Canceled status returned by task %s."
                                % task_href)
            if (time.time() - start_time >= timeout):
                raise Exception("Timeout (%s sec) while waiting for task %s."
                                % (timeout, task_href))
            time.sleep(5)
            # Re-fetch the task document and check again.
            res = self.connection.request(get_url_path(task_href))
            status = res.object.get('status')
+
    def destroy_node(self, node):
        """
        Destroy a node: best-effort power off and undeploy, then DELETE.

        :param node: node to destroy
        :type node: :class:`Node`

        :return: True when the delete request was accepted
        :rtype: ``bool``
        """
        node_path = get_url_path(node.id)
        # blindly poweroff node, it will throw an exception if already off
        try:
            res = self.connection.request('%s/power/action/poweroff'
                                          % node_path,
                                          method='POST')
            self._wait_for_task_completion(res.object.get('href'))
        except Exception:
            pass

        try:
            res = self.connection.request('%s/action/undeploy' % node_path,
                                          method='POST')
            self._wait_for_task_completion(res.object.get('href'))
        except ExpatError:
            # The undeploy response is malformed XML atm.
            # We can remove this when the providers fix the problem.
            pass
        except Exception:
            # Some vendors don't implement undeploy at all yet,
            # so catch this and move on.
            pass

        res = self.connection.request(node_path, method='DELETE')
        return res.status == httplib.ACCEPTED
+
+ def reboot_node(self, node):
+ res = self.connection.request('%s/power/action/reset'
+ % get_url_path(node.id),
+ method='POST')
+ return res.status in [httplib.ACCEPTED, httplib.NO_CONTENT]
+
+ def list_nodes(self):
+ return self.ex_list_nodes()
+
    def ex_list_nodes(self, vdcs=None):
        """
        List all nodes across all vDCs. Using 'vdcs' you can specify which vDCs
        should be queried.

        :param vdcs: None, vDC or a list of vDCs to query. If None all vDCs
                     will be queried.
        :type vdcs: :class:`Vdc`

        :rtype: ``list`` of :class:`Node`
        """
        if not vdcs:
            vdcs = self.vdcs
        if not isinstance(vdcs, (list, tuple)):
            vdcs = [vdcs]
        nodes = []
        for vdc in vdcs:
            res = self.connection.request(get_url_path(vdc.id))
            elms = res.object.findall(fixxpath(
                res.object, "ResourceEntities/ResourceEntity")
            )
            # Keep only named vApp entities.
            vapps = [
                (i.get('name'), i.get('href'))
                for i in elms
                if i.get('type') == 'application/vnd.vmware.vcloud.vApp+xml'
                and i.get('name')
            ]

            for vapp_name, vapp_href in vapps:
                try:
                    res = self.connection.request(
                        get_url_path(vapp_href),
                        headers={'Content-Type':
                                 'application/vnd.vmware.vcloud.vApp+xml'}
                    )
                    nodes.append(self._to_node(res.object))
                except Exception:
                    # The vApp was probably removed since the previous vDC
                    # query, ignore
                    # NOTE(review): only "resource forbidden" errors carried
                    # in the exception's first argument are swallowed;
                    # anything else is re-raised.
                    e = sys.exc_info()[1]
                    if not (e.args[0].tag.endswith('Error') and
                            e.args[0].get('minorErrorCode') ==
                            'ACCESS_TO_RESOURCE_IS_FORBIDDEN'):
                        raise

        return nodes
+
+ def _to_size(self, ram):
+ ns = NodeSize(
+ id=None,
+ name="%s Ram" % ram,
+ ram=ram,
+ disk=None,
+ bandwidth=None,
+ price=None,
+ driver=self.connection.driver
+ )
+ return ns
+
+ def list_sizes(self, location=None):
+ sizes = [self._to_size(i) for i in VIRTUAL_MEMORY_VALS]
+ return sizes
+
+ def _get_catalogitems_hrefs(self, catalog):
+ """Given a catalog href returns contained catalog item hrefs"""
+ res = self.connection.request(
+ get_url_path(catalog),
+ headers={
+ 'Content-Type': 'application/vnd.vmware.vcloud.catalog+xml'
+ }
+ ).object
+
+ cat_items = res.findall(fixxpath(res, "CatalogItems/CatalogItem"))
+ cat_item_hrefs = [i.get('href')
+ for i in cat_items
+ if i.get('type') ==
+ 'application/vnd.vmware.vcloud.catalogItem+xml']
+
+ return cat_item_hrefs
+
+ def _get_catalogitem(self, catalog_item):
+ """Given a catalog item href returns elementree"""
+ res = self.connection.request(
+ get_url_path(catalog_item),
+ headers={
+ 'Content-Type': 'application/vnd.vmware.vcloud.catalogItem+xml'
+ }
+ ).object
+
+ return res
+
    def list_images(self, location=None):
        """
        List vApp templates visible to the organisation.

        Images are gathered from two places: the resource entities of
        every vDC, and every item of every catalog. The combined list is
        de-duplicated by image id (href).
        """
        images = []
        for vdc in self.vdcs:
            res = self.connection.request(get_url_path(vdc.id)).object
            res_ents = res.findall(fixxpath(
                res, "ResourceEntities/ResourceEntity")
            )
            images += [
                self._to_image(i)
                for i in res_ents
                if i.get('type') ==
                'application/vnd.vmware.vcloud.vAppTemplate+xml'
            ]

        for catalog in self._get_catalog_hrefs():
            for cat_item in self._get_catalogitems_hrefs(catalog):
                res = self._get_catalogitem(cat_item)
                res_ents = res.findall(fixxpath(res, 'Entity'))
                images += [
                    self._to_image(i)
                    for i in res_ents
                    if i.get('type') ==
                    'application/vnd.vmware.vcloud.vAppTemplate+xml'
                ]

        def idfun(image):
            return image.id

        return self._uniquer(images, idfun)
+
+ def _uniquer(self, seq, idfun=None):
+ if idfun is None:
+ def idfun(x):
+ return x
+ seen = {}
+ result = []
+ for item in seq:
+ marker = idfun(item)
+ if marker in seen:
+ continue
+ seen[marker] = 1
+ result.append(item)
+ return result
+
    def create_node(self, **kwargs):
        """
        Creates and returns node.

        :keyword name: name of the new vApp (required)
        :type name: ``str``

        :keyword image: template to instantiate (required)
        :type image: :class:`NodeImage`

        :keyword size: size whose RAM amount is used for the VM (required)
        :type size: :class:`NodeSize`

        :keyword ex_network: link to a "Network" e.g.,
                             ``https://services.vcloudexpress...``
        :type ex_network: ``str``

        :keyword ex_vdc: Name of organisation's virtual data
                         center where vApp VMs will be deployed.
        :type ex_vdc: ``str``

        :keyword ex_cpus: number of virtual cpus (limit depends on provider)
        :type ex_cpus: ``int``

        :type ex_row: ``str``

        :type ex_group: ``str``
        """
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        # Some providers don't require a network link
        try:
            network = kwargs.get('ex_network', self.networks[0].get('href'))
        except IndexError:
            network = ''

        password = None
        auth = self._get_and_check_auth(kwargs.get('auth'))
        password = auth.password

        # Build the instantiation request body.
        instantiate_xml = InstantiateVAppXML(
            name=name,
            template=image.id,
            net_href=network,
            cpus=str(kwargs.get('ex_cpus', 1)),
            memory=str(size.ram),
            password=password,
            row=kwargs.get('ex_row', None),
            group=kwargs.get('ex_group', None)
        )

        vdc = self._get_vdc(kwargs.get('ex_vdc', None))

        # Instantiate VM and get identifier.
        content_type = \
            'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
        res = self.connection.request(
            '%s/action/instantiateVAppTemplate' % get_url_path(vdc.id),
            data=instantiate_xml.tostring(),
            method='POST',
            headers={'Content-Type': content_type}
        )
        vapp_path = get_url_path(res.object.get('href'))

        # Deploy the VM from the identifier.
        res = self.connection.request('%s/action/deploy' % vapp_path,
                                      method='POST')

        self._wait_for_task_completion(res.object.get('href'))

        # Power on the VM.
        res = self.connection.request('%s/power/action/powerOn' % vapp_path,
                                      method='POST')

        # Re-fetch the vApp to build the resulting Node object.
        res = self.connection.request(vapp_path)
        node = self._to_node(res.object)

        # Expose the generated password so the caller can log in.
        if getattr(auth, "generated", False):
            node.extra['password'] = auth.password

        return node
+
+
class HostingComConnection(VCloudConnection):

    """
    vCloud connection subclass for Hosting.com
    """

    host = "vcloud.safesecureweb.com"

    def _get_auth_headers(self):
        """hosting.com doesn't follow the standard vCloud authentication API"""
        credentials = b('%s:%s' % (self.user_id, self.key))
        return {
            'Authentication': base64.b64encode(credentials),
            'Content-Length': '0'
        }
+
+
class HostingComDriver(VCloudNodeDriver):

    """
    vCloud node driver for Hosting.com
    """
    # Only the connection class differs from the generic vCloud driver.
    connectionCls = HostingComConnection
+
+
class TerremarkConnection(VCloudConnection):

    """
    vCloud connection subclass for Terremark
    """
    # Fixed API endpoint for Terremark vCloud Express.
    host = "services.vcloudexpress.terremark.com"
+
+
class TerremarkDriver(VCloudNodeDriver):

    """
    vCloud node driver for Terremark
    """

    connectionCls = TerremarkConnection

    def list_locations(self):
        """Terremark exposes a single fixed location (Texas, US)."""
        location = NodeLocation(0, "Terremark Texas", 'US', self)
        return [location]
+
+
class VCloud_1_5_Connection(VCloudConnection):
    """
    Connection for the vCloud v1.5 API: session-based login plus
    organisation discovery through the orgList document.
    """

    def _get_auth_headers(self):
        """Compatibility for using v1.5 API under vCloud Director 5.1"""
        return {
            'Authorization': "Basic %s" % base64.b64encode(
                b('%s:%s' % (self.user_id, self.key))).decode('utf-8'),
            'Content-Length': '0',
            'Accept': 'application/*+xml;version=1.5'
        }

    def _get_auth_token(self):
        """Log in via /api/sessions, cache the auth token and resolve
        the organisation's URL path onto the driver."""
        if not self.token:
            # Log In
            conn = self.conn_classes[self.secure](self.host,
                                                  self.port)
            conn.request(method='POST', url='/api/sessions',
                         headers=self._get_auth_headers())

            resp = conn.getresponse()
            headers = dict(resp.getheaders())

            # Set authorization token
            try:
                self.token = headers['x-vcloud-authorization']
            except KeyError:
                raise InvalidCredsError()

            # Get the URL of the Organization
            body = ET.XML(resp.read())
            self.org_name = body.get('org')
            # Find the orgList link in the session document, ...
            org_list_url = get_url_path(
                next((link for link in body.findall(fixxpath(body, 'Link'))
                     if link.get('type') ==
                     'application/vnd.vmware.vcloud.orgList+xml')).get('href')
            )

            # ... then look our organisation up by name in that list.
            conn.request(method='GET', url=org_list_url,
                         headers=self.add_default_headers({}))
            body = ET.XML(conn.getresponse().read())
            self.driver.org = get_url_path(
                next((org for org in body.findall(fixxpath(body, 'Org'))
                     if org.get('name') == self.org_name)).get('href')
            )

    def add_default_headers(self, headers):
        # v1.5 authenticates with a header token instead of a cookie.
        headers['Accept'] = 'application/*+xml;version=1.5'
        headers['x-vcloud-authorization'] = self.token
        return headers
+
+
class Instantiate_1_5_VAppXML(object):
    """
    Builds the InstantiateVAppTemplateParams request body for the
    vCloud v1.5 API.

    NOTE(review): element creation order determines serialization
    order, so the tree is assembled strictly top-down.
    """

    def __init__(self, name, template, network, vm_network=None,
                 vm_fence=None):
        # network is the parent network XML element (or None to skip
        # network configuration entirely).
        self.name = name
        self.template = template
        self.network = network
        self.vm_network = vm_network
        self.vm_fence = vm_fence
        self._build_xmltree()

    def tostring(self):
        """Serialize the built tree to an XML byte string."""
        return ET.tostring(self.root)

    def _build_xmltree(self):
        """Assemble the request document under self.root."""
        self.root = self._make_instantiation_root()

        if self.network is not None:
            instantionation_params = ET.SubElement(self.root,
                                                   'InstantiationParams')
            network_config_section = ET.SubElement(instantionation_params,
                                                   'NetworkConfigSection')
            ET.SubElement(
                network_config_section,
                'Info',
                {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1'}
            )
            network_config = ET.SubElement(network_config_section,
                                           'NetworkConfig')
            self._add_network_association(network_config)

        self._add_vapp_template(self.root)

    def _make_instantiation_root(self):
        """Root element: vApp is created undeployed and powered off."""
        return ET.Element(
            'InstantiateVAppTemplateParams',
            {'name': self.name,
             'deploy': 'false',
             'powerOn': 'false',
             'xml:lang': 'en',
             'xmlns': 'http://www.vmware.com/vcloud/v1.5',
             'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance'}
        )

    def _add_vapp_template(self, parent):
        """Reference the source template."""
        return ET.SubElement(
            parent,
            'Source',
            {'href': self.template}
        )

    def _add_network_association(self, parent):
        """Attach the vApp network config: name, parent net, fence mode."""
        if self.vm_network is None:
            # Don't set a custom vApp VM network name
            parent.set('networkName', self.network.get('name'))
        else:
            # Set a custom vApp VM network name
            parent.set('networkName', self.vm_network)
        configuration = ET.SubElement(parent, 'Configuration')
        ET.SubElement(configuration, 'ParentNetwork',
                      {'href': self.network.get('href')})

        if self.vm_fence is None:
            # Inherit the fence mode from the parent network.
            fencemode = self.network.find(fixxpath(self.network,
                                          'Configuration/FenceMode')).text
        else:
            fencemode = self.vm_fence
        ET.SubElement(configuration, 'FenceMode').text = fencemode
+
+
class VCloud_1_5_NodeDriver(VCloudNodeDriver):
    # Driver for the vCloud v1.5 API; uses the session-token connection.
    connectionCls = VCloud_1_5_Connection

    # Based on
    # http://pubs.vmware.com/vcloud-api-1-5/api_prog/
    # GUID-843BE3AD-5EF6-4442-B864-BCAE44A51867.html
    NODE_STATE_MAP = {'-1': NodeState.UNKNOWN,
                      '0': NodeState.PENDING,
                      '1': NodeState.PENDING,
                      '2': NodeState.PENDING,
                      '3': NodeState.PENDING,
                      '4': NodeState.RUNNING,
                      '5': NodeState.RUNNING,
                      '6': NodeState.UNKNOWN,
                      '7': NodeState.UNKNOWN,
                      '8': NodeState.STOPPED,
                      '9': NodeState.UNKNOWN,
                      '10': NodeState.UNKNOWN}
+
+ def list_locations(self):
+ return [NodeLocation(id=self.connection.host,
+ name=self.connection.host, country="N/A", driver=self)]
+
    def ex_find_node(self, node_name, vdcs=None):
        """
        Searches for node across specified vDCs. This is more effective than
        querying all nodes to get a single instance.

        :param node_name: The name of the node to search for
        :type node_name: ``str``

        :param vdcs: None, vDC or a list of vDCs to search in. If None all vDCs
                     will be searched.
        :type vdcs: :class:`Vdc`

        :return: node instance or None if not found
        :rtype: :class:`Node` or ``None``
        """
        if not vdcs:
            vdcs = self.vdcs
        if not getattr(vdcs, '__iter__', False):
            # A single Vdc was passed in; wrap it.
            vdcs = [vdcs]
        for vdc in vdcs:
            res = self.connection.request(get_url_path(vdc.id))
            xpath = fixxpath(res.object, "ResourceEntities/ResourceEntity")
            entity_elems = res.object.findall(xpath)
            for entity_elem in entity_elems:
                # Match vApp entities by name; fetch and convert the
                # first hit.
                if entity_elem.get('type') == \
                        'application/vnd.vmware.vcloud.vApp+xml' and \
                        entity_elem.get('name') == node_name:
                    path = get_url_path(entity_elem.get('href'))
                    headers = {'Content-Type':
                               'application/vnd.vmware.vcloud.vApp+xml'}
                    res = self.connection.request(path,
                                                  headers=headers)
                    return self._to_node(res.object)
        return None
+
+ def destroy_node(self, node):
+ try:
+ self.ex_undeploy_node(node)
+ except Exception:
+ # Some vendors don't implement undeploy at all yet,
+ # so catch this and move on.
+ pass
+
+ res = self.connection.request(get_url_path(node.id), method='DELETE')
+ return res.status == httplib.ACCEPTED
+
+ def reboot_node(self, node):
+ res = self.connection.request('%s/power/action/reset'
+ % get_url_path(node.id),
+ method='POST')
+ if res.status in [httplib.ACCEPTED, httplib.NO_CONTENT]:
+ self._wait_for_task_completion(res.object.get('href'))
+ return True
+ else:
+ return False
+
    def ex_deploy_node(self, node):
        """
        Deploys existing node. Equal to vApp "start" operation.

        :param node: The node to be deployed
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        # Deploy and power on in a single request.
        data = {'powerOn': 'true',
                'xmlns': 'http://www.vmware.com/vcloud/v1.5'}
        deploy_xml = ET.Element('DeployVAppParams', data)
        path = get_url_path(node.id)
        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.deployVAppParams+xml'
        }
        res = self.connection.request('%s/action/deploy' % path,
                                      data=ET.tostring(deploy_xml),
                                      method='POST',
                                      headers=headers)
        self._wait_for_task_completion(res.object.get('href'))
        # Re-fetch the vApp so the returned Node reflects the new state.
        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)
+
    def ex_undeploy_node(self, node):
        """
        Undeploys existing node. Equal to vApp "stop" operation.

        Tries a guest 'shutdown' first; if that fails (e.g. no guest
        tools), falls back to a hard 'powerOff'.

        :param node: The node to be deployed
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        data = {'xmlns': 'http://www.vmware.com/vcloud/v1.5'}
        undeploy_xml = ET.Element('UndeployVAppParams', data)
        undeploy_power_action_xml = ET.SubElement(undeploy_xml,
                                                  'UndeployPowerAction')
        undeploy_power_action_xml.text = 'shutdown'

        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.undeployVAppParams+xml'
        }

        try:
            res = self.connection.request(
                '%s/action/undeploy' % get_url_path(node.id),
                data=ET.tostring(undeploy_xml),
                method='POST',
                headers=headers)

            self._wait_for_task_completion(res.object.get('href'))
        except Exception:
            # Graceful shutdown failed; retry with a hard power off.
            undeploy_power_action_xml.text = 'powerOff'
            res = self.connection.request(
                '%s/action/undeploy' % get_url_path(node.id),
                data=ET.tostring(undeploy_xml),
                method='POST',
                headers=headers)
            self._wait_for_task_completion(res.object.get('href'))

        # Re-fetch the vApp so the returned Node reflects the new state.
        res = self.connection.request(get_url_path(node.id))
        return self._to_node(res.object)
+
    def ex_power_off_node(self, node):
        """
        Powers off all VMs under specified node. This operation is allowed
        only when the vApp/VM is powered on.

        :param node: The node to be powered off
        :type node: :class:`Node`

        :rtype: :class:`Node`
        """
        return self._perform_power_operation(node, 'powerOff')
+
+ def ex_power_on_node(self, node):
+ """
+ Powers on all VMs under specified node. This operation is allowed
+ only when the vApp/VM is powered off or suspended.
+
+ :param node: The node to be powered on
+ :type node: :class:`Node`
+
+ :rtype: :class:`Node`
+ """
+ return self._perform_power_operation(node, 'powerOn')
+
+ def ex_shutdown_node(self, node):
+ """
+ Shutdowns all VMs under specified node. This operation is allowed only
+ when the vApp/VM is powered on.
+
+ :param node: The node to be shut down
+ :type node: :class:`Node`
+
+ :rtype: :class:`Node`
+ """
+ return self._perform_power_operation(node, 'shutdown')
+
+ def ex_suspend_node(self, node):
+ """
+ Suspends all VMs under specified node. This operation is allowed only
+ when the vApp/VM is powered on.
+
+ :param node: The node to be suspended
+ :type node: :class:`Node`
+
+ :rtype: :class:`Node`
+ """
+ return self._perform_power_operation(node, 'suspend')
+
+ def _perform_power_operation(self, node, operation):
+ res = self.connection.request(
+ '%s/power/action/%s' % (get_url_path(node.id), operation),
+ method='POST')
+ self._wait_for_task_completion(res.object.get('href'))
+ res = self.connection.request(get_url_path(node.id))
+ return self._to_node(res.object)
+
+ def ex_get_control_access(self, node):
+ """
+ Returns the control access settings for specified node.
+
+ :param node: node to get the control access for
+ :type node: :class:`Node`
+
+ :rtype: :class:`ControlAccess`
+ """
+ res = self.connection.request(
+ '%s/controlAccess' % get_url_path(node.id))
+ everyone_access_level = None
+ is_shared_elem = res.object.find(
+ fixxpath(res.object, "IsSharedToEveryone"))
+ if is_shared_elem is not None and is_shared_elem.text == 'true':
+ everyone_access_level = res.object.find(
+ fixxpath(res.object, "EveryoneAccessLevel")).text
+
+ # Parse all subjects
+ subjects = []
+ xpath = fixxpath(res.object, "AccessSettings/AccessSetting")
+ for elem in res.object.findall(xpath):
+ access_level = elem.find(fixxpath(res.object, "AccessLevel")).text
+ subject_elem = elem.find(fixxpath(res.object, "Subject"))
+ if subject_elem.get('type') == \
+ 'application/vnd.vmware.admin.group+xml':
+ subj_type = 'group'
+ else:
+ subj_type = 'user'
+
+ path = get_url_path(subject_elem.get('href'))
+ res = self.connection.request(path)
+ name = res.object.get('name')
+ subject = Subject(type=subj_type,
+ name=name,
+ access_level=access_level,
+ id=subject_elem.get('href'))
+ subjects.append(subject)
+
+ return ControlAccess(node, everyone_access_level, subjects)
+
    def ex_set_control_access(self, node, control_access):
        """
        Sets control access for the specified node.

        :param node: node
        :type node: :class:`Node`

        :param control_access: control access settings
        :type control_access: :class:`ControlAccess`

        :rtype: ``None``
        """
        xml = ET.Element('ControlAccessParams',
                         {'xmlns': 'http://www.vmware.com/vcloud/v1.5'})
        shared_to_everyone = ET.SubElement(xml, 'IsSharedToEveryone')
        if control_access.everyone_access_level:
            shared_to_everyone.text = 'true'
            everyone_access_level = ET.SubElement(xml, 'EveryoneAccessLevel')
            everyone_access_level.text = control_access.everyone_access_level
        else:
            shared_to_everyone.text = 'false'

        # Set subjects
        if control_access.subjects:
            access_settings_elem = ET.SubElement(xml, 'AccessSettings')
        for subject in control_access.subjects:
            setting = ET.SubElement(access_settings_elem, 'AccessSetting')
            if subject.id:
                href = subject.id
            else:
                # No href supplied: resolve the subject by name via the
                # query service.
                res = self.ex_query(type=subject.type, filter='name==' +
                                    subject.name)
                if not res:
                    raise LibcloudError('Specified subject "%s %s" not found '
                                        % (subject.type, subject.name))
                href = res[0]['href']
            ET.SubElement(setting, 'Subject', {'href': href})
            ET.SubElement(setting, 'AccessLevel').text = subject.access_level

        headers = {
            'Content-Type': 'application/vnd.vmware.vcloud.controlAccess+xml'
        }
        self.connection.request(
            '%s/action/controlAccess' % get_url_path(node.id),
            data=ET.tostring(xml),
            headers=headers,
            method='POST')
+
+ def ex_get_metadata(self, node):
+ """
+ :param node: node
+ :type node: :class:`Node`
+
+ :return: dictionary mapping metadata keys to metadata values
+ :rtype: dictionary mapping ``str`` to ``str``
+ """
+ res = self.connection.request('%s/metadata' % (get_url_path(node.id)))
+ xpath = fixxpath(res.object, 'MetadataEntry')
+ metadata_entries = res.object.findall(xpath)
+ res_dict = {}
+
+ for entry in metadata_entries:
+ key = entry.findtext(fixxpath(res.object, 'Key'))
+ value = entry.findtext(fixxpath(res.object, 'Value'))
+ res_dict[key] = value
+
+ return res_dict
+
+ def ex_set_metadata_entry(self, node, key, value):
+ """
+ :param node: node
+ :type node: :class:`Node`
+
+ :param key: metadata key to be set
+ :type key: ``str``
+
+ :param value: metadata value to be set
+ :type value: ``str``
+
+ :rtype: ``None``
+ """
+ metadata_elem = ET.Element(
+ 'Metadata',
+ {'xmlns': "http://www.vmware.com/vcloud/v1.5",
+ 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
+ )
+ entry = ET.SubElement(metadata_elem, 'MetadataEntry')
+ key_elem = ET.SubElement(entry, 'Key')
+ key_elem.text = key
+ value_elem = ET.SubElement(entry, 'Value')
+ value_elem.text = value
+
+ # send it back to the server
+ res = self.connection.request(
+ '%s/metadata' % get_url_path(node.id),
+ data=ET.tostring(metadata_elem),
+ headers={
+ 'Content-Type': 'application/vnd.vmware.vcloud.metadata+xml'
+ },
+ method='POST')
+ self._wait_for_task_completion(res.object.get('href'))
+
+ def ex_query(self, type, filter=None, page=1, page_size=100, sort_asc=None,
+ sort_desc=None):
+ """
+ Queries vCloud for specified type. See
+ http://www.vmware.com/pdf/vcd_15_api_guide.pdf for details. Each
+ element of the returned list is a dictionary with all attributes from
+ the record.
+
+ :param type: type to query (r.g. user, group, vApp etc.)
+ :type type: ``str``
+
+ :param filter: filter expression (see documentation for syntax)
+ :type filter: ``str``
+
+ :param page: page number
+ :type page: ``int``
+
+ :param page_size: page size
+ :type page_size: ``int``
+
+ :param sort_asc: sort in ascending order by specified field
+ :type sort_asc: ``str``
+
+ :param sort_desc: sort in descending order by specified field
+ :type sort_desc: ``str``
+
+ :rtype: ``list`` of dict
+ """
+ # This is a workaround for filter parameter encoding
+ # the urllib encodes (name==Developers%20Only) into
+ # %28name%3D%3DDevelopers%20Only%29) which is not accepted by vCloud
+ params = {
+ 'type': type,
+ 'pageSize': page_size,
+ 'page': page,
+ }
+ if sort_asc:
+ params['sortAsc'] = sort_asc
+ if sort_desc:
+ params['sortDesc'] = sort_desc
+
+ url = '/api/query?' + urlencode(params)
+ if filter:
+ if not filter.startswith('('):
+ filter = '(' + filter + ')'
+ url += '&filter=' + filter.replace(' ', '+')
+
+ results = []
+ res = self.connection.request(url)
+ for elem in res.object:
+ if not elem.tag.endswith('Link'):
+ result = elem.attrib
+ result['type'] = elem.tag.split('}')[1]
+ results.append(result)
+ return results
+
    def create_node(self, **kwargs):
        """
        Creates and returns node. If the source image is:
          - vApp template - a new vApp is instantiated from template
          - existing vApp - a new vApp is cloned from the source vApp. Cannot
                            clone more vApps in parallel, otherwise a
                            resource busy error is raised.


        @inherits: :class:`NodeDriver.create_node`

        :keyword    image:  OS Image to boot on node. (required). Can be a
                            NodeImage or existing Node that will be cloned.
        :type       image:  :class:`NodeImage` or :class:`Node`

        :keyword    ex_network: Organisation's network name for attaching vApp
                                VMs to.
        :type       ex_network: ``str``

        :keyword    ex_vdc: Name of organisation's virtual data center where
                            vApp VMs will be deployed.
        :type       ex_vdc: ``str``

        :keyword    ex_vm_names: list of names to be used as a VM and computer
                                 name. The name must be max. 15 characters
                                 long and follow the host name requirements.
        :type       ex_vm_names: ``list`` of ``str``

        :keyword    ex_vm_cpu: number of virtual CPUs/cores to allocate for
                               each vApp VM.
        :type       ex_vm_cpu: ``int``

        :keyword    ex_vm_memory: amount of memory in MB to allocate for each
                                  vApp VM.
        :type       ex_vm_memory: ``int``

        :keyword    ex_vm_script: full path to file containing guest
                                  customisation script for each vApp VM.
                                  Useful for creating users & pushing out
                                  public SSH keys etc.
        :type       ex_vm_script: ``str``

        :keyword    ex_vm_network: Override default vApp VM network name.
                                   Useful for when you've imported an OVF
                                   originating from outside of the vCloud.
        :type       ex_vm_network: ``str``

        :keyword    ex_vm_fence: Fence mode for connecting the vApp VM network
                                 (ex_vm_network) to the parent
                                 organisation network (ex_network).
        :type       ex_vm_fence: ``str``

        :keyword    ex_vm_ipmode: IP address allocation mode for all vApp VM
                                  network connections.
        :type       ex_vm_ipmode: ``str``

        :keyword    ex_deploy: set to False if the node shouldn't be deployed
                               (started) after creation
        :type       ex_deploy: ``bool``

        :keyword    ex_clone_timeout: timeout in seconds for clone/instantiate
                                      VM operation.
                                      Cloning might be a time consuming
                                      operation especially when linked clones
                                      are disabled or VMs are created on
                                      different datastores.
                                      Overrides the default task completion
                                      value.
        :type       ex_clone_timeout: ``int``
        """
        name = kwargs['name']
        image = kwargs['image']
        ex_vm_names = kwargs.get('ex_vm_names')
        ex_vm_cpu = kwargs.get('ex_vm_cpu')
        ex_vm_memory = kwargs.get('ex_vm_memory')
        ex_vm_script = kwargs.get('ex_vm_script')
        ex_vm_fence = kwargs.get('ex_vm_fence', None)
        ex_network = kwargs.get('ex_network', None)
        ex_vm_network = kwargs.get('ex_vm_network', None)
        ex_vm_ipmode = kwargs.get('ex_vm_ipmode', None)
        ex_deploy = kwargs.get('ex_deploy', True)
        ex_vdc = kwargs.get('ex_vdc', None)
        ex_clone_timeout = kwargs.get('ex_clone_timeout',
                                      DEFAULT_TASK_COMPLETION_TIMEOUT)

        # Validate every user-supplied option up front, before any API call
        # has side effects.
        self._validate_vm_names(ex_vm_names)
        self._validate_vm_cpu(ex_vm_cpu)
        self._validate_vm_memory(ex_vm_memory)
        self._validate_vm_fence(ex_vm_fence)
        self._validate_vm_ipmode(ex_vm_ipmode)
        ex_vm_script = self._validate_vm_script(ex_vm_script)

        # Some providers don't require a network link
        if ex_network:
            network_href = self._get_network_href(ex_network)
            network_elem = self.connection.request(
                get_url_path(network_href)).object
        else:
            network_elem = None

        vdc = self._get_vdc(ex_vdc)

        # Clone an existing vApp, or instantiate a fresh one from a template.
        if self._is_node(image):
            vapp_name, vapp_href = self._clone_node(name,
                                                    image,
                                                    vdc,
                                                    ex_clone_timeout)
        else:
            vapp_name, vapp_href = self._instantiate_node(name, image,
                                                          network_elem,
                                                          vdc, ex_vm_network,
                                                          ex_vm_fence,
                                                          ex_clone_timeout)

        # Apply per-VM reconfiguration; each helper is a no-op when its
        # argument is None.
        self._change_vm_names(vapp_href, ex_vm_names)
        self._change_vm_cpu(vapp_href, ex_vm_cpu)
        self._change_vm_memory(vapp_href, ex_vm_memory)
        self._change_vm_script(vapp_href, ex_vm_script)
        self._change_vm_ipmode(vapp_href, ex_vm_ipmode)

        # Power on the VM.
        if ex_deploy:
            # Retry 3 times: when instantiating large number of VMs at the same
            # time some may fail on resource allocation
            retry = 3
            while True:
                try:
                    res = self.connection.request(
                        '%s/power/action/powerOn' % get_url_path(vapp_href),
                        method='POST')
                    self._wait_for_task_completion(res.object.get('href'))
                    break
                except Exception:
                    if retry <= 0:
                        raise
                    retry -= 1
                    time.sleep(10)

        # Re-fetch the vApp so the returned Node reflects its final state.
        res = self.connection.request(get_url_path(vapp_href))
        node = self._to_node(res.object)
        return node
+
    def _instantiate_node(self, name, image, network_elem, vdc, vm_network,
                          vm_fence, instantiate_timeout):
        """
        Instantiate a new vApp from a vApp template inside the given vDC and
        wait for the instantiation task to complete.

        :return: ``(vapp_name, vapp_href)`` tuple for the new vApp.
        """
        instantiate_xml = Instantiate_1_5_VAppXML(
            name=name,
            template=image.id,
            network=network_elem,
            vm_network=vm_network,
            vm_fence=vm_fence
        )

        # Instantiate VM and get identifier.
        headers = {
            'Content-Type':
            'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
        }
        res = self.connection.request(
            '%s/action/instantiateVAppTemplate' % get_url_path(vdc.id),
            data=instantiate_xml.tostring(),
            method='POST',
            headers=headers
        )
        vapp_name = res.object.get('name')
        vapp_href = res.object.get('href')

        # The response carries an async Task; block until vCloud reports it
        # finished (or instantiate_timeout elapses).
        task_href = res.object.find(fixxpath(res.object, "Tasks/Task")).get(
            'href')
        self._wait_for_task_completion(task_href, instantiate_timeout)
        return vapp_name, vapp_href
+
    def _clone_node(self, name, sourceNode, vdc, clone_timeout):
        """
        Clone an existing vApp into the given vDC and wait for the clone
        task to finish, then reset the networking of every cloned VM so it
        is re-customised on first boot.

        :return: ``(vapp_name, vapp_href)`` tuple for the cloned vApp.
        """
        clone_xml = ET.Element(
            "CloneVAppParams",
            {'name': name, 'deploy': 'false', 'powerOn': 'false',
             'xmlns': "http://www.vmware.com/vcloud/v1.5",
             'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"}
        )
        ET.SubElement(clone_xml,
                      'Description').text = 'Clone of ' + sourceNode.name
        ET.SubElement(clone_xml, 'Source', {'href': sourceNode.id})

        headers = {
            'Content-Type': 'application/vnd.vmware.vcloud.cloneVAppParams+xml'
        }
        res = self.connection.request(
            '%s/action/cloneVApp' % get_url_path(vdc.id),
            data=ET.tostring(clone_xml),
            method='POST',
            headers=headers
        )
        vapp_name = res.object.get('name')
        vapp_href = res.object.get('href')

        task_href = res.object.find(
            fixxpath(res.object, "Tasks/Task")).get('href')
        self._wait_for_task_completion(task_href, clone_timeout)

        # Re-fetch the finished clone to enumerate its VMs.
        res = self.connection.request(get_url_path(vapp_href))

        vms = res.object.findall(fixxpath(res.object, "Children/Vm"))

        # Fix the networking for VMs
        for i, vm in enumerate(vms):
            # Remove network: PUT an empty NetworkConnectionSection first so
            # the stale cloned connection is dropped.
            network_xml = ET.Element("NetworkConnectionSection", {
                'ovf:required': 'false',
                'xmlns': "http://www.vmware.com/vcloud/v1.5",
                'xmlns:ovf': 'http://schemas.dmtf.org/ovf/envelope/1'})
            ET.SubElement(network_xml, "ovf:Info").text = \
                'Specifies the available VM network connections'

            headers = {
                'Content-Type':
                'application/vnd.vmware.vcloud.networkConnectionSection+xml'
            }
            res = self.connection.request(
                '%s/networkConnectionSection' % get_url_path(vm.get('href')),
                data=ET.tostring(network_xml),
                method='PUT',
                headers=headers
            )
            self._wait_for_task_completion(res.object.get('href'))

            # Re-add network: reuse the VM's original section, but strip the
            # cloned IP/MAC and request re-customisation so new addresses
            # are assigned.
            network_xml = vm.find(fixxpath(vm, 'NetworkConnectionSection'))
            network_conn_xml = network_xml.find(
                fixxpath(network_xml, 'NetworkConnection'))
            network_conn_xml.set('needsCustomization', 'true')
            network_conn_xml.remove(
                network_conn_xml.find(fixxpath(network_xml, 'IpAddress')))
            network_conn_xml.remove(
                network_conn_xml.find(fixxpath(network_xml, 'MACAddress')))

            headers = {
                'Content-Type':
                'application/vnd.vmware.vcloud.networkConnectionSection+xml'
            }
            res = self.connection.request(
                '%s/networkConnectionSection' % get_url_path(vm.get('href')),
                data=ET.tostring(network_xml),
                method='PUT',
                headers=headers
            )
            self._wait_for_task_completion(res.object.get('href'))

        return vapp_name, vapp_href
+
+ def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu):
+ """
+ Sets the number of virtual CPUs for the specified VM or VMs under
+ the vApp. If the vapp_or_vm_id param represents a link to an vApp
+ all VMs that are attached to this vApp will be modified.
+
+ Please ensure that hot-adding a virtual CPU is enabled for the
+ powered on virtual machines. Otherwise use this method on undeployed
+ vApp.
+
+ :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If
+ a vApp ID is used here all attached VMs
+ will be modified
+ :type vapp_or_vm_id: ``str``
+
+ :keyword vm_cpu: number of virtual CPUs/cores to allocate for
+ specified VMs
+ :type vm_cpu: ``int``
+
+ :rtype: ``None``
+ """
+ self._validate_vm_cpu(vm_cpu)
+ self._change_vm_cpu(vapp_or_vm_id, vm_cpu)
+
+ def ex_set_vm_memory(self, vapp_or_vm_id, vm_memory):
+ """
+ Sets the virtual memory in MB to allocate for the specified VM or
+ VMs under the vApp. If the vapp_or_vm_id param represents a link
+ to an vApp all VMs that are attached to this vApp will be modified.
+
+ Please ensure that hot-change of virtual memory is enabled for the
+ powered on virtual machines. Otherwise use this method on undeployed
+ vApp.
+
+ :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If
+ a vApp ID is used here all attached VMs
+ will be modified
+ :type vapp_or_vm_id: ``str``
+
+ :keyword vm_memory: virtual memory in MB to allocate for the
+ specified VM or VMs
+ :type vm_memory: ``int``
+
+ :rtype: ``None``
+ """
+ self._validate_vm_memory(vm_memory)
+ self._change_vm_memory(vapp_or_vm_id, vm_memory)
+
+ def ex_add_vm_disk(self, vapp_or_vm_id, vm_disk_size):
+ """
+ Adds a virtual disk to the specified VM or VMs under the vApp. If the
+ vapp_or_vm_id param represents a link to an vApp all VMs that are
+ attached to this vApp will be modified.
+
+ :keyword vapp_or_vm_id: vApp or VM ID that will be modified. If a
+ vApp ID is used here all attached VMs
+ will be modified
+ :type vapp_or_vm_id: ``str``
+
+ :keyword vm_disk_size: the disk capacity in GB that will be added
+ to the specified VM or VMs
+ :type vm_disk_size: ``int``
+
+ :rtype: ``None``
+ """
+ self._validate_vm_disk_size(vm_disk_size)
+ self._add_vm_disk(vapp_or_vm_id, vm_disk_size)
+
+ @staticmethod
+ def _validate_vm_names(names):
+ if names is None:
+ return
+ hname_re = re.compile(
+ '^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9]*)[\-])*([A-Za-z]|[A-Za-z][A-Za-z0-9]*[A-Za-z0-9])$') # NOQA
+ for name in names:
+ if len(name) > 15:
+ raise ValueError(
+ 'The VM name "' + name + '" is too long for the computer '
+ 'name (max 15 chars allowed).')
+ if not hname_re.match(name):
+ raise ValueError('The VM name "' + name + '" can not be '
+ 'used. "' + name + '" is not a valid '
+ 'computer name for the VM.')
+
+ @staticmethod
+ def _validate_vm_memory(vm_memory):
+ if vm_memory is None:
+ return
+ elif vm_memory not in VIRTUAL_MEMORY_VALS:
+ raise ValueError(
+ '%s is not a valid vApp VM memory value' % vm_memory)
+
+ @staticmethod
+ def _validate_vm_cpu(vm_cpu):
+ if vm_cpu is None:
+ return
+ elif vm_cpu not in VIRTUAL_CPU_VALS_1_5:
+ raise ValueError('%s is not a valid vApp VM CPU value' % vm_cpu)
+
+ @staticmethod
+ def _validate_vm_disk_size(vm_disk):
+ if vm_disk is None:
+ return
+ elif int(vm_disk) < 0:
+ raise ValueError('%s is not a valid vApp VM disk space value',
+ vm_disk)
+
+ @staticmethod
+ def _validate_vm_script(vm_script):
+ if vm_script is None:
+ return
+ # Try to locate the script file
+ if not os.path.isabs(vm_script):
+ vm_script = os.path.expanduser(vm_script)
+ vm_script = os.path.abspath(vm_script)
+ if not os.path.isfile(vm_script):
+ raise LibcloudError(
+ "%s the VM script file does not exist" % vm_script)
+ try:
+ open(vm_script).read()
+ except:
+ raise
+ return vm_script
+
+ @staticmethod
+ def _validate_vm_fence(vm_fence):
+ if vm_fence is None:
+ return
+ elif vm_fence not in FENCE_MODE_VALS_1_5:
+ raise ValueError('%s is not a valid fencing mode value' % vm_fence)
+
+ @staticmethod
+ def _validate_vm_ipmode(vm_ipmode):
+ if vm_ipmode is None:
+ return
+ elif vm_ipmode == 'MANUAL':
+ raise NotImplementedError(
+ 'MANUAL IP mode: The interface for supplying '
+ 'IPAddress does not exist yet')
+ elif vm_ipmode not in IP_MODE_VALS_1_5:
+ raise ValueError(
+ '%s is not a valid IP address allocation mode value'
+ % vm_ipmode)
+
    def _change_vm_names(self, vapp_or_vm_id, vm_names):
        """
        Rename VMs and their guest computer names, pairing the i-th VM with
        the i-th entry of ``vm_names``. Extra VMs (beyond len(vm_names))
        are left untouched; a None list is a no-op.
        """
        if vm_names is None:
            return

        vms = self._get_vm_elements(vapp_or_vm_id)
        for i, vm in enumerate(vms):
            if len(vm_names) <= i:
                return

            # Get GuestCustomizationSection
            res = self.connection.request(
                '%s/guestCustomizationSection' % get_url_path(vm.get('href')))

            # Update GuestCustomizationSection
            res.object.find(
                fixxpath(res.object, 'ComputerName')).text = vm_names[i]
            # Remove AdminPassword from customization section
            admin_pass = res.object.find(fixxpath(res.object, 'AdminPassword'))
            if admin_pass is not None:
                res.object.remove(admin_pass)

            headers = {
                'Content-Type':
                'application/vnd.vmware.vcloud.guestCustomizationSection+xml'
            }
            res = self.connection.request(
                '%s/guestCustomizationSection' % get_url_path(vm.get('href')),
                data=ET.tostring(res.object),
                method='PUT',
                headers=headers
            )
            # Each PUT is asynchronous; wait before issuing the next change.
            self._wait_for_task_completion(res.object.get('href'))

            # Update Vm name
            req_xml = ET.Element("Vm", {
                'name': vm_names[i],
                'xmlns': "http://www.vmware.com/vcloud/v1.5"})
            res = self.connection.request(
                get_url_path(vm.get('href')),
                data=ET.tostring(req_xml),
                method='PUT',
                headers={
                    'Content-Type': 'application/vnd.vmware.vcloud.vm+xml'}
            )
            self._wait_for_task_completion(res.object.get('href'))
+
    def _change_vm_cpu(self, vapp_or_vm_id, vm_cpu):
        """
        Set the virtual CPU count on every VM denoted by ``vapp_or_vm_id``
        by rewriting the RASD cpu item; a None count is a no-op.
        """
        if vm_cpu is None:
            return

        vms = self._get_vm_elements(vapp_or_vm_id)
        for vm in vms:
            # Get virtualHardwareSection/cpu section
            res = self.connection.request(
                '%s/virtualHardwareSection/cpu' % get_url_path(vm.get('href')))

            # Update VirtualQuantity field
            xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                     'CIM_ResourceAllocationSettingData}VirtualQuantity')
            res.object.find(xpath).text = str(vm_cpu)

            headers = {
                'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml'
            }
            res = self.connection.request(
                '%s/virtualHardwareSection/cpu' % get_url_path(vm.get('href')),
                data=ET.tostring(res.object),
                method='PUT',
                headers=headers
            )
            # PUT is asynchronous; wait so changes are applied sequentially.
            self._wait_for_task_completion(res.object.get('href'))
+
    def _change_vm_memory(self, vapp_or_vm_id, vm_memory):
        """
        Set the virtual memory (MB) on every VM denoted by ``vapp_or_vm_id``
        by rewriting the RASD memory item; a None value is a no-op.
        """
        if vm_memory is None:
            return

        vms = self._get_vm_elements(vapp_or_vm_id)
        for vm in vms:
            # Get virtualHardwareSection/memory section
            res = self.connection.request(
                '%s/virtualHardwareSection/memory' %
                get_url_path(vm.get('href')))

            # Update VirtualQuantity field
            xpath = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                     'CIM_ResourceAllocationSettingData}VirtualQuantity')
            res.object.find(xpath).text = str(vm_memory)

            headers = {
                'Content-Type': 'application/vnd.vmware.vcloud.rasdItem+xml'
            }
            res = self.connection.request(
                '%s/virtualHardwareSection/memory' % get_url_path(
                    vm.get('href')),
                data=ET.tostring(res.object),
                method='PUT',
                headers=headers
            )
            # PUT is asynchronous; wait so changes are applied sequentially.
            self._wait_for_task_completion(res.object.get('href'))
+
    def _add_vm_disk(self, vapp_or_vm_id, vm_disk):
        """
        Add a new virtual disk of ``vm_disk`` GB to every VM denoted by
        ``vapp_or_vm_id``. The new disk is a cleaned copy of an existing
        disk item with a fresh InstanceID; a None size is a no-op.
        """
        if vm_disk is None:
            return

        rasd_ns = ('{http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
                   'CIM_ResourceAllocationSettingData}')

        vms = self._get_vm_elements(vapp_or_vm_id)
        for vm in vms:
            # Get virtualHardwareSection/disks section
            res = self.connection.request(
                '%s/virtualHardwareSection/disks' %
                get_url_path(vm.get('href')))

            existing_ids = []
            new_disk = None
            for item in res.object.findall(fixxpath(res.object, 'Item')):
                # Clean Items from unnecessary stuff
                for elem in item:
                    if elem.tag == '%sInstanceID' % rasd_ns:
                        existing_ids.append(int(elem.text))
                    if elem.tag in ['%sAddressOnParent' % rasd_ns,
                                    '%sParent' % rasd_ns]:
                        item.remove(elem)
                # Items with a HostResource are disks; remember the last one
                # as the template for the new disk.
                if item.find('%sHostResource' % rasd_ns) is not None:
                    new_disk = item

            # Copy an existing disk item and give it the next free
            # InstanceID plus the requested capacity (GB -> MB).
            new_disk = copy.deepcopy(new_disk)
            disk_id = max(existing_ids) + 1
            new_disk.find('%sInstanceID' % rasd_ns).text = str(disk_id)
            new_disk.find('%sElementName' %
                          rasd_ns).text = 'Hard Disk ' + str(disk_id)
            new_disk.find('%sHostResource' % rasd_ns).set(
                fixxpath(new_disk, 'capacity'), str(int(vm_disk) * 1024))
            res.object.append(new_disk)

            headers = {
                'Content-Type':
                'application/vnd.vmware.vcloud.rasditemslist+xml'
            }
            res = self.connection.request(
                '%s/virtualHardwareSection/disks' % get_url_path(
                    vm.get('href')),
                data=ET.tostring(res.object),
                method='PUT',
                headers=headers
            )
            # PUT is asynchronous; wait so changes are applied sequentially.
            self._wait_for_task_completion(res.object.get('href'))
+
+ def _change_vm_script(self, vapp_or_vm_id, vm_script):
+ if vm_script is None:
+ return
+
+ vms = self._get_vm_elements(vapp_or_vm_id)
+ try:
+ script = open(vm_script).read()
+ except:
+ return
+
+ # ElementTree escapes script characters automatically. Escape
+ # requirements:
+ # http://www.vmware.com/support/vcd/doc/rest-api-doc-1.5-html/types/
+ # GuestCustomizationSectionType.html
+ for vm in vms:
+ # Get GuestCustomizationSection
+ res = self.connection.request(
+ '%s/guestCustomizationSection' % get_url_path(vm.get('href')))
+
+ # Attempt to update any existing CustomizationScript element
+ try:
+ res.object.find(
+ fixxpath(res.object, 'CustomizationScript')).text = script
+ except:
+ # CustomizationScript section does not exist, insert it just
+ # before ComputerName
+ for i, e in enumerate(res.object):
+ if e.tag == \
+ '{http://www.vmware.com/vcloud/v1.5}ComputerName':
+ break
+ e = ET.Element(
+ '{http://www.vmware.com/vcloud/v1.5}CustomizationScript')
+ e.text = script
+ res.object.insert(i, e)
+
+ # Remove AdminPassword from customization section due to an API
+ # quirk
+ admin_pass = res.object.find(fixxpath(res.object, 'AdminPassword'))
+ if admin_pass is not None:
+ res.object.remove(admin_pass)
+
+ # Update VM's GuestCustomizationSection
+ headers = {
+ 'Content-Type':
+ 'application/vnd.vmware.vcloud.guestCustomizationSection+xml'
+ }
+ res = self.connection.request(
+ '%s/guestCustomizationSection' % get_url_path(vm.get('href')),
+ data=ET.tostring(res.object),
+ method='PUT',
+ headers=headers
+ )
+ self._wait_for_task_completion(res.object.get('href'))
+
    def _change_vm_ipmode(self, vapp_or_vm_id, vm_ipmode):
        """
        Set the IP allocation mode on every network connection of every VM
        denoted by ``vapp_or_vm_id``; a None mode is a no-op.
        """
        if vm_ipmode is None:
            return

        vms = self._get_vm_elements(vapp_or_vm_id)

        for vm in vms:
            res = self.connection.request(
                '%s/networkConnectionSection' % get_url_path(vm.get('href')))
            net_conns = res.object.findall(
                fixxpath(res.object, 'NetworkConnection'))
            # A VM may have several connections; all get the same mode.
            for c in net_conns:
                c.find(fixxpath(c, 'IpAddressAllocationMode')).text = vm_ipmode

            headers = {
                'Content-Type':
                'application/vnd.vmware.vcloud.networkConnectionSection+xml'
            }

            res = self.connection.request(
                '%s/networkConnectionSection' % get_url_path(vm.get('href')),
                data=ET.tostring(res.object),
                method='PUT',
                headers=headers
            )
            # PUT is asynchronous; wait so changes are applied sequentially.
            self._wait_for_task_completion(res.object.get('href'))
+
+ def _get_network_href(self, network_name):
+ network_href = None
+
+ # Find the organisation's network href
+ res = self.connection.request(self.org)
+ links = res.object.findall(fixxpath(res.object, 'Link'))
+ for l in links:
+ if l.attrib['type'] == \
+ 'application/vnd.vmware.vcloud.orgNetwork+xml' \
+ and l.attrib['name'] == network_name:
+ network_href = l.attrib['href']
+
+ if network_href is None:
+ raise ValueError(
+ '%s is not a valid organisation network name' % network_name)
+ else:
+ return network_href
+
+ def _get_vm_elements(self, vapp_or_vm_id):
+ res = self.connection.request(get_url_path(vapp_or_vm_id))
+ if res.object.tag.endswith('VApp'):
+ vms = res.object.findall(fixxpath(res.object, 'Children/Vm'))
+ elif res.object.tag.endswith('Vm'):
+ vms = [res.object]
+ else:
+ raise ValueError(
+ 'Specified ID value is not a valid VApp or Vm identifier.')
+ return vms
+
    def _is_node(self, node_or_image):
        # True when the create_node source is an existing vApp (Node) to be
        # cloned rather than a vApp template (NodeImage) to be instantiated.
        return isinstance(node_or_image, Node)
+
    def _to_node(self, node_elm):
        """
        Convert a VApp XML element into a :class:`Node`, collecting the
        IP addresses, state and OS type of every child VM into the node's
        ``extra['vms']`` list.
        """
        # Parse VMs as extra field
        vms = []
        for vm_elem in node_elm.findall(fixxpath(node_elm, 'Children/Vm')):
            public_ips = []
            private_ips = []

            xpath = fixxpath(vm_elem,
                             'NetworkConnectionSection/NetworkConnection')
            for connection in vm_elem.findall(xpath):
                ip = connection.find(fixxpath(connection, "IpAddress"))
                if ip is not None:
                    private_ips.append(ip.text)
                external_ip = connection.find(
                    fixxpath(connection, "ExternalIpAddress"))
                if external_ip is not None:
                    public_ips.append(external_ip.text)
                elif ip is not None:
                    # No NAT/external address: the internal address is also
                    # reported as public.
                    public_ips.append(ip.text)

            # OS type comes from the OVF OperatingSystemSection, when present.
            xpath = ('{http://schemas.dmtf.org/ovf/envelope/1}'
                     'OperatingSystemSection')
            os_type_elem = vm_elem.find(xpath)
            if os_type_elem is not None:
                os_type = os_type_elem.get(
                    '{http://www.vmware.com/schema/ovf}osType')
            else:
                os_type = None
            vm = {
                'id': vm_elem.get('href'),
                'name': vm_elem.get('name'),
                'state': self.NODE_STATE_MAP[vm_elem.get('status')],
                'public_ips': public_ips,
                'private_ips': private_ips,
                'os_type': os_type
            }
            vms.append(vm)

        # Take the node IP addresses from all VMs
        public_ips = []
        private_ips = []
        for vm in vms:
            public_ips.extend(vm['public_ips'])
            private_ips.extend(vm['private_ips'])

        # Find vDC
        vdc_id = next(link.get('href') for link
                      in node_elm.findall(fixxpath(node_elm, 'Link'))
                      if link.get('type') ==
                      'application/vnd.vmware.vcloud.vdc+xml')
        vdc = next(vdc for vdc in self.vdcs if vdc.id == vdc_id)

        node = Node(id=node_elm.get('href'),
                    name=node_elm.get('name'),
                    state=self.NODE_STATE_MAP[node_elm.get('status')],
                    public_ips=public_ips,
                    private_ips=private_ips,
                    driver=self.connection.driver,
                    extra={'vdc': vdc.name, 'vms': vms})
        return node
+
    def _to_vdc(self, vdc_elm):
        """
        Convert a Vdc XML element into a :class:`Vdc`, including its CPU,
        memory and storage capacities.
        """

        def get_capacity_values(capacity_elm):
            # Returns a Capacity(limit, used, units) or None when the
            # section is absent.
            if capacity_elm is None:
                return None
            limit = int(capacity_elm.findtext(fixxpath(capacity_elm, 'Limit')))
            used = int(capacity_elm.findtext(fixxpath(capacity_elm, 'Used')))
            units = capacity_elm.findtext(fixxpath(capacity_elm, 'Units'))
            return Capacity(limit, used, units)

        cpu = get_capacity_values(
            vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Cpu')))
        memory = get_capacity_values(
            vdc_elm.find(fixxpath(vdc_elm, 'ComputeCapacity/Memory')))
        storage = get_capacity_values(
            vdc_elm.find(fixxpath(vdc_elm, 'StorageCapacity')))

        return Vdc(id=vdc_elm.get('href'),
                   name=vdc_elm.get('name'),
                   driver=self,
                   allocation_model=vdc_elm.findtext(
                       fixxpath(vdc_elm, 'AllocationModel')),
                   cpu=cpu,
                   memory=memory,
                   storage=storage)
+
+
class VCloud_5_1_NodeDriver(VCloud_1_5_NodeDriver):
    """vCloud Director 5.1 driver; relaxes the 1.5 memory validation."""

    @staticmethod
    def _validate_vm_memory(vm_memory):
        """
        Validate a vApp VM memory size for vCloud 5.1: any multiple of 4 MB
        is accepted (instead of the fixed list used by the 1.5 driver).

        :raises ValueError: if ``vm_memory`` is not a multiple of 4.
        """
        if vm_memory is None:
            return None
        if vm_memory % 4 != 0:
            # The vcd 5.1 virtual machine memory size must be a multiple of
            # 4 MB.
            raise ValueError(
                '%s is not a valid vApp VM memory value' % (vm_memory))
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/voxel.py b/awx/lib/site-packages/libcloud/compute/drivers/voxel.py
new file mode 100644
index 0000000000..98650272d1
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/voxel.py
@@ -0,0 +1,307 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Voxel VoxCloud driver
+"""
+import datetime
+import hashlib
+
+from libcloud.utils.py3 import b
+
+from libcloud.common.base import XmlResponse, ConnectionUserAndKey
+from libcloud.common.types import InvalidCredsError
+from libcloud.compute.providers import Provider
+from libcloud.compute.types import NodeState
+from libcloud.compute.base import Node, NodeDriver
+from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
+
+VOXEL_API_HOST = "api.voxel.net"
+
+
class VoxelResponse(XmlResponse):
    """
    XML response wrapper for the Voxel hAPI; caches the parsed body and
    maps error codes to libcloud exceptions.
    """

    def __init__(self, response, connection):
        # Cache of the parsed XML body, filled lazily by parse_body().
        self.parsed = None
        super(VoxelResponse, self).__init__(response=response,
                                            connection=connection)

    def parse_body(self):
        """Parse the XML body once and memoize the result."""
        if not self.body:
            return None
        if not self.parsed:
            self.parsed = super(VoxelResponse, self).parse_body()
        return self.parsed

    def parse_error(self):
        """Collect all <err> entries into one newline-joined message."""
        err_list = []
        if not self.body:
            return None
        if not self.parsed:
            self.parsed = super(VoxelResponse, self).parse_body()
        for err in self.parsed.findall('err'):
            code = err.get('code')
            err_list.append("(%s) %s" % (code, err.get('msg')))
            # From voxel docs:
            # 1: Invalid login or password
            # 9: Permission denied: user lacks access rights for this method
            if code == "1" or code == "9":
                # sucks, but only way to detect
                # bad authentication tokens so far
                raise InvalidCredsError(err_list[-1])
        return "\n".join(err_list)

    def success(self):
        """True when the API reports stat="ok" on the response root."""
        if not self.parsed:
            self.parsed = super(VoxelResponse, self).parse_body()
        stat = self.parsed.get('stat')
        if stat != "ok":
            return False
        return True
+
+
class VoxelConnection(ConnectionUserAndKey):
    """
    Connection class for the Voxel driver.

    Requests are authenticated by an hAPI-style signature: an MD5 digest of
    the API secret followed by the sorted key/value parameter pairs.
    """

    host = VOXEL_API_HOST
    responseCls = VoxelResponse

    def add_default_params(self, params):
        """
        Add the API key, a UTC timestamp and the request signature.

        The signature is ``md5(secret + k1 + v1 + k2 + v2 + ...)`` over the
        parameters sorted by key. Parameters with falsy values do not
        contribute to the signature (unchanged behaviour); the original
        code additionally carried an unreachable else-branch
        (``md5.update(b(key))`` for a truthy-yet-None value), removed here.
        """
        params = dict([(k, v) for k, v in list(params.items())
                       if v is not None])
        params["key"] = self.user_id
        params["timestamp"] = datetime.datetime.utcnow().isoformat() + "+0000"

        md5 = hashlib.md5()
        md5.update(b(self.key))
        for key in sorted(params.keys()):
            if params[key]:
                md5.update(b("%s%s" % (key, params[key])))
        params['api_sig'] = md5.hexdigest()
        return params
+
# Populated at class-definition time by
# VoxelNodeDriver._initialize_instance_types().
VOXEL_INSTANCE_TYPES = {}
# MB of RAM allocated per CPU core in the synthetic size table.
RAM_PER_CPU = 2048

# Map Voxel device/status strings to libcloud NodeState values.
NODE_STATE_MAP = {
    'IN_PROGRESS': NodeState.PENDING,
    'QUEUED': NodeState.PENDING,
    'SUCCEEDED': NodeState.RUNNING,
    'shutting-down': NodeState.TERMINATED,
    'terminated': NodeState.TERMINATED,
    'unknown': NodeState.UNKNOWN,
}
+
+
class VoxelNodeDriver(NodeDriver):
    """
    Voxel VoxCLOUD node driver
    """

    connectionCls = VoxelConnection
    type = Provider.VOXEL
    name = 'Voxel VoxCLOUD'
    website = 'http://www.voxel.net/'

    def _initialize_instance_types():
        # NOTE: this is a plain function, not a method -- it is invoked once
        # below while the class body is being evaluated, to populate the
        # module-level VOXEL_INSTANCE_TYPES table (1..13 cores, RAM scaling
        # with the core count).
        for cpus in range(1, 14):
            if cpus == 1:
                name = "Single CPU"
            else:
                name = "%d CPUs" % cpus
            id = "%dcpu" % cpus
            ram = cpus * RAM_PER_CPU

            VOXEL_INSTANCE_TYPES[id] = {
                'id': id,
                'name': name,
                'ram': ram,
                'disk': None,
                'bandwidth': None,
                'price': None}

    features = {"create_node": [],
                "list_sizes": ["variable_disk"]}

    _initialize_instance_types()

    def list_nodes(self):
        """List all VoxCLOUD virtual servers in the account."""
        params = {"method": "voxel.devices.list"}
        result = self.connection.request('/', params=params).object
        return self._to_nodes(result)

    def list_sizes(self, location=None):
        """Return the synthetic CPU-count based sizes."""
        return [NodeSize(driver=self.connection.driver, **i)
                for i in list(VOXEL_INSTANCE_TYPES.values())]

    def list_images(self, location=None):
        """List OS images available for deployment."""
        params = {"method": "voxel.images.list"}
        result = self.connection.request('/', params=params).object
        return self._to_images(result)

    def create_node(self, **kwargs):
        """Create Voxel Node

        :keyword name: the name to assign the node (mandatory)
        :type name: ``str``

        :keyword image: distribution to deploy
        :type image: :class:`NodeImage`

        :keyword size: the plan size to create (mandatory)
                       Requires size.disk (GB) to be set manually
        :type size: :class:`NodeSize`

        :keyword location: which datacenter to create the node in
        :type location: :class:`NodeLocation`

        :keyword ex_privateip: Backend IP address to assign to node;
                               must be chosen from the customer's
                               private VLAN assignment.
        :type ex_privateip: ``str``

        :keyword ex_publicip: Public-facing IP address to assign to node;
                              must be chosen from the customer's
                              public VLAN assignment.
        :type ex_publicip: ``str``

        :keyword ex_rootpass: Password for root access; generated if unset.
        :type ex_rootpass: ``str``

        :keyword ex_consolepass: Password for remote console;
                                 generated if unset.
        :type ex_consolepass: ``str``

        :keyword ex_sshuser: Username for SSH access
        :type ex_sshuser: ``str``

        :keyword ex_sshpass: Password for SSH access; generated if unset.
        :type ex_sshpass: ``str``

        :keyword ex_voxel_access: Allow access Voxel administrative access.
                                  Defaults to False.
        :type ex_voxel_access: ``bool``

        :rtype: :class:`Node` or ``None``
        """

        # assert that disk > 0
        if not kwargs["size"].disk:
            raise ValueError("size.disk must be non-zero")

        # convert voxel_access to string boolean if needed
        voxel_access = kwargs.get("ex_voxel_access", None)
        if voxel_access is not None:
            voxel_access = "true" if voxel_access else "false"

        params = {
            'method': 'voxel.voxcloud.create',
            'hostname': kwargs["name"],
            'disk_size': int(kwargs["size"].disk),
            'facility': kwargs["location"].id,
            'image_id': kwargs["image"].id,
            # Bug fix: use floor division -- true division yields a float
            # under Python 3 (e.g. 3.0 cores).
            'processing_cores': kwargs["size"].ram // RAM_PER_CPU,
            'backend_ip': kwargs.get("ex_privateip", None),
            'frontend_ip': kwargs.get("ex_publicip", None),
            'admin_password': kwargs.get("ex_rootpass", None),
            'console_password': kwargs.get("ex_consolepass", None),
            'ssh_username': kwargs.get("ex_sshuser", None),
            'ssh_password': kwargs.get("ex_sshpass", None),
            'voxel_access': voxel_access,
        }

        result = self.connection.request('/', params=params).object

        if self._getstatus(result):
            return Node(
                id=result.findtext("device/id"),
                name=kwargs["name"],
                state=NODE_STATE_MAP[result.findtext("device/status")],
                # Bug fix: these previously read the undocumented
                # "publicip"/"privateip" keys; the documented keyword
                # arguments are ex_publicip / ex_privateip.
                public_ips=kwargs.get("ex_publicip", None),
                private_ips=kwargs.get("ex_privateip", None),
                driver=self.connection.driver
            )
        else:
            return None

    def reboot_node(self, node):
        """Reboot the device; returns True when the API reports success."""
        params = {'method': 'voxel.devices.power',
                  'device_id': node.id,
                  'power_action': 'reboot'}
        return self._getstatus(
            self.connection.request('/', params=params).object)

    def destroy_node(self, node):
        """Delete the device; returns True when the API reports success."""
        params = {'method': 'voxel.voxcloud.delete',
                  'device_id': node.id}
        return self._getstatus(
            self.connection.request('/', params=params).object)

    def list_locations(self):
        """List VoxCLOUD facilities (datacenters)."""
        params = {"method": "voxel.voxcloud.facilities.list"}
        result = self.connection.request('/', params=params).object
        return self._to_locations(result)

    def _getstatus(self, element):
        # The hAPI flags success via a stat="ok" attribute on the root.
        status = element.attrib["stat"]
        return status == "ok"

    def _to_locations(self, element):
        """Convert a facilities listing into NodeLocation objects."""
        return [NodeLocation(facility.attrib["label"],
                             facility.findtext("description"),
                             facility.findtext("description"),
                             self)
                for facility in element.findall('facilities/facility')]

    def _to_nodes(self, element):
        """Convert a devices listing into Node objects (virtual servers
        only; dedicated devices are skipped)."""
        nodes = []
        for device in element.findall('devices/device'):
            if device.findtext("type") != "Virtual Server":
                continue
            try:
                # Bug fix: NODE_STATE_MAP is a module-level table, not a
                # class attribute -- "self.NODE_STATE_MAP" raised an
                # AttributeError that the KeyError handler never caught.
                state = NODE_STATE_MAP[device.attrib['status']]
            except KeyError:
                state = NodeState.UNKNOWN

            public_ip = private_ip = None
            ipassignments = device.findall("ipassignments/ipassignment")
            for ip in ipassignments:
                if ip.attrib["type"] == "frontend":
                    public_ip = ip.text
                elif ip.attrib["type"] == "backend":
                    private_ip = ip.text

            nodes.append(Node(id=device.attrib['id'],
                              name=device.attrib['label'],
                              state=state,
                              public_ips=public_ip,
                              private_ips=private_ip,
                              driver=self.connection.driver))
        return nodes

    def _to_images(self, element):
        """Convert an images listing into NodeImage objects."""
        images = []
        for image in element.findall("images/image"):
            images.append(NodeImage(id=image.attrib["id"],
                                    name=image.attrib["summary"],
                                    driver=self.connection.driver))
        return images
diff --git a/awx/lib/site-packages/libcloud/compute/drivers/vpsnet.py b/awx/lib/site-packages/libcloud/compute/drivers/vpsnet.py
new file mode 100644
index 0000000000..8d026a8c4a
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/drivers/vpsnet.py
@@ -0,0 +1,193 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+VPS.net driver
+"""
+import base64
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.py3 import b
+
+from libcloud.common.base import ConnectionUserAndKey, JsonResponse
+from libcloud.common.types import InvalidCredsError, MalformedResponseError
+from libcloud.compute.providers import Provider
+from libcloud.compute.types import NodeState
+from libcloud.compute.base import Node, NodeDriver
+from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
+
API_HOST = 'api.vps.net'
API_VERSION = 'api10json'

# A VPS.net "node" (slice) comes with fixed resources; NodeSize objects are
# synthesized as multiples of these per-slice values (see _to_size below).
RAM_PER_NODE = 256
DISK_PER_NODE = 10
BANDWIDTH_PER_NODE = 250
+
+
class VPSNetResponse(JsonResponse):
    """
    Response class for the VPS.net driver.

    Falls back to returning the raw body whenever the payload is not
    valid JSON.
    """

    def parse_body(self):
        try:
            return super(VPSNetResponse, self).parse_body()
        except MalformedResponseError:
            # Some endpoints return plain text; hand it back untouched.
            return self.body

    def success(self):
        # vps.net wrongly uses 406 for invalid auth creds
        if self.status in (406, 403):
            raise InvalidCredsError()
        return True

    def parse_error(self):
        try:
            parsed = super(VPSNetResponse, self).parse_body()
        except MalformedResponseError:
            return self.body
        return "\n".join(parsed['errors'][0])
+
+
class VPSNetConnection(ConnectionUserAndKey):
    """
    Connection class for the VPS.net driver
    """

    host = API_HOST
    responseCls = VPSNetResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        # VPS.net uses HTTP basic auth: base64("<api user>:<api key>").
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % (encoded)
        return headers
+
+
class VPSNetNodeDriver(NodeDriver):
    """
    VPS.net node driver
    """

    type = Provider.VPSNET
    api_name = 'vps_net'
    name = "vps.net"
    website = 'http://vps.net/'
    connectionCls = VPSNetConnection

    def _to_node(self, vm):
        # The API only reports a boolean "running" flag; anything not
        # running is surfaced as PENDING (it may be booting, stopped, ...).
        if vm['running']:
            state = NodeState.RUNNING
        else:
            state = NodeState.PENDING

        n = Node(id=vm['id'],
                 name=vm['label'],
                 state=state,
                 public_ips=[vm.get('primary_ip_address', None)],
                 private_ips=[],
                 extra={'slices_count': vm['slices_count']},
                 # Number of nodes consumed by VM
                 driver=self.connection.driver)
        return n

    def _to_image(self, image, cloud):
        image = NodeImage(id=image['id'],
                          name="%s: %s" % (cloud, image['label']),
                          driver=self.connection.driver)

        return image

    def _to_size(self, num):
        # Sizes are synthetic: "num" is the slice count, and every resource
        # except disk scales linearly with it.
        size = NodeSize(id=num,
                        name="%d Node" % (num,),
                        ram=RAM_PER_NODE * num,
                        disk=DISK_PER_NODE,
                        bandwidth=BANDWIDTH_PER_NODE * num,
                        price=self._get_price_per_node(num) * num,
                        driver=self.connection.driver)
        return size

    def _get_price_per_node(self, num):
        """
        Return the price of a single slice/node.

        Previously this returned ``num * single_node_price``, which combined
        with the ``* num`` in :meth:`_to_size` made prices scale
        quadratically with the slice count.
        """
        single_node_price = self._get_size_price(size_id='1')
        return single_node_price

    def create_node(self, name, image, size, **kwargs):
        """Create a new VPS.net node

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_backups_enabled: Enable automatic backups
        :type ex_backups_enabled: ``bool``

        :keyword ex_fqdn: Fully Qualified domain of the node
        :type ex_fqdn: ``str``
        """
        headers = {'Content-Type': 'application/json'}
        request = {'virtual_machine':
                   {'label': name,
                    'fqdn': kwargs.get('ex_fqdn', ''),
                    'system_template_id': image.id,
                    'backups_enabled': kwargs.get('ex_backups_enabled', 0),
                    'slices_required': size.id}}

        res = self.connection.request('/virtual_machines.%s' % (API_VERSION,),
                                      data=json.dumps(request),
                                      headers=headers,
                                      method='POST')
        node = self._to_node(res.object['virtual_machine'])
        return node

    def reboot_node(self, node):
        """Reboot ``node``; always returns True if the request succeeded."""
        # The response body is not needed -- the previous code parsed it into
        # an unused local variable.
        self.connection.request(
            '/virtual_machines/%s/%s.%s' % (node.id,
                                            'reboot',
                                            API_VERSION),
            method="POST")
        return True

    def list_sizes(self, location=None):
        res = self.connection.request('/nodes.%s' % (API_VERSION,))
        # NOTE(review): this counts slices that already have a virtual
        # machine assigned -- presumably the account's usable node count;
        # confirm against the VPS.net API documentation.
        available_nodes = len([size for size in res.object
                               if size['slice']['virtual_machine_id']])
        sizes = [self._to_size(i) for i in range(1, available_nodes + 1)]
        return sizes

    def destroy_node(self, node):
        res = self.connection.request('/virtual_machines/%s.%s'
                                      % (node.id, API_VERSION),
                                      method='DELETE')
        return res.status == 200

    def list_nodes(self):
        res = self.connection.request('/virtual_machines.%s' % (API_VERSION,))
        return [self._to_node(i['virtual_machine']) for i in res.object]

    def list_images(self, location=None):
        res = self.connection.request('/available_clouds.%s' % (API_VERSION,))

        images = []
        for cloud in res.object:
            label = cloud['cloud']['label']
            templates = cloud['cloud']['system_templates']
            images.extend([self._to_image(image, label)
                           for image in templates])

        return images

    def list_locations(self):
        # The API exposes no location listing; a single static location is
        # returned for compatibility with the NodeDriver interface.
        return [NodeLocation(0, "VPS.net Western US", 'US', self)]
diff --git a/awx/lib/site-packages/libcloud/compute/providers.py b/awx/lib/site-packages/libcloud/compute/providers.py
new file mode 100644
index 0000000000..87b3f321d0
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/providers.py
@@ -0,0 +1,175 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Provider related utilities
+"""
+
+from libcloud.utils.misc import get_driver as _get_provider_driver
+from libcloud.utils.misc import set_driver as _set_provider_driver
+from libcloud.compute.types import Provider, DEPRECATED_RACKSPACE_PROVIDERS
+from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING
+
# Public API of this module.
__all__ = [
    "Provider",
    "DRIVERS",
    "get_driver"]
+
# Registry mapping each Provider constant to a (module path, class name)
# tuple. Drivers are resolved through libcloud.utils.misc.get_driver --
# presumably lazily, so optional provider dependencies are only imported
# when actually used (TODO confirm against utils.misc).
DRIVERS = {
    Provider.DUMMY:
    ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'),
    Provider.EC2_US_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'),
    Provider.EC2_EU_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'),
    Provider.EC2_US_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'),
    Provider.EC2_US_WEST_OREGON:
    ('libcloud.compute.drivers.ec2', 'EC2USWestOregonNodeDriver'),
    Provider.EC2_AP_SOUTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'),
    Provider.EC2_AP_NORTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'),
    Provider.EC2_SA_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2SAEastNodeDriver'),
    Provider.EC2_AP_SOUTHEAST2:
    ('libcloud.compute.drivers.ec2', 'EC2APSESydneyNodeDriver'),
    Provider.ECP:
    ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'),
    Provider.ELASTICHOSTS:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'),
    Provider.ELASTICHOSTS_UK1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'),
    Provider.ELASTICHOSTS_UK2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'),
    Provider.ELASTICHOSTS_US1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'),
    Provider.ELASTICHOSTS_US2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS2NodeDriver'),
    Provider.ELASTICHOSTS_US3:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS3NodeDriver'),
    Provider.ELASTICHOSTS_CA1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCA1NodeDriver'),
    Provider.ELASTICHOSTS_AU1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsAU1NodeDriver'),
    Provider.ELASTICHOSTS_CN1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCN1NodeDriver'),
    Provider.SKALICLOUD:
    ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'),
    Provider.SERVERLOVE:
    ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'),
    Provider.CLOUDSIGMA:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'),
    Provider.GCE:
    ('libcloud.compute.drivers.gce', 'GCENodeDriver'),
    Provider.GOGRID:
    ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'),
    Provider.RACKSPACE:
    ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'),
    Provider.RACKSPACE_FIRST_GEN:
    ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'),
    Provider.HPCLOUD:
    ('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'),
    Provider.KILI:
    ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'),
    Provider.VPSNET:
    ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'),
    Provider.LINODE:
    ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'),
    Provider.RIMUHOSTING:
    ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'),
    Provider.VOXEL:
    ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'),
    Provider.SOFTLAYER:
    ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'),
    Provider.EUCALYPTUS:
    ('libcloud.compute.drivers.ec2', 'EucNodeDriver'),
    Provider.IBM:
    ('libcloud.compute.drivers.ibm_sce', 'IBMNodeDriver'),
    Provider.OPENNEBULA:
    ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'),
    Provider.DREAMHOST:
    ('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'),
    Provider.BRIGHTBOX:
    ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'),
    Provider.NIMBUS:
    ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'),
    Provider.BLUEBOX:
    ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'),
    Provider.GANDI:
    ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'),
    Provider.OPSOURCE:
    ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'),
    Provider.OPENSTACK:
    ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'),
    Provider.NINEFOLD:
    ('libcloud.compute.drivers.ninefold', 'NinefoldNodeDriver'),
    Provider.VCLOUD:
    ('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'),
    Provider.TERREMARK:
    ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'),
    Provider.CLOUDSTACK:
    ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'),
    Provider.LIBVIRT:
    ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'),
    Provider.JOYENT:
    ('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'),
    Provider.VCL:
    ('libcloud.compute.drivers.vcl', 'VCLNodeDriver'),
    Provider.KTUCLOUD:
    ('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'),
    Provider.HOSTVIRTUAL:
    ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'),
    Provider.ABIQUO:
    ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'),
    Provider.DIGITAL_OCEAN:
    ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'),
    Provider.NEPHOSCALE:
    ('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'),
    Provider.CLOUDFRAMES:
    ('libcloud.compute.drivers.cloudframes', 'CloudFramesNodeDriver'),
    Provider.EXOSCALE:
    ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'),
    Provider.IKOULA:
    ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'),
    Provider.OUTSCALE_SAS:
    ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'),
    Provider.OUTSCALE_INC:
    ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'),

    # Deprecated
    Provider.CLOUDSIGMA_US:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaLvsNodeDriver'),
}
+
+
def get_driver(provider):
    """
    Return the driver class registered for ``provider``.

    Removed Rackspace constants raise an explanatory error pointing at the
    replacement constant instead of resolving to the wrong driver.
    """
    if provider in DEPRECATED_RACKSPACE_PROVIDERS:
        # Invert Provider.__dict__ so constant values map back to names.
        id_to_name_map = {value: name
                          for name, value in Provider.__dict__.items()}
        old_name = id_to_name_map[provider]
        new_name = id_to_name_map[OLD_CONSTANT_TO_NEW_MAPPING[provider]]

        url = 'http://s.apache.org/lc0140un'
        msg = ('Provider constant %s has been removed. New constant '
               'is now called %s.\n'
               'For more information on this change and how to modify your '
               'code to work with it, please visit: %s' %
               (old_name, new_name, url))
        raise Exception(msg)

    return _get_provider_driver(DRIVERS, provider)
+
+
def set_driver(provider, module, klass):
    # Thin wrapper around the shared registry helper: registers ``klass``
    # from ``module`` under the ``provider`` constant in DRIVERS.
    return _set_provider_driver(DRIVERS, provider, module, klass)
diff --git a/awx/lib/site-packages/libcloud/compute/ssh.py b/awx/lib/site-packages/libcloud/compute/ssh.py
new file mode 100644
index 0000000000..af9c43a529
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/ssh.py
@@ -0,0 +1,530 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Wraps multiple ways to communicate over SSH.
+"""
+
# Whether the optional paramiko dependency is importable; consulted at the
# bottom of this module to pick the default SSHClient implementation.
have_paramiko = False

try:
    import paramiko
    have_paramiko = True
except ImportError:
    pass
+
+# Depending on your version of Paramiko, it may cause a deprecation
+# warning on Python 2.6.
+# Ref: https://bugs.launchpad.net/paramiko/+bug/392973
+
+import os
+import time
+import subprocess
+import logging
+import warnings
+
+from os.path import split as psplit
+from os.path import join as pjoin
+
+from libcloud.utils.logging import ExtraLogFormatter
+from libcloud.utils.py3 import StringIO
+
# Public API of this module.
__all__ = [
    'BaseSSHClient',
    'ParamikoSSHClient',
    'ShellOutSSHClient',

    'SSHCommandTimeoutError'
]


# Maximum number of bytes to read at once from a socket
CHUNK_SIZE = 1024
+
+
class SSHCommandTimeoutError(Exception):
    """
    Exception which is raised when an SSH command times out.
    """
    def __init__(self, cmd, timeout):
        self.cmd = cmd
        self.timeout = timeout
        # Store the message explicitly: Python 3 exceptions have no
        # ``message`` attribute, which previously made __str__ raise
        # AttributeError.
        self.message = 'Command didn\'t finish in %s seconds' % (timeout)
        super(SSHCommandTimeoutError, self).__init__(self.message)

    def __repr__(self):
        # The format string used to be empty (''), so repr() raised
        # TypeError ("not all arguments converted ...").
        return ('<SSHCommandTimeoutError: cmd="%s", timeout=%s>' %
                (self.cmd, self.timeout))

    def __str__(self):
        return self.message
+
+
class BaseSSHClient(object):
    """
    Base class representing a connection over SSH/SCP to a remote node.
    """

    def __init__(self, hostname, port=22, username='root', password=None,
                 key=None, key_files=None, timeout=None):
        """
        :type hostname: ``str``
        :keyword hostname: Hostname or IP address to connect to.

        :type port: ``int``
        :keyword port: TCP port to communicate on, defaults to 22.

        :type username: ``str``
        :keyword username: Username to use, defaults to root.

        :type password: ``str``
        :keyword password: Password for authentication, or the passphrase
                           for a password-protected private key.

        :param key: Deprecated in favor of ``key_files`` argument.

        :type key_files: ``str`` or ``list``
        :keyword key_files: A list of paths to the private key files to use.

        :type timeout: ``float``
        :keyword timeout: Connection timeout in seconds (optional).
        """
        if key is not None:
            message = ('You are using deprecated "key" argument which has '
                       'been replaced with "key_files" argument')
            warnings.warn(message, DeprecationWarning)

            # An explicitly supplied "key_files" still wins over the
            # deprecated "key" argument.
            if not key_files:
                key_files = key

        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.key_files = key_files
        self.timeout = timeout

    def connect(self):
        """
        Connect to the remote node over SSH.

        :return: True if the connection has been successfully established,
                 False otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'connect not implemented for this ssh client')

    def put(self, path, contents=None, chmod=None, mode='w'):
        """
        Upload a file to the remote node.

        :type path: ``str``
        :keyword path: File path on the remote node.

        :type contents: ``str``
        :keyword contents: File Contents.

        :type chmod: ``int``
        :keyword chmod: chmod file to this after creation.

        :type mode: ``str``
        :keyword mode: Mode in which the file is opened.

        :return: Full path to the location where a file has been saved.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'put not implemented for this ssh client')

    def delete(self, path):
        """
        Delete/Unlink a file on the remote node.

        :type path: ``str``
        :keyword path: File path on the remote node.

        :return: True if the file has been successfully deleted, False
                 otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete not implemented for this ssh client')

    def run(self, cmd):
        """
        Run a command on a remote node.

        :type cmd: ``str``
        :keyword cmd: Command to run.

        :return: ``list`` of [stdout, stderr, exit_status]
        """
        raise NotImplementedError(
            'run not implemented for this ssh client')

    def close(self):
        """
        Shutdown connection to the remote node.

        :return: True if the connection has been successfully closed, False
                 otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'close not implemented for this ssh client')

    def _get_and_setup_logger(self):
        # A debug file handler is only attached when the LIBCLOUD_DEBUG
        # environment variable points at a log file path.
        logger = logging.getLogger('libcloud.compute.ssh')
        path = os.getenv('LIBCLOUD_DEBUG')

        if not path:
            return logger

        handler = logging.FileHandler(path)
        handler.setFormatter(ExtraLogFormatter())
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        return logger
+
+
class ParamikoSSHClient(BaseSSHClient):

    """
    A SSH Client powered by Paramiko.
    """
    def __init__(self, hostname, port=22, username='root', password=None,
                 key=None, key_files=None, key_material=None, timeout=None):
        """
        Authentication is always attempted in the following order:

        - The key passed in (if key is provided)
        - Any key we can find through an SSH agent (only if no password and
          key is provided)
        - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (only if no
          password and key is provided)
        - Plain username/password auth, if a password was given (if password is
          provided)

        :type key_material: ``str``
        :keyword key_material: Private key content (mutually exclusive with
                               ``key_files``).
        """
        if key_files and key_material:
            raise ValueError(('key_files and key_material arguments are '
                              'mutually exclusive'))

        super(ParamikoSSHClient, self).__init__(hostname=hostname, port=port,
                                                username=username,
                                                password=password,
                                                key=key,
                                                key_files=key_files,
                                                timeout=timeout)

        self.key_material = key_material

        # Unknown host keys are accepted automatically.
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.logger = self._get_and_setup_logger()

    def connect(self):
        """
        Open the SSH connection using the credentials configured in
        ``__init__``; agent/key discovery is only enabled when neither a
        password nor an explicit key was supplied.
        """
        conninfo = {'hostname': self.hostname,
                    'port': self.port,
                    'username': self.username,
                    'allow_agent': False,
                    'look_for_keys': False}

        if self.password:
            conninfo['password'] = self.password

        if self.key_files:
            conninfo['key_filename'] = self.key_files

        if self.key_material:
            conninfo['pkey'] = self._get_pkey_object(key=self.key_material)

        if not self.password and not (self.key_files or self.key_material):
            conninfo['allow_agent'] = True
            conninfo['look_for_keys'] = True

        if self.timeout:
            conninfo['timeout'] = self.timeout

        extra = {'_hostname': self.hostname, '_port': self.port,
                 '_username': self.username, '_timeout': self.timeout}
        self.logger.debug('Connecting to server', extra=extra)

        self.client.connect(**conninfo)
        return True

    def put(self, path, contents=None, chmod=None, mode='w'):
        """
        Upload ``contents`` to ``path`` over SFTP, creating intermediate
        directories as needed; returns the absolute remote path.
        """
        extra = {'_path': path, '_mode': mode, '_chmod': chmod}
        self.logger.debug('Uploading file', extra=extra)

        sftp = self.client.open_sftp()
        # less than ideal, but we need to mkdir stuff otherwise file() fails
        head, tail = psplit(path)

        if path[0] == "/":
            sftp.chdir("/")
        else:
            # Relative path - start from a home directory (~)
            sftp.chdir('.')

        for part in head.split("/"):
            if part != "":
                try:
                    sftp.mkdir(part)
                except IOError:
                    # so, there doesn't seem to be a way to
                    # catch EEXIST consistently *sigh*
                    pass
                sftp.chdir(part)

        cwd = sftp.getcwd()

        ak = sftp.file(tail, mode=mode)
        ak.write(contents)
        if chmod is not None:
            ak.chmod(chmod)
        ak.close()
        sftp.close()

        if path[0] == '/':
            file_path = path
        else:
            file_path = pjoin(cwd, path)

        return file_path

    def delete(self, path):
        """Unlink ``path`` on the remote node over SFTP."""
        extra = {'_path': path}
        self.logger.debug('Deleting file', extra=extra)

        sftp = self.client.open_sftp()
        sftp.unlink(path)
        sftp.close()
        return True

    def run(self, cmd, timeout=None):
        """
        Note: This function is based on paramiko's exec_command()
        method.

        :param timeout: How long to wait (in seconds) for the command to
                        finish (optional).
        :type timeout: ``float``

        :return: ``list`` of [stdout, stderr, exit_status].
        """
        extra = {'_cmd': cmd}
        self.logger.debug('Executing command', extra=extra)

        # Use the system default buffer size
        bufsize = -1

        transport = self.client.get_transport()
        chan = transport.open_session()

        start_time = time.time()
        chan.exec_command(cmd)

        stdout = StringIO()
        stderr = StringIO()

        # Create a stdin file and immediately close it to prevent any
        # interactive script from hanging the process.
        stdin = chan.makefile('wb', bufsize)
        stdin.close()

        # Receive all the output
        # Note #1: This is used instead of chan.makefile approach to prevent
        # buffering issues and hanging if the executed command produces a lot
        # of output.
        #
        # Note #2: If you are going to remove "ready" checks inside the loop
        # you are going to have a bad time. Trying to consume from a channel
        # which is not ready will block for indefinitely.
        exit_status_ready = chan.exit_status_ready()

        while not exit_status_ready:
            current_time = time.time()
            elapsed_time = (current_time - start_time)

            if timeout and (elapsed_time > timeout):
                # TODO: Is this the right way to clean up?
                chan.close()

                raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout)

            if chan.recv_ready():
                data = chan.recv(CHUNK_SIZE)

                while data:
                    stdout.write(data)
                    ready = chan.recv_ready()

                    if not ready:
                        break

                    data = chan.recv(CHUNK_SIZE)

            if chan.recv_stderr_ready():
                data = chan.recv_stderr(CHUNK_SIZE)

                while data:
                    stderr.write(data)
                    ready = chan.recv_stderr_ready()

                    if not ready:
                        break

                    data = chan.recv_stderr(CHUNK_SIZE)

            # We need to check the exist status here, because the command could
            # print some output and exit during this sleep bellow.
            exit_status_ready = chan.exit_status_ready()

            if exit_status_ready:
                break

            # Short sleep to prevent busy waiting
            time.sleep(1.5)

        # Receive the exit status code of the command we ran.
        status = chan.recv_exit_status()

        stdout = stdout.getvalue()
        stderr = stderr.getvalue()

        extra = {'_status': status, '_stdout': stdout, '_stderr': stderr}
        self.logger.debug('Command finished', extra=extra)

        return [stdout, stderr, status]

    def close(self):
        """Close the underlying paramiko client connection."""
        self.logger.debug('Closing server connection')

        self.client.close()
        return True

    def _get_pkey_object(self, key):
        """
        Try to detect private key type and return paramiko.PKey object.
        """

        for cls in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey]:
            try:
                key = cls.from_private_key(StringIO(key))
            except paramiko.ssh_exception.SSHException:
                # Invalid key, try other key type
                pass
            else:
                return key

        msg = 'Invalid or unsupported key type'
        raise paramiko.ssh_exception.SSHException(msg)
+
+
class ShellOutSSHClient(BaseSSHClient):
    """
    This client shells out to "ssh" binary to run commands on the remote
    server.

    Note: This client should not be used in production.
    """

    def __init__(self, hostname, port=22, username='root', password=None,
                 key=None, key_files=None, timeout=None):
        super(ShellOutSSHClient, self).__init__(hostname=hostname,
                                                port=port, username=username,
                                                password=password,
                                                key=key,
                                                key_files=key_files,
                                                timeout=timeout)
        if self.password:
            raise ValueError('ShellOutSSHClient only supports key auth')

        # Probe for the ssh binary; a 127 return code means the shell could
        # not find it.
        child = subprocess.Popen(['ssh'], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        child.communicate()

        if child.returncode == 127:
            raise ValueError('ssh client is not available')

        self.logger = self._get_and_setup_logger()

    def connect(self):
        """
        This client doesn't support persistent connections establish a new
        connection every time "run" method is called.
        """
        return True

    def run(self, cmd):
        # Each call spawns a fresh "ssh user@host <cmd>" process.
        return self._run_remote_shell_command([cmd])

    def put(self, path, contents=None, chmod=None, mode='w'):
        # NOTE(review): ``contents`` and ``path`` are interpolated into a
        # remote shell command without any quoting/escaping -- a shell
        # injection hazard, tolerable only for the non-production use this
        # class documents.
        if mode == 'w':
            redirect = '>'
        elif mode == 'a':
            redirect = '>>'
        else:
            raise ValueError('Invalid mode: ' + mode)

        cmd = ['echo "%s" %s %s' % (contents, redirect, path)]
        self._run_remote_shell_command(cmd)
        return path

    def delete(self, path):
        # Same caveat as put(): ``path`` is passed to the remote shell
        # unescaped.
        cmd = ['rm', '-rf', path]
        self._run_remote_shell_command(cmd)
        return True

    def close(self):
        # Nothing to do: no persistent connection is kept.
        return True

    def _get_base_ssh_command(self):
        # Builds the argv prefix: ssh [-i key] [-oConnectTimeout=N] user@host
        cmd = ['ssh']

        if self.key_files:
            cmd += ['-i', self.key_files]

        if self.timeout:
            cmd += ['-oConnectTimeout=%s' % (self.timeout)]

        cmd += ['%s@%s' % (self.username, self.hostname)]

        return cmd

    def _run_remote_shell_command(self, cmd):
        """
        Run a command on a remote server.

        :param cmd: Command to run.
        :type cmd: ``list`` of ``str``

        :return: Command stdout, stderr and status code.
        :rtype: ``tuple``
        """
        base_cmd = self._get_base_ssh_command()
        # The command pieces are joined into a single argument that the
        # remote shell re-parses.
        full_cmd = base_cmd + [' '.join(cmd)]

        self.logger.debug('Executing command: "%s"' % (' '.join(full_cmd)))

        child = subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        stdout, stderr = child.communicate()
        return (stdout, stderr, child.returncode)
+
+
class MockSSHClient(BaseSSHClient):
    # Placeholder used when paramiko is not installed; it inherits the
    # NotImplementedError-raising stubs from BaseSSHClient.
    pass


# Default client implementation: paramiko when available, otherwise the
# mock fallback above.
SSHClient = ParamikoSSHClient
if not have_paramiko:
    SSHClient = MockSSHClient
diff --git a/awx/lib/site-packages/libcloud/compute/types.py b/awx/lib/site-packages/libcloud/compute/types.py
new file mode 100644
index 0000000000..9f7a308cce
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/compute/types.py
@@ -0,0 +1,249 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Base types used by other parts of libcloud
+"""
+
+from libcloud.common.types import LibcloudError, MalformedResponseError
+from libcloud.common.types import InvalidCredsError, InvalidCredsException
+
# Public API of this module (includes re-exported common types for
# backwards compatibility).
__all__ = [
    "Provider",
    "NodeState",
    "DeploymentError",
    "DeploymentException",

    # @@TR: should the unused imports below be exported?
    "LibcloudError",
    "MalformedResponseError",
    "InvalidCredsError",
    "InvalidCredsException",
    "DEPRECATED_RACKSPACE_PROVIDERS",
    "OLD_CONSTANT_TO_NEW_MAPPING"
]
+
+
class Provider(object):
    """
    Defines for each of the supported providers

    :cvar DUMMY: Example provider
    :cvar EC2_US_EAST: Amazon AWS US N. Virginia
    :cvar EC2_US_WEST: Amazon AWS US N. California
    :cvar EC2_EU_WEST: Amazon AWS EU Ireland
    :cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers
    :cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers
    :cvar GCE: Google Compute Engine
    :cvar GOGRID: GoGrid
    :cvar VPSNET: VPS.net
    :cvar LINODE: Linode.com
    :cvar VCLOUD: vmware vCloud
    :cvar RIMUHOSTING: RimuHosting.com
    :cvar ECP: Enomaly
    :cvar IBM: IBM Developer Cloud
    :cvar OPENNEBULA: OpenNebula.org
    :cvar DREAMHOST: DreamHost Private Server
    :cvar ELASTICHOSTS: ElasticHosts.com
    :cvar CLOUDSIGMA: CloudSigma
    :cvar NIMBUS: Nimbus
    :cvar BLUEBOX: Bluebox
    :cvar OPSOURCE: Opsource Cloud
    :cvar NINEFOLD: Ninefold
    :cvar TERREMARK: Terremark
    :cvar EC2_US_WEST_OREGON: Amazon AWS US West 2 (Oregon)
    :cvar CLOUDSTACK: CloudStack
    :cvar CLOUDSIGMA_US: CloudSigma US Las Vegas
    :cvar LIBVIRT: Libvirt driver
    :cvar JOYENT: Joyent driver
    :cvar VCL: VCL driver
    :cvar KTUCLOUD: kt ucloud driver
    :cvar GRIDSPOT: Gridspot driver
    :cvar ABIQUO: Abiquo driver
    :cvar NEPHOSCALE: NephoScale driver
    :cvar EXOSCALE: Exoscale driver.
    :cvar IKOULA: Ikoula driver.
    :cvar OUTSCALE_SAS: Outscale SAS driver.
    :cvar OUTSCALE_INC: Outscale INC driver.
    """
    DUMMY = 'dummy'
    # Same value as EC2_US_EAST below -- EC2 is an alias for the historical
    # default region.
    EC2 = 'ec2_us_east'
    RACKSPACE = 'rackspace'
    GCE = 'gce'
    GOGRID = 'gogrid'
    VPSNET = 'vpsnet'
    LINODE = 'linode'
    VCLOUD = 'vcloud'
    RIMUHOSTING = 'rimuhosting'
    VOXEL = 'voxel'
    SOFTLAYER = 'softlayer'
    EUCALYPTUS = 'eucalyptus'
    ECP = 'ecp'
    IBM = 'ibm'
    OPENNEBULA = 'opennebula'
    DREAMHOST = 'dreamhost'
    ELASTICHOSTS = 'elastichosts'
    BRIGHTBOX = 'brightbox'
    CLOUDSIGMA = 'cloudsigma'
    NIMBUS = 'nimbus'
    BLUEBOX = 'bluebox'
    GANDI = 'gandi'
    OPSOURCE = 'opsource'
    OPENSTACK = 'openstack'
    SKALICLOUD = 'skalicloud'
    SERVERLOVE = 'serverlove'
    NINEFOLD = 'ninefold'
    TERREMARK = 'terremark'
    CLOUDSTACK = 'cloudstack'
    LIBVIRT = 'libvirt'
    JOYENT = 'joyent'
    VCL = 'vcl'
    KTUCLOUD = 'ktucloud'
    GRIDSPOT = 'gridspot'
    RACKSPACE_FIRST_GEN = 'rackspace_first_gen'
    HOSTVIRTUAL = 'hostvirtual'
    ABIQUO = 'abiquo'
    DIGITAL_OCEAN = 'digitalocean'
    NEPHOSCALE = 'nephoscale'
    CLOUDFRAMES = 'cloudframes'
    EXOSCALE = 'exoscale'
    IKOULA = 'ikoula'
    OUTSCALE_SAS = 'outscale_sas'
    OUTSCALE_INC = 'outscale_inc'

    # OpenStack based providers
    HPCLOUD = 'hpcloud'
    KILI = 'kili'

    # Deprecated constants which are still supported
    EC2_US_EAST = 'ec2_us_east'
    EC2_EU = 'ec2_eu_west'  # deprecated name
    EC2_EU_WEST = 'ec2_eu_west'
    EC2_US_WEST = 'ec2_us_west'
    EC2_AP_SOUTHEAST = 'ec2_ap_southeast'
    EC2_AP_NORTHEAST = 'ec2_ap_northeast'
    EC2_US_WEST_OREGON = 'ec2_us_west_oregon'
    EC2_SA_EAST = 'ec2_sa_east'
    EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2'

    ELASTICHOSTS_UK1 = 'elastichosts_uk1'
    ELASTICHOSTS_UK2 = 'elastichosts_uk2'
    ELASTICHOSTS_US1 = 'elastichosts_us1'
    ELASTICHOSTS_US2 = 'elastichosts_us2'
    ELASTICHOSTS_US3 = 'elastichosts_us3'
    ELASTICHOSTS_CA1 = 'elastichosts_ca1'
    ELASTICHOSTS_AU1 = 'elastichosts_au1'
    ELASTICHOSTS_CN1 = 'elastichosts_cn1'

    CLOUDSIGMA_US = 'cloudsigma_us'

    # Deprecated constants which aren't supported anymore
    # (see DEPRECATED_RACKSPACE_PROVIDERS / get_driver in providers.py)
    RACKSPACE_UK = 'rackspace_uk'
    RACKSPACE_NOVA_BETA = 'rackspace_nova_beta'
    RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw'
    RACKSPACE_NOVA_LON = 'rackspace_nova_lon'
    RACKSPACE_NOVA_ORD = 'rackspace_nova_ord'

    # Removed
    # SLICEHOST = 'slicehost'
+
+
# Rackspace constants that can no longer be used with get_driver();
# resolving one of them raises an error pointing at the replacement.
DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK,
                                  Provider.RACKSPACE_NOVA_BETA,
                                  Provider.RACKSPACE_NOVA_DFW,
                                  Provider.RACKSPACE_NOVA_LON,
                                  Provider.RACKSPACE_NOVA_ORD]
# Maps each removed/renamed constant to its modern equivalent (used to
# build the migration hint in providers.get_driver).
OLD_CONSTANT_TO_NEW_MAPPING = {
    Provider.RACKSPACE: Provider.RACKSPACE_FIRST_GEN,
    Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN,

    Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE,
    Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE,
    Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE,
    Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE
}
+
+
class NodeState(object):
    """
    Standard states for a node

    :cvar RUNNING: Node is running.
    :cvar REBOOTING: Node is rebooting.
    :cvar TERMINATED: Node is terminated. This node can't be started later on.
    :cvar STOPPED: Node is stopped. This node can be started later on.
    :cvar PENDING: Node is pending.
    :cvar UNKNOWN: Node state is unknown.
    """
    # The numeric values are part of the public API; do not renumber.
    RUNNING = 0
    REBOOTING = 1
    TERMINATED = 2
    PENDING = 3
    UNKNOWN = 4
    STOPPED = 5
+
+
class Architecture(object):
    """
    Image and size architectures.

    :cvar I386: i386 (32 bit)
    :cvar X86_64: x86_64 (64 bit)
    """
    I386 = 0
    # Historically misspelled constant; kept for backward compatibility.
    X86_X64 = 1
    # Correctly spelled alias matching the docstring; same value as X86_X64.
    X86_64 = 1
+
+
class DeploymentError(LibcloudError):
    """
    Exception used when a Deployment Task failed.

    :ivar node: :class:`Node` on which this exception happened, you might want
                to call :func:`Node.destroy`
    """
    def __init__(self, node, original_exception=None, driver=None):
        self.node = node
        self.value = original_exception
        self.driver = driver

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # The format string used to be empty (''), which made repr()/str()
        # raise TypeError instead of describing the failure.
        return ('<DeploymentError: node=%s, error=%s, driver=%s>'
                % (self.node.id, str(self.value), str(self.driver)))
+
+
class KeyPairError(LibcloudError):
    """
    Base error for key pair related failures.
    """
    error_type = 'KeyPairError'

    def __init__(self, name, driver):
        self.name = name
        message = 'Key pair with name %s does not exist' % (name)
        self.value = message
        super(KeyPairError, self).__init__(value=message, driver=driver)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return ('<%s name=%s, value=%s, driver=%s>' %
                (self.error_type, self.name, self.value, self.driver.name))
+
+
class KeyPairDoesNotExistError(KeyPairError):
    # Raised when a key pair is looked up by a name that is not registered
    # with the provider.
    error_type = 'KeyPairDoesNotExistError'
+
+
# DeploymentException is the deprecated alias of DeploymentError (the bare
# string below has the relationship reversed; it is kept verbatim for
# source compatibility).
"""Deprecated alias of :class:`DeploymentException`"""
DeploymentException = DeploymentError
diff --git a/awx/lib/site-packages/libcloud/data/pricing.json b/awx/lib/site-packages/libcloud/data/pricing.json
new file mode 100644
index 0000000000..e8aa7bf3c2
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/data/pricing.json
@@ -0,0 +1,566 @@
+{
+ "compute": {
+ "ec2_us_west_oregon": {
+ "m3.medium": "0.070",
+ "m3.large": "0.140",
+ "m3.xlarge": "0.280",
+ "m3.2xlarge": "0.560",
+ "m1.small": "0.044",
+ "m1.medium": "0.087",
+ "m1.large": "0.175",
+ "m1.xlarge": "0.350",
+ "c3.large": "0.105",
+ "c3.xlarge": "0.210",
+ "c3.2xlarge": "0.420",
+ "c3.4xlarge": "0.840",
+ "c3.8xlarge": "1.680",
+ "c1.medium": "0.130",
+ "c1.xlarge": "0.520",
+ "cc2.8xlarge": "2.000",
+ "g2.2xlarge": "0.650",
+ "r3.large": "0.175",
+ "r3.xlarge": "0.350",
+ "r3.2xlarge": "0.700",
+ "r3.4xlarge": "1.400",
+ "r3.8xlarge": "2.800",
+ "m2.xlarge": "0.245",
+ "m2.2xlarge": "0.490",
+ "m2.4xlarge": "0.980",
+ "cr1.8xlarge": "3.500",
+ "i2.xlarge": "0.853",
+ "i2.2xlarge": "1.705",
+ "i2.4xlarge": "3.410",
+ "i2.8xlarge": "6.820",
+ "hs1.8xlarge": "4.600",
+ "hi1.4xlarge": "3.100",
+ "t1.micro": "0.020"
+ },
+ "ec2_us_west": {
+ "m3.medium": "0.077",
+ "m3.large": "0.154",
+ "m3.xlarge": "0.308",
+ "m3.2xlarge": "0.616",
+ "m1.small": "0.047",
+ "m1.medium": "0.095",
+ "m1.large": "0.190",
+ "m1.xlarge": "0.379",
+ "c3.large": "0.120",
+ "c3.xlarge": "0.239",
+ "c3.2xlarge": "0.478",
+ "c3.4xlarge": "0.956",
+ "c3.8xlarge": "1.912",
+ "c1.medium": "0.148",
+ "c1.xlarge": "0.592",
+ "g2.2xlarge": "0.702",
+ "r3.large": "0.195",
+ "r3.xlarge": "0.390",
+ "r3.2xlarge": "0.780",
+ "r3.4xlarge": "1.560",
+ "r3.8xlarge": "3.120",
+ "m2.xlarge": "0.275",
+ "m2.2xlarge": "0.550",
+ "m2.4xlarge": "1.100",
+ "i2.xlarge": "0.938",
+ "i2.2xlarge": "1.876",
+ "i2.4xlarge": "3.751",
+ "i2.8xlarge": "7.502",
+ "t1.micro": "0.025"
+ },
+ "ec2_eu_west": {
+ "m3.medium": "0.077",
+ "m3.large": "0.154",
+ "m3.xlarge": "0.308",
+ "m3.2xlarge": "0.616",
+ "m1.small": "0.047",
+ "m1.medium": "0.095",
+ "m1.large": "0.190",
+ "m1.xlarge": "0.379",
+ "c3.large": "0.120",
+ "c3.xlarge": "0.239",
+ "c3.2xlarge": "0.478",
+ "c3.4xlarge": "0.956",
+ "c3.8xlarge": "1.912",
+ "c1.medium": "0.148",
+ "c1.xlarge": "0.592",
+ "cc2.8xlarge": "2.250",
+ "g2.2xlarge": "0.702",
+ "cg1.4xlarge": "2.360",
+ "r3.large": "0.195",
+ "r3.xlarge": "0.390",
+ "r3.2xlarge": "0.780",
+ "r3.4xlarge": "1.560",
+ "r3.8xlarge": "3.120",
+ "m2.xlarge": "0.275",
+ "m2.2xlarge": "0.550",
+ "m2.4xlarge": "1.100",
+ "cr1.8xlarge": "3.750",
+ "i2.xlarge": "0.938",
+ "i2.2xlarge": "1.876",
+ "i2.4xlarge": "3.751",
+ "i2.8xlarge": "7.502",
+ "hs1.8xlarge": "4.900",
+ "hi1.4xlarge": "3.100",
+ "t1.micro": "0.020"
+ },
+ "rackspacenovalon": {
+ "performance2-60": 2.72,
+ "performance2-120": 5.44,
+ "performance1-1": 0.04,
+ "performance2-15": 0.68,
+ "performance1-4": 0.16,
+ "performance2-30": 1.36,
+ "performance2-90": 4.08,
+ "3": 0.064,
+ "2": 0.032,
+ "performance1-2": 0.08,
+ "4": 0.129,
+ "7": 0.967,
+ "6": 0.516,
+ "5": 0.258,
+ "performance1-8": 0.32,
+ "8": 1.612
+ },
+ "ec2_ap_southeast_2": {
+ "m3.medium": "0.098",
+ "m3.large": "0.196",
+ "m3.xlarge": "0.392",
+ "m3.2xlarge": "0.784",
+ "m1.small": "0.058",
+ "m1.medium": "0.117",
+ "m1.large": "0.233",
+ "m1.xlarge": "0.467",
+ "c3.large": "0.132",
+ "c3.xlarge": "0.265",
+ "c3.2xlarge": "0.529",
+ "c3.4xlarge": "1.058",
+ "c3.8xlarge": "2.117",
+ "c1.medium": "0.164",
+ "c1.xlarge": "0.655",
+ "r3.large": "0.210",
+ "r3.xlarge": "0.420",
+ "r3.2xlarge": "0.840",
+ "r3.4xlarge": "1.680",
+ "r3.8xlarge": "3.360",
+ "m2.xlarge": "0.296",
+ "m2.2xlarge": "0.592",
+ "m2.4xlarge": "1.183",
+ "i2.xlarge": "1.018",
+ "i2.2xlarge": "2.035",
+ "i2.4xlarge": "4.070",
+ "i2.8xlarge": "8.140",
+ "hs1.8xlarge": "5.570",
+ "t1.micro": "0.020"
+ },
+ "vps_net": {
+ "1": 0.416
+ },
+ "ec2_us_east": {
+ "m3.medium": "0.070",
+ "m3.large": "0.140",
+ "m3.xlarge": "0.280",
+ "m3.2xlarge": "0.560",
+ "m1.small": "0.044",
+ "m1.medium": "0.087",
+ "m1.large": "0.175",
+ "m1.xlarge": "0.350",
+ "c3.large": "0.105",
+ "c3.xlarge": "0.210",
+ "c3.2xlarge": "0.420",
+ "c3.4xlarge": "0.840",
+ "c3.8xlarge": "1.680",
+ "c1.medium": "0.130",
+ "c1.xlarge": "0.520",
+ "cc2.8xlarge": "2.000",
+ "g2.2xlarge": "0.650",
+ "cg1.4xlarge": "2.100",
+ "r3.large": "0.175",
+ "r3.xlarge": "0.350",
+ "r3.2xlarge": "0.700",
+ "r3.4xlarge": "1.400",
+ "r3.8xlarge": "2.800",
+ "m2.xlarge": "0.245",
+ "m2.2xlarge": "0.490",
+ "m2.4xlarge": "0.980",
+ "cr1.8xlarge": "3.500",
+ "i2.xlarge": "0.853",
+ "i2.2xlarge": "1.705",
+ "i2.4xlarge": "3.410",
+ "i2.8xlarge": "6.820",
+ "hs1.8xlarge": "4.600",
+ "hi1.4xlarge": "3.100",
+ "t1.micro": "0.020"
+ },
+ "rackspacenovaus": {
+ "performance2-60": 2.72,
+ "performance2-120": 5.44,
+ "performance1-1": 0.04,
+ "performance2-15": 0.68,
+ "performance1-4": 0.16,
+ "performance2-30": 1.36,
+ "performance2-90": 4.08,
+ "3": 0.06,
+ "2": 0.022,
+ "performance1-2": 0.08,
+ "4": 0.12,
+ "7": 0.96,
+ "6": 0.48,
+ "5": 0.24,
+ "performance1-8": 0.32,
+ "8": 1.2
+ },
+ "ec2_sa_east": {
+ "m3.medium": "0.095",
+ "m3.large": "0.190",
+ "m3.xlarge": "0.381",
+ "m3.2xlarge": "0.761",
+ "m1.small": "0.058",
+ "m1.medium": "0.117",
+ "m1.large": "0.233",
+ "m1.xlarge": "0.467",
+ "c1.medium": "0.179",
+ "c1.xlarge": "0.718",
+ "m2.xlarge": "0.323",
+ "m2.2xlarge": "0.645",
+ "m2.4xlarge": "1.291",
+ "t1.micro": "0.027"
+ },
+ "cloudsigma_zrh": {
+ "high-cpu-medium": 0.211,
+ "standard-large": 0.381,
+ "micro-high-cpu": 0.381,
+ "standard-extra-large": 0.762,
+ "high-memory-double-extra-large": 1.383,
+ "micro-regular": 0.0548,
+ "standard-small": 0.0796,
+ "high-memory-extra-large": 0.642,
+ "high-cpu-extra-large": 0.78
+ },
+ "rackspacenovasyd": {
+ "performance2-60": 2.72,
+ "performance2-120": 5.44,
+ "performance1-1": 0.04,
+ "performance2-15": 0.68,
+ "performance1-4": 0.16,
+ "performance2-30": 1.36,
+ "performance2-90": 4.08,
+ "3": 0.072,
+ "2": 0.026,
+ "performance1-2": 0.08,
+ "4": 0.144,
+ "7": 1.08,
+ "6": 0.576,
+ "5": 0.288,
+ "performance1-8": 0.32,
+ "8": 1.44
+ },
+ "ec2_ap_northeast": {
+ "m3.medium": "0.101",
+ "m3.large": "0.203",
+ "m3.xlarge": "0.405",
+ "m3.2xlarge": "0.810",
+ "m1.small": "0.061",
+ "m1.medium": "0.122",
+ "m1.large": "0.243",
+ "m1.xlarge": "0.486",
+ "c3.large": "0.128",
+ "c3.xlarge": "0.255",
+ "c3.2xlarge": "0.511",
+ "c3.4xlarge": "1.021",
+ "c3.8xlarge": "2.043",
+ "c1.medium": "0.158",
+ "c1.xlarge": "0.632",
+ "cc2.8xlarge": "2.349",
+ "g2.2xlarge": "0.898",
+ "r3.large": "0.210",
+ "r3.xlarge": "0.420",
+ "r3.2xlarge": "0.840",
+ "r3.4xlarge": "1.680",
+ "r3.8xlarge": "3.360",
+ "m2.xlarge": "0.287",
+ "m2.2xlarge": "0.575",
+ "m2.4xlarge": "1.150",
+ "cr1.8xlarge": "4.105",
+ "i2.xlarge": "1.001",
+ "i2.2xlarge": "2.001",
+ "i2.4xlarge": "4.002",
+ "i2.8xlarge": "8.004",
+ "hs1.8xlarge": "5.400",
+ "hi1.4xlarge": "3.276",
+ "t1.micro": "0.026"
+ },
+ "gogrid": {
+ "24GB": 4.56,
+ "512MB": 0.095,
+ "8GB": 1.52,
+ "4GB": 0.76,
+ "2GB": 0.38,
+ "1GB": 0.19,
+ "16GB": 3.04
+ },
+ "serverlove": {
+ "high-cpu-medium": 0.291,
+ "medium": 0.404,
+ "large": 0.534,
+ "small": 0.161,
+ "extra-large": 0.615,
+ "high-cpu-extra-large": 0.776
+ },
+ "elastichosts": {
+ "high-cpu-medium": 0.18,
+ "medium": 0.223,
+ "large": 0.378,
+ "small": 0.1,
+ "extra-large": 0.579,
+ "high-cpu-extra-large": 0.77
+ },
+ "rackspace": {
+ "performance2-60": 2.72,
+ "performance2-120": 5.44,
+ "performance1-1": 0.04,
+ "performance2-15": 0.68,
+ "performance1-4": 0.16,
+ "performance2-30": 1.36,
+ "1": 0.015,
+ "performance2-90": 4.08,
+ "3": 0.06,
+ "2": 0.03,
+ "performance1-2": 0.08,
+ "4": 0.12,
+ "7": 0.96,
+ "6": 0.48,
+ "5": 0.24,
+ "performance1-8": 0.32,
+ "8": 1.8
+ },
+ "nephoscale": {
+ "11": 0.35,
+ "27": 0.0,
+ "48": 0.15,
+ "46": 0.1,
+ "54": 0.938,
+ "56": 0.75,
+ "50": 0.28,
+ "52": 0.48,
+ "1": 0.6,
+ "3": 0.063,
+ "5": 0.031,
+ "7": 0.125,
+ "9": 0.188
+ },
+ "nimbus": {
+ "m1.xlarge": 0.0,
+ "m1.small": 0.0,
+ "m1.large": 0.0
+ },
+ "gandi": {
+ "1": 0.02,
+ "small": 0.02,
+ "large": 0.06,
+ "medium": 0.03,
+ "x-large": 0.12
+ },
+ "skalicloud": {
+ "high-cpu-medium": 0.249,
+ "medium": 0.301,
+ "large": 0.505,
+ "small": 0.136,
+ "extra-large": 0.654,
+ "high-cpu-extra-large": 0.936
+ },
+ "bluebox": {
+ "4gb": 0.35,
+ "2gb": 0.25,
+ "8gb": 0.45,
+ "1gb": 0.15
+ },
+ "ec2_ap_southeast": {
+ "m3.medium": "0.098",
+ "m3.large": "0.196",
+ "m3.xlarge": "0.392",
+ "m3.2xlarge": "0.784",
+ "m1.small": "0.058",
+ "m1.medium": "0.117",
+ "m1.large": "0.233",
+ "m1.xlarge": "0.467",
+ "c3.large": "0.132",
+ "c3.xlarge": "0.265",
+ "c3.2xlarge": "0.529",
+ "c3.4xlarge": "1.058",
+ "c3.8xlarge": "2.117",
+ "c1.medium": "0.164",
+ "c1.xlarge": "0.655",
+ "r3.large": "0.210",
+ "r3.xlarge": "0.420",
+ "r3.2xlarge": "0.840",
+ "r3.4xlarge": "1.680",
+ "r3.8xlarge": "3.360",
+ "m2.xlarge": "0.296",
+ "m2.2xlarge": "0.592",
+ "m2.4xlarge": "1.183",
+ "i2.xlarge": "1.018",
+ "i2.2xlarge": "2.035",
+ "i2.4xlarge": "4.070",
+ "i2.8xlarge": "8.140",
+ "hs1.8xlarge": "5.570",
+ "t1.micro": "0.020"
+ },
+ "cloudsigma_lvs": {
+ "high-cpu-medium": 0.0,
+ "standard-large": 0.0,
+ "micro-high-cpu": 0.0,
+ "standard-extra-large": 0.0,
+ "high-memory-double-extra-large": 0.0,
+ "micro-regular": 0.0,
+ "standard-small": 0.0,
+ "high-memory-extra-large": 0.0,
+ "high-cpu-extra-large": 0.0
+ },
+ "dreamhost": {
+ "default": 115,
+ "high": 150,
+ "minimum": 15,
+ "maximum": 200,
+ "low": 50
+ },
+ "osc_sas_eu_west_3": {
+ "t1.micro": "0.040",
+ "m1.small": "0.090",
+ "m1.medium": "0.130",
+ "m1.large": "0.360",
+ "m1.xlarge": "0.730",
+ "c1.medium": "0.230",
+ "c1.xlarge": "0.900",
+ "m2.xlarge": "0.460",
+ "m2.2xlarge": "0.920",
+ "m2.4xlarge": "1.840",
+ "nv1.small": "5.220",
+ "nv1.medium": "5.310",
+ "nv1.large": "5.490",
+ "nv1.xlarge": "5.860",
+ "cc1.4xlarge": "1.460",
+ "cc2.8xlarge": "2.700",
+ "m3.xlarge": "0.780",
+ "m3.2xlarge": "1.560",
+ "cr1.8xlarge": "3.750",
+            "os1.8xlarge": "6.400"
+ },
+ "osc_sas_eu_west_1": {
+ "t1.micro": "0.040",
+ "m1.small": "0.090",
+ "m1.medium": "0.130",
+ "m1.large": "0.360",
+ "m1.xlarge": "0.730",
+ "c1.medium": "0.230",
+ "c1.xlarge": "0.900",
+ "m2.xlarge": "0.460",
+ "m2.2xlarge": "0.920",
+ "m2.4xlarge": "1.840",
+ "nv1.small": "5.220",
+ "nv1.medium": "5.310",
+ "nv1.large": "5.490",
+ "nv1.xlarge": "5.860",
+ "cc1.4xlarge": "1.460",
+ "cc2.8xlarge": "2.700",
+ "m3.xlarge": "0.780",
+ "m3.2xlarge": "1.560",
+ "cr1.8xlarge": "3.750",
+            "os1.8xlarge": "6.400"
+ },
+ "osc_sas_us_east_1": {
+ "t1.micro": "0.020",
+ "m1.small": "0.070",
+ "m1.medium": "0.180",
+ "m1.large": "0.260",
+ "m1.xlarge": "0.730",
+ "c1.medium": "0.170",
+ "c1.xlarge": "0.660",
+ "m2.xlarge": "0.460",
+ "m2.2xlarge": "1.020",
+ "m2.4xlarge": "2.040",
+ "nv1.small": "5.220",
+ "nv1.medium": "5.310",
+ "nv1.large": "5.490",
+ "nv1.xlarge": "5.860",
+ "cc1.4xlarge": "1.610",
+ "cc2.8xlarge": "2.700",
+ "m3.xlarge": "0.550",
+ "m3.2xlarge": "1.560",
+ "cr1.8xlarge": "3.750",
+            "os1.8xlarge": "6.400"
+ },
+ "osc_inc_eu_west_1": {
+ "t1.micro": "0.040",
+ "m1.small": "0.090",
+ "m1.medium": "0.120",
+ "m1.large": "0.360",
+ "m1.xlarge": "0.730",
+ "c1.medium": "0.230",
+ "c1.xlarge": "0.900",
+ "m2.xlarge": "0.410",
+ "m2.2xlarge": "0.820",
+ "m2.4xlarge": "1.640",
+ "nv1.small": "5.220",
+ "nv1.medium": "5.250",
+ "nv1.large": "5.490",
+ "nv1.xlarge": "5.610",
+ "cc1.4xlarge": "1.300",
+ "cc2.8xlarge": "2.400",
+ "m3.xlarge": "0.780",
+ "m3.2xlarge": "1.560",
+ "cr1.8xlarge": "3.500",
+            "os1.8xlarge": "4.310"
+ },
+ "osc_inc_eu_west_3": {
+ "t1.micro": "0.040",
+ "m1.small": "0.090",
+ "m1.medium": "0.120",
+ "m1.large": "0.360",
+ "m1.xlarge": "0.730",
+ "c1.medium": "0.230",
+ "c1.xlarge": "0.900",
+ "m2.xlarge": "0.410",
+ "m2.2xlarge": "0.820",
+ "m2.4xlarge": "1.640",
+ "nv1.small": "5.220",
+ "nv1.medium": "5.250",
+ "nv1.large": "5.490",
+ "nv1.xlarge": "5.610",
+ "cc1.4xlarge": "1.300",
+ "cc2.8xlarge": "2.400",
+ "m3.xlarge": "0.780",
+ "m3.2xlarge": "1.560",
+ "cr1.8xlarge": "3.500",
+            "os1.8xlarge": "4.310"
+ },
+ "osc_inc_us_east_1": {
+ "t1.micro": "0.020",
+ "m1.small": "0.060",
+ "m1.medium": "0.180",
+ "m1.large": "0.240",
+ "m1.xlarge": "0.730",
+ "c1.medium": "0.150",
+ "c1.xlarge": "0.580",
+ "m2.xlarge": "0.410",
+ "m2.2xlarge": "1.020",
+ "m2.4xlarge": "2.040",
+ "nv1.small": "5.190",
+ "nv1.medium": "5.250",
+ "nv1.large": "5.490",
+ "nv1.xlarge": "5.610",
+ "cc1.4xlarge": "1.610",
+ "cc2.8xlarge": "2.400",
+ "m3.xlarge": "0.500",
+ "m3.2xlarge": "1.560",
+ "cr1.8xlarge": "3.500",
+            "os1.8xlarge": "6.400"
+ }
+ },
+ "storage": {},
+ "updated": 1397154837
+}
diff --git a/awx/lib/site-packages/libcloud/dns/__init__.py b/awx/lib/site-packages/libcloud/dns/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/libcloud/dns/base.py b/awx/lib/site-packages/libcloud/dns/base.py
new file mode 100644
index 0000000000..0e3ac6fe85
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/base.py
@@ -0,0 +1,486 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import with_statement
+
+import datetime
+
+from libcloud import __version__
+from libcloud.common.base import ConnectionUserAndKey, BaseDriver
+from libcloud.dns.types import RecordType
+
+__all__ = [
+ 'Zone',
+ 'Record',
+ 'DNSDriver'
+]
+
+
+class Zone(object):
+ """
+ DNS zone.
+ """
+
+ def __init__(self, id, domain, type, ttl, driver, extra=None):
+ """
+ :param id: Zone id.
+ :type id: ``str``
+
+ :param domain: The name of the domain.
+ :type domain: ``str``
+
+ :param type: Zone type (master, slave).
+ :type type: ``str``
+
+ :param ttl: Default TTL for records in this zone (in seconds).
+ :type ttl: ``int``
+
+ :param driver: DNSDriver instance.
+ :type driver: :class:`DNSDriver`
+
+ :param extra: (optional) Extra attributes (driver specific).
+ :type extra: ``dict``
+ """
+ self.id = str(id) if id else None
+ self.domain = domain
+ self.type = type
+ self.ttl = ttl or None
+ self.driver = driver
+ self.extra = extra or {}
+
+ def list_records(self):
+ return self.driver.list_records(zone=self)
+
+ def create_record(self, name, type, data, extra=None):
+ return self.driver.create_record(name=name, zone=self, type=type,
+ data=data, extra=extra)
+
+ def update(self, domain=None, type=None, ttl=None, extra=None):
+ return self.driver.update_zone(zone=self, domain=domain, type=type,
+ ttl=ttl, extra=extra)
+
+ def delete(self):
+ return self.driver.delete_zone(zone=self)
+
+ def export_to_bind_format(self):
+ return self.driver.export_zone_to_bind_format(zone=self)
+
+ def export_to_bind_zone_file(self, file_path):
+ self.driver.export_zone_to_bind_zone_file(zone=self,
+ file_path=file_path)
+
+ def __repr__(self):
+ return ('' %
+ (self.domain, self.ttl, self.driver.name))
+
+
+class Record(object):
+ """
+ Zone record / resource.
+ """
+
+ def __init__(self, id, name, type, data, zone, driver, extra=None):
+ """
+ :param id: Record id
+ :type id: ``str``
+
+ :param name: Hostname or FQDN.
+ :type name: ``str``
+
+ :param type: DNS record type (A, AAAA, ...).
+ :type type: :class:`RecordType`
+
+ :param data: Data for the record (depends on the record type).
+ :type data: ``str``
+
+ :param zone: Zone instance.
+ :type zone: :class:`Zone`
+
+ :param driver: DNSDriver instance.
+ :type driver: :class:`DNSDriver`
+
+ :param extra: (optional) Extra attributes (driver specific).
+ :type extra: ``dict``
+ """
+ self.id = str(id) if id else None
+ self.name = name
+ self.type = type
+ self.data = data
+ self.zone = zone
+ self.driver = driver
+ self.extra = extra or {}
+
+ def update(self, name=None, type=None, data=None, extra=None):
+ return self.driver.update_record(record=self, name=name, type=type,
+ data=data, extra=extra)
+
+ def delete(self):
+ return self.driver.delete_record(record=self)
+
+ def _get_numeric_id(self):
+ record_id = self.id
+
+ if record_id.isdigit():
+ record_id = int(record_id)
+
+ return record_id
+
+ def __repr__(self):
+ return ('' %
+ (self.zone.id, self.name, self.type, self.data,
+ self.driver.name))
+
+
+class DNSDriver(BaseDriver):
+ """
+ A base DNSDriver class to derive from
+
+ This class is always subclassed by a specific driver.
+ """
+ connectionCls = ConnectionUserAndKey
+ name = None
+ website = None
+
+ def __init__(self, key, secret=None, secure=True, host=None, port=None,
+ **kwargs):
+ """
+ :param key: API key or username to used (required)
+ :type key: ``str``
+
+ :param secret: Secret password to be used (required)
+ :type secret: ``str``
+
+ :param secure: Weither to use HTTPS or HTTP. Note: Some providers
+ only support HTTPS, and it is on by default.
+ :type secure: ``bool``
+
+ :param host: Override hostname used for connections.
+ :type host: ``str``
+
+ :param port: Override port used for connections.
+ :type port: ``int``
+
+ :return: ``None``
+ """
+ super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure,
+ host=host, port=port, **kwargs)
+
+ def list_record_types(self):
+ """
+ Return a list of RecordType objects supported by the provider.
+
+ :return: ``list`` of :class:`RecordType`
+ """
+ return list(self.RECORD_TYPE_MAP.keys())
+
+ def iterate_zones(self):
+ """
+ Return a generator to iterate over available zones.
+
+ :rtype: ``generator`` of :class:`Zone`
+ """
+ raise NotImplementedError(
+ 'iterate_zones not implemented for this driver')
+
+ def list_zones(self):
+ """
+ Return a list of zones.
+
+ :return: ``list`` of :class:`Zone`
+ """
+ return list(self.iterate_zones())
+
+ def iterate_records(self, zone):
+ """
+ Return a generator to iterate over records for the provided zone.
+
+ :param zone: Zone to list records for.
+ :type zone: :class:`Zone`
+
+ :rtype: ``generator`` of :class:`Record`
+ """
+ raise NotImplementedError(
+ 'iterate_records not implemented for this driver')
+
+ def list_records(self, zone):
+ """
+ Return a list of records for the provided zone.
+
+ :param zone: Zone to list records for.
+ :type zone: :class:`Zone`
+
+ :return: ``list`` of :class:`Record`
+ """
+ return list(self.iterate_records(zone))
+
+ def get_zone(self, zone_id):
+ """
+ Return a Zone instance.
+
+ :param zone_id: ID of the required zone
+ :type zone_id: ``str``
+
+ :rtype: :class:`Zone`
+ """
+ raise NotImplementedError(
+ 'get_zone not implemented for this driver')
+
+ def get_record(self, zone_id, record_id):
+ """
+ Return a Record instance.
+
+ :param zone_id: ID of the required zone
+ :type zone_id: ``str``
+
+ :param record_id: ID of the required record
+ :type record_id: ``str``
+
+ :rtype: :class:`Record`
+ """
+ raise NotImplementedError(
+ 'get_record not implemented for this driver')
+
+ def create_zone(self, domain, type='master', ttl=None, extra=None):
+ """
+ Create a new zone.
+
+ :param domain: Zone domain name (e.g. example.com)
+ :type domain: ``str``
+
+ :param type: Zone type (master / slave).
+ :type type: ``str``
+
+ :param ttl: TTL for new records. (optional)
+ :type ttl: ``int``
+
+ :param extra: Extra attributes (driver specific). (optional)
+ :type extra: ``dict``
+
+ :rtype: :class:`Zone`
+ """
+ raise NotImplementedError(
+ 'create_zone not implemented for this driver')
+
+ def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
+ """
+ Update en existing zone.
+
+ :param zone: Zone to update.
+ :type zone: :class:`Zone`
+
+ :param domain: Zone domain name (e.g. example.com)
+ :type domain: ``str``
+
+ :param type: Zone type (master / slave).
+ :type type: ``str``
+
+ :param ttl: TTL for new records. (optional)
+ :type ttl: ``int``
+
+ :param extra: Extra attributes (driver specific). (optional)
+ :type extra: ``dict``
+
+ :rtype: :class:`Zone`
+ """
+ raise NotImplementedError(
+ 'update_zone not implemented for this driver')
+
+ def create_record(self, name, zone, type, data, extra=None):
+ """
+ Create a new record.
+
+ :param name: Record name without the domain name (e.g. www).
+ Note: If you want to create a record for a base domain
+ name, you should specify empty string ('') for this
+ argument.
+ :type name: ``str``
+
+ :param zone: Zone where the requested record is created.
+ :type zone: :class:`Zone`
+
+ :param type: DNS record type (A, AAAA, ...).
+ :type type: :class:`RecordType`
+
+ :param data: Data for the record (depends on the record type).
+ :type data: ``str``
+
+ :param extra: Extra attributes (driver specific). (optional)
+ :type extra: ``dict``
+
+ :rtype: :class:`Record`
+ """
+ raise NotImplementedError(
+ 'create_record not implemented for this driver')
+
+ def update_record(self, record, name, type, data, extra):
+ """
+ Update an existing record.
+
+ :param record: Record to update.
+ :type record: :class:`Record`
+
+ :param name: Record name without the domain name (e.g. www).
+ Note: If you want to create a record for a base domain
+ name, you should specify empty string ('') for this
+ argument.
+ :type name: ``str``
+
+ :param type: DNS record type (A, AAAA, ...).
+ :type type: :class:`RecordType`
+
+ :param data: Data for the record (depends on the record type).
+ :type data: ``str``
+
+ :param extra: (optional) Extra attributes (driver specific).
+ :type extra: ``dict``
+
+ :rtype: :class:`Record`
+ """
+ raise NotImplementedError(
+ 'update_record not implemented for this driver')
+
+ def delete_zone(self, zone):
+ """
+ Delete a zone.
+
+ Note: This will delete all the records belonging to this zone.
+
+ :param zone: Zone to delete.
+ :type zone: :class:`Zone`
+
+ :rtype: ``bool``
+ """
+ raise NotImplementedError(
+ 'delete_zone not implemented for this driver')
+
+ def delete_record(self, record):
+ """
+ Delete a record.
+
+ :param record: Record to delete.
+ :type record: :class:`Record`
+
+ :rtype: ``bool``
+ """
+ raise NotImplementedError(
+ 'delete_record not implemented for this driver')
+
+ def export_zone_to_bind_format(self, zone):
+ """
+ Export Zone object to the BIND compatible format.
+
+ :param zone: Zone to export.
+ :type zone: :class:`Zone`
+
+ :return: Zone data in BIND compatible format.
+ :rtype: ``str``
+ """
+ if zone.type != 'master':
+ raise ValueError('You can only generate BIND out for master zones')
+
+ lines = []
+
+ # For consistent output, records are sorted based on the id
+ records = zone.list_records()
+ records = sorted(records, key=Record._get_numeric_id)
+
+ date = datetime.datetime.now().strftime('%Y-%m-%d %H:%m:%S')
+ values = {'version': __version__, 'date': date}
+
+ lines.append('; Generated by Libcloud v%(version)s on %(date)s' %
+ values)
+ lines.append('$ORIGIN %(domain)s.' % {'domain': zone.domain})
+ lines.append('$TTL %(domain_ttl)s\n' % {'domain_ttl': zone.ttl})
+
+ for record in records:
+ line = self._get_bind_record_line(record=record)
+ lines.append(line)
+
+ output = '\n'.join(lines)
+ return output
+
+ def export_zone_to_bind_zone_file(self, zone, file_path):
+ """
+ Export Zone object to the BIND compatible format and write result to a
+ file.
+
+ :param zone: Zone to export.
+ :type zone: :class:`Zone`
+
+ :param file_path: File path where the output will be saved.
+ :type file_path: ``str``
+ """
+ result = self.export_zone_to_bind_format(zone=zone)
+
+ with open(file_path, 'w') as fp:
+ fp.write(result)
+
+ def _get_bind_record_line(self, record):
+ """
+ Generate BIND record line for the provided record.
+
+ :param record: Record to generate the line for.
+ :type record: :class:`Record`
+
+ :return: Bind compatible record line.
+ :rtype: ``str``
+ """
+ parts = []
+
+ if record.name:
+ name = '%(name)s.%(domain)s' % {'name': record.name,
+ 'domain': record.zone.domain}
+ else:
+ name = record.zone.domain
+
+ name += '.'
+
+ ttl = record.extra['ttl'] if 'ttl' in record.extra else record.zone.ttl
+ ttl = str(ttl)
+ data = record.data
+
+ if record.type in [RecordType.CNAME, RecordType.DNAME, RecordType.MX,
+ RecordType.PTR, RecordType.SRV]:
+ # Make sure trailing dot is present
+ if data[len(data) - 1] != '.':
+ data += '.'
+
+ if record.type in [RecordType.TXT, RecordType.SPF] and ' ' in data:
+ # Escape the quotes
+ data = data.replace('"', '\\"')
+
+ # Quote the string
+ data = '"%s"' % (data)
+
+ if record.type in [RecordType.MX, RecordType.SRV]:
+ priority = str(record.extra['priority'])
+ parts = [name, ttl, 'IN', record.type, priority, data]
+ else:
+ parts = [name, ttl, 'IN', record.type, data]
+
+ line = '\t'.join(parts)
+ return line
+
+ def _string_to_record_type(self, string):
+ """
+ Return a string representation of a DNS record type to a
+ libcloud RecordType ENUM.
+
+ :rtype: ``str``
+ """
+ string = string.upper()
+ record_type = getattr(RecordType, string)
+ return record_type
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/__init__.py b/awx/lib/site-packages/libcloud/dns/drivers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/dummy.py b/awx/lib/site-packages/libcloud/dns/drivers/dummy.py
new file mode 100644
index 0000000000..04da354a61
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/dummy.py
@@ -0,0 +1,218 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.dns.base import DNSDriver, Zone, Record
+from libcloud.dns.types import RecordType
+from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
+from libcloud.dns.types import RecordDoesNotExistError
+from libcloud.dns.types import RecordAlreadyExistsError
+
+
+class DummyDNSDriver(DNSDriver):
+    """
+    Dummy DNS driver.
+
+    A purely in-memory driver used for testing and documentation examples:
+    zones and records are kept in a dict on the instance and no network
+    calls are made.
+
+    >>> from libcloud.dns.drivers.dummy import DummyDNSDriver
+    >>> driver = DummyDNSDriver('key', 'secret')
+    >>> driver.name
+    'Dummy DNS Provider'
+
+    .. note:: NOTE(review): several doctest expected-output lines in this
+              class appear to have been stripped during vendoring (repr
+              outputs reduced to blank lines or empty lists) -- regenerate
+              them before relying on these doctests.
+    """
+
+    name = 'Dummy DNS Provider'
+    website = 'http://example.com'
+
+    def __init__(self, api_key, api_secret):
+        """
+        :param api_key: API key or username to be used (required)
+        :type api_key: ``str``
+
+        :param api_secret: Secret password to be used (required)
+        :type api_secret: ``str``
+
+        :rtype: ``None``
+        """
+        # Does not call the parent constructor: no connection setup is
+        # performed; all state lives in this mapping of
+        # zone_id -> {'zone': Zone, 'records': {record_id: Record}}.
+        self._zones = {}
+
+    def list_record_types(self):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> driver.list_record_types()
+        ['A']
+
+        @inherits: :class:`DNSDriver.list_record_types`
+        """
+        return [RecordType.A]
+
+    def list_zones(self):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> driver.list_zones()
+        []
+
+        @inherits: :class:`DNSDriver.list_zones`
+        """
+
+        return [zone['zone'] for zone in list(self._zones.values())]
+
+    # NOTE(review): the second expected output below looks garbled -- after
+    # create_record it should show the created record, not [].
+    def list_records(self, zone):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> zone = driver.create_zone(domain='apache.org', type='master',
+        ...                           ttl=100)
+        >>> list(zone.list_records())
+        []
+        >>> record = driver.create_record(name='libcloud', zone=zone,
+        ...                               type=RecordType.A, data='127.0.0.1')
+        >>> list(zone.list_records()) #doctest: +ELLIPSIS
+        []
+        """
+        return self._zones[zone.id]['records'].values()
+
+    def get_zone(self, zone_id):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> driver.get_zone(zone_id='foobar')
+        ... #doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+        ZoneDoesNotExistError:
+
+        @inherits: :class:`DNSDriver.get_zone`
+        """
+
+        if zone_id not in self._zones:
+            raise ZoneDoesNotExistError(driver=self, value=None,
+                                        zone_id=zone_id)
+
+        return self._zones[zone_id]['zone']
+
+    def get_record(self, zone_id, record_id):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> driver.get_record(zone_id='doesnotexist', record_id='exists')
+        ... #doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+        ZoneDoesNotExistError:
+
+        @inherits: :class:`DNSDriver.get_record`
+        """
+
+        # Raises ZoneDoesNotExistError first when the zone itself is missing.
+        self.get_zone(zone_id=zone_id)
+        zone_records = self._zones[zone_id]['records']
+
+        if record_id not in zone_records:
+            raise RecordDoesNotExistError(record_id=record_id, value=None,
+                                          driver=self)
+
+        return zone_records[record_id]
+
+    # NOTE(review): the expected output after the first '>>> zone' below was
+    # stripped (it should show the Zone repr).
+    def create_zone(self, domain, type='master', ttl=None, extra=None):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> zone = driver.create_zone(domain='apache.org', type='master',
+        ...                           ttl=100)
+        >>> zone
+
+        >>> zone = driver.create_zone(domain='apache.org', type='master',
+        ...                           ttl=100)
+        ... #doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+        ZoneAlreadyExistsError:
+
+        @inherits: :class:`DNSDriver.create_zone`
+        """
+
+        # Synthetic id derived from the domain, so duplicate domains collide.
+        id = 'id-%s' % (domain)
+
+        if id in self._zones:
+            raise ZoneAlreadyExistsError(zone_id=id, value=None, driver=self)
+
+        zone = Zone(id=id, domain=domain, type=type, ttl=ttl, extra={},
+                    driver=self)
+        self._zones[id] = {'zone': zone,
+                           'records': {}}
+        return zone
+
+    # NOTE(review): the expected output after '>>> record' below was
+    # stripped (it should show the Record repr).
+    def create_record(self, name, zone, type, data, extra=None):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> zone = driver.create_zone(domain='apache.org', type='master',
+        ...                           ttl=100)
+        >>> record = driver.create_record(name='libcloud', zone=zone,
+        ...                               type=RecordType.A, data='127.0.0.1')
+        >>> record #doctest: +ELLIPSIS
+
+        >>> record = driver.create_record(name='libcloud', zone=zone,
+        ...                               type=RecordType.A, data='127.0.0.1')
+        ... #doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+        RecordAlreadyExistsError:
+
+        @inherits: :class:`DNSDriver.create_record`
+        """
+        # Synthetic id derived from the record name, so duplicates collide.
+        id = 'id-%s' % (name)
+
+        # Re-fetch so a missing zone raises ZoneDoesNotExistError.
+        zone = self.get_zone(zone_id=zone.id)
+
+        if id in self._zones[zone.id]['records']:
+            raise RecordAlreadyExistsError(record_id=id, value=None,
+                                           driver=self)
+
+        record = Record(id=id, name=name, type=type, data=data, extra=extra,
+                        zone=zone, driver=self)
+        self._zones[zone.id]['records'][id] = record
+        return record
+
+    def delete_zone(self, zone):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> zone = driver.create_zone(domain='apache.org', type='master',
+        ...                           ttl=100)
+        >>> driver.delete_zone(zone)
+        True
+        >>> driver.delete_zone(zone) #doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+        ZoneDoesNotExistError:
+
+        @inherits: :class:`DNSDriver.delete_zone`
+        """
+        # Validates existence (raises ZoneDoesNotExistError) before deleting.
+        self.get_zone(zone_id=zone.id)
+
+        del self._zones[zone.id]
+        return True
+
+    def delete_record(self, record):
+        """
+        >>> driver = DummyDNSDriver('key', 'secret')
+        >>> zone = driver.create_zone(domain='apache.org', type='master',
+        ...                           ttl=100)
+        >>> record = driver.create_record(name='libcloud', zone=zone,
+        ...                               type=RecordType.A, data='127.0.0.1')
+        >>> driver.delete_record(record)
+        True
+        >>> driver.delete_record(record) #doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+        RecordDoesNotExistError:
+
+        @inherits: :class:`DNSDriver.delete_record`
+        """
+        # Validates existence (raises RecordDoesNotExistError) first.
+        self.get_record(zone_id=record.zone.id, record_id=record.id)
+
+        del self._zones[record.zone.id]['records'][record.id]
+        return True
+
+
+# Allow running this module directly to execute the doctests above.
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/gandi.py b/awx/lib/site-packages/libcloud/dns/drivers/gandi.py
new file mode 100644
index 0000000000..48a6ed5d9e
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/gandi.py
@@ -0,0 +1,270 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import with_statement
+
+__all__ = [
+ 'GandiDNSDriver'
+]
+
+from libcloud.common.gandi import BaseGandiDriver, GandiConnection
+from libcloud.common.gandi import GandiResponse
+from libcloud.dns.types import Provider, RecordType
+from libcloud.dns.types import RecordError
+from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
+from libcloud.dns.base import DNSDriver, Zone, Record
+
+
+TTL_MIN = 30
+TTL_MAX = 2592000 # 30 days
+
+
+class NewZoneVersion(object):
+ """
+ Changes to a zone in the Gandi DNS service need to be wrapped in a new
+ version object. The changes are made to the new version, then that
+ version is made active.
+
+ In effect, this is a transaction.
+
+ Any calls made inside this context manager will be applied to a new version
+ id. If your changes are successful (and only if they are successful) they
+ are activated.
+ """
+
+ def __init__(self, driver, zone):
+ self.driver = driver
+ self.connection = driver.connection
+ self.zone = zone
+
+ def __enter__(self):
+ zid = int(self.zone.id)
+ self.connection.set_context({'zone_id': self.zone.id})
+ vid = self.connection.request('domain.zone.version.new', zid).object
+ self.vid = vid
+ return vid
+
+ def __exit__(self, type, value, traceback):
+ if not traceback:
+ zid = int(self.zone.id)
+ con = self.connection
+ con.set_context({'zone_id': self.zone.id})
+ con.request('domain.zone.version.set', zid, self.vid).object
+
+
+class GandiDNSResponse(GandiResponse):
+ exceptions = {
+ 581042: ZoneDoesNotExistError,
+ }
+
+
+class GandiDNSConnection(GandiConnection):
+ responseCls = GandiDNSResponse
+
+
+class GandiDNSDriver(BaseGandiDriver, DNSDriver):
+ """
+ API reference can be found at:
+
+ http://doc.rpc.gandi.net/domain/reference.html
+ """
+
+ type = Provider.GANDI
+ name = 'Gandi DNS'
+ website = 'http://www.gandi.net/domain'
+
+ connectionCls = GandiDNSConnection
+
+ RECORD_TYPE_MAP = {
+ RecordType.A: 'A',
+ RecordType.AAAA: 'AAAA',
+ RecordType.CNAME: 'CNAME',
+ RecordType.LOC: 'LOC',
+ RecordType.MX: 'MX',
+ RecordType.NS: 'NS',
+ RecordType.SPF: 'SPF',
+ RecordType.SRV: 'SRV',
+ RecordType.TXT: 'TXT',
+ RecordType.WKS: 'WKS',
+ }
+
+ def _to_zone(self, zone):
+ return Zone(
+ id=str(zone['id']),
+ domain=zone['name'],
+ type='master',
+ ttl=0,
+ driver=self,
+ extra={}
+ )
+
+ def _to_zones(self, zones):
+ ret = []
+ for z in zones:
+ ret.append(self._to_zone(z))
+ return ret
+
+ def list_zones(self):
+ zones = self.connection.request('domain.zone.list')
+ return self._to_zones(zones.object)
+
+ def get_zone(self, zone_id):
+ zid = int(zone_id)
+ self.connection.set_context({'zone_id': zone_id})
+ zone = self.connection.request('domain.zone.info', zid)
+ return self._to_zone(zone.object)
+
+ def create_zone(self, domain, type='master', ttl=None, extra=None):
+ params = {
+ 'name': domain,
+ }
+ info = self.connection.request('domain.zone.create', params)
+ return self._to_zone(info.object)
+
+ def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
+ zid = int(zone.id)
+ params = {'name': domain}
+ self.connection.set_context({'zone_id': zone.id})
+ zone = self.connection.request('domain.zone.update', zid, params)
+ return self._to_zone(zone.object)
+
+ def delete_zone(self, zone):
+ zid = int(zone.id)
+ self.connection.set_context({'zone_id': zone.id})
+ res = self.connection.request('domain.zone.delete', zid)
+ return res.object
+
+ def _to_record(self, record, zone):
+ return Record(
+ id='%s:%s' % (record['type'], record['name']),
+ name=record['name'],
+ type=self._string_to_record_type(record['type']),
+ data=record['value'],
+ zone=zone,
+ driver=self,
+ extra={'ttl': record['ttl']}
+ )
+
+ def _to_records(self, records, zone):
+ retval = []
+ for r in records:
+ retval.append(self._to_record(r, zone))
+ return retval
+
+ def list_records(self, zone):
+ zid = int(zone.id)
+ self.connection.set_context({'zone_id': zone.id})
+ records = self.connection.request('domain.zone.record.list', zid, 0)
+ return self._to_records(records.object, zone)
+
+ def get_record(self, zone_id, record_id):
+ zid = int(zone_id)
+ record_type, name = record_id.split(':', 1)
+ filter_opts = {
+ 'name': name,
+ 'type': record_type
+ }
+ self.connection.set_context({'zone_id': zone_id})
+ records = self.connection.request('domain.zone.record.list',
+ zid, 0, filter_opts).object
+
+ if len(records) == 0:
+ raise RecordDoesNotExistError(value='', driver=self,
+ record_id=record_id)
+
+ return self._to_record(records[0], self.get_zone(zone_id))
+
+ def _validate_record(self, record_id, name, record_type, data, extra):
+ if len(data) > 1024:
+ raise RecordError('Record data must be <= 1024 characters',
+ driver=self, record_id=record_id)
+ if extra and 'ttl' in extra:
+ if extra['ttl'] < TTL_MIN:
+ raise RecordError('TTL must be at least 30 seconds',
+ driver=self, record_id=record_id)
+ if extra['ttl'] > TTL_MAX:
+ raise RecordError('TTL must not excdeed 30 days',
+ driver=self, record_id=record_id)
+
+ def create_record(self, name, zone, type, data, extra=None):
+ self._validate_record(None, name, type, data, extra)
+
+ zid = int(zone.id)
+
+ create = {
+ 'name': name,
+ 'type': self.RECORD_TYPE_MAP[type],
+ 'value': data
+ }
+
+ if 'ttl' in extra:
+ create['ttl'] = extra['ttl']
+
+ with NewZoneVersion(self, zone) as vid:
+ con = self.connection
+ con.set_context({'zone_id': zone.id})
+ rec = con.request('domain.zone.record.add',
+ zid, vid, create).object
+
+ return self._to_record(rec, zone)
+
+ def update_record(self, record, name, type, data, extra):
+ self._validate_record(record.id, name, type, data, extra)
+
+ filter_opts = {
+ 'name': record.name,
+ 'type': self.RECORD_TYPE_MAP[record.type]
+ }
+
+ update = {
+ 'name': name,
+ 'type': self.RECORD_TYPE_MAP[type],
+ 'value': data
+ }
+
+ if 'ttl' in extra:
+ update['ttl'] = extra['ttl']
+
+ zid = int(record.zone.id)
+
+ with NewZoneVersion(self, record.zone) as vid:
+ con = self.connection
+ con.set_context({'zone_id': record.zone.id})
+ con.request('domain.zone.record.delete',
+ zid, vid, filter_opts)
+ res = con.request('domain.zone.record.add',
+ zid, vid, update).object
+
+ return self._to_record(res, record.zone)
+
+ def delete_record(self, record):
+ zid = int(record.zone.id)
+
+ filter_opts = {
+ 'name': record.name,
+ 'type': self.RECORD_TYPE_MAP[record.type]
+ }
+
+ with NewZoneVersion(self, record.zone) as vid:
+ con = self.connection
+ con.set_context({'zone_id': record.zone.id})
+ count = con.request('domain.zone.record.delete',
+ zid, vid, filter_opts).object
+
+ if count == 1:
+ return True
+
+ raise RecordDoesNotExistError(value='No such record', driver=self,
+ record_id=record.id)
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/google.py b/awx/lib/site-packages/libcloud/dns/drivers/google.py
new file mode 100644
index 0000000000..ca3e5f06da
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/google.py
@@ -0,0 +1,345 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'GoogleDNSDriver'
+]
+
+API_VERSION = 'v1beta1'
+
+import re
+from libcloud.common.google import GoogleResponse, GoogleBaseConnection
+from libcloud.common.google import ResourceNotFoundError
+from libcloud.dns.types import Provider, RecordType
+from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
+from libcloud.dns.base import DNSDriver, Zone, Record
+
+
+class GoogleDNSResponse(GoogleResponse):
+ pass
+
+
+class GoogleDNSConnection(GoogleBaseConnection):
+ host = "www.googleapis.com"
+ responseCls = GoogleDNSResponse
+
+ def __init__(self, user_id, key, secure, auth_type=None,
+ credential_file=None, project=None, **kwargs):
+ super(GoogleDNSConnection, self).\
+ __init__(user_id, key, secure=secure, auth_type=auth_type,
+ credential_file=credential_file, **kwargs)
+ self.request_path = '/dns/%s/projects/%s' % (API_VERSION, project)
+
+
+class GoogleDNSDriver(DNSDriver):
+ type = Provider.GOOGLE
+ name = 'Google DNS'
+ connectionCls = GoogleDNSConnection
+ website = 'https://cloud.google.com/'
+
+ RECORD_TYPE_MAP = {
+ RecordType.A: 'A',
+ RecordType.AAAA: 'AAAA',
+ RecordType.CNAME: 'CNAME',
+ RecordType.MX: 'MX',
+ RecordType.NS: 'NS',
+ RecordType.PTR: 'PTR',
+ RecordType.SOA: 'SOA',
+ RecordType.SPF: 'SPF',
+ RecordType.SRV: 'SRV',
+ RecordType.TXT: 'TXT',
+ }
+
+ def __init__(self, user_id, key, project=None, auth_type=None, scopes=None,
+ **kwargs):
+ self.auth_type = auth_type
+ self.project = project
+ self.scopes = scopes
+ if not self.project:
+ raise ValueError('Project name must be specified using '
+ '"project" keyword.')
+ super(GoogleDNSDriver, self).__init__(user_id, key, scopes, **kwargs)
+
+ def iterate_zones(self):
+ """
+ Return a generator to iterate over available zones.
+
+ :rtype: ``generator`` of :class:`Zone`
+ """
+ return self._get_more('zones')
+
+ def iterate_records(self, zone):
+ """
+ Return a generator to iterate over records for the provided zone.
+
+ :param zone: Zone to list records for.
+ :type zone: :class:`Zone`
+
+ :rtype: ``generator`` of :class:`Record`
+ """
+ return self._get_more('records', zone=zone)
+
+ def get_zone(self, zone_id):
+ """
+ Return a Zone instance.
+
+ :param zone_id: ID of the required zone
+ :type zone_id: ``str``
+
+ :rtype: :class:`Zone`
+ """
+ request = '/managedZones/%s' % (zone_id)
+
+ try:
+ response = self.connection.request(request, method='GET').object
+ except ResourceNotFoundError:
+ raise ZoneDoesNotExistError(value='',
+ driver=self.connection.driver,
+ zone_id=zone_id)
+
+ return self._to_zone(response)
+
+ def get_record(self, zone_id, record_id):
+ """
+ Return a Record instance.
+
+ :param zone_id: ID of the required zone
+ :type zone_id: ``str``
+
+ :param record_id: ID of the required record
+ :type record_id: ``str``
+
+ :rtype: :class:`Record`
+ """
+ (record_type, record_name) = record_id.split(':', 1)
+
+ params = {
+ 'name': record_name,
+ 'type': record_type,
+ }
+
+ request = '/managedZones/%s/rrsets' % (zone_id)
+
+ try:
+ response = self.connection.request(request, method='GET',
+ params=params).object
+ except ResourceNotFoundError:
+ raise ZoneDoesNotExistError(value='',
+ driver=self.connection.driver,
+ zone_id=zone_id)
+
+ if len(response['rrsets']) > 0:
+ zone = self.get_zone(zone_id)
+ return self._to_record(response['rrsets'][0], zone)
+
+ raise RecordDoesNotExistError(value='', driver=self.connection.driver,
+ record_id=record_id)
+
+ def create_zone(self, domain, type='master', ttl=None, extra=None):
+ """
+ Create a new zone.
+
+ :param domain: Zone domain name (e.g. example.com.) with a \'.\'
+ at the end.
+ :type domain: ``str``
+
+ :param type: Zone type (master is the only one supported).
+ :type type: ``str``
+
+ :param ttl: TTL for new records. (unused)
+ :type ttl: ``int``
+
+ :param extra: Extra attributes (driver specific). (optional)
+ :type extra: ``dict``
+
+ :rtype: :class:`Zone`
+ """
+ name = None
+ description = ''
+
+ if extra:
+ description = extra.get('description')
+ name = extra.get('name')
+
+ if name is None:
+ name = self._cleanup_domain(domain)
+
+ data = {
+ 'dnsName': domain,
+ 'name': name,
+ 'description': description,
+ }
+
+ request = '/managedZones'
+ response = self.connection.request(request, method='POST',
+ data=data).object
+ return self._to_zone(response)
+
+ def create_record(self, name, zone, type, data, extra=None):
+ """
+ Create a new record.
+
+ :param name: Record name fully qualified, with a \'.\' at the end.
+ :type name: ``str``
+
+ :param zone: Zone where the requested record is created.
+ :type zone: :class:`Zone`
+
+ :param type: DNS record type (A, AAAA, ...).
+ :type type: :class:`RecordType`
+
+ :param data: Data for the record (depends on the record type).
+ :type data: ``str``
+
+ :param extra: Extra attributes. (optional)
+ :type extra: ``dict``
+
+ :rtype: :class:`Record`
+ """
+ ttl = data.get('ttl', None)
+ rrdatas = data.get('rrdatas', [])
+
+ data = {
+ 'additions': [
+ {
+ 'name': name,
+ 'type': type,
+ 'ttl': int(ttl),
+ 'rrdatas': rrdatas,
+ }
+ ]
+ }
+ request = '/managedZones/%s/changes' % (zone.id)
+ response = self.connection.request(request, method='POST',
+ data=data).object
+ return self._to_record(response['additions'][0], zone)
+
+ def delete_zone(self, zone):
+ """
+ Delete a zone.
+
+ Note: This will delete all the records belonging to this zone.
+
+ :param zone: Zone to delete.
+ :type zone: :class:`Zone`
+
+ :rtype: ``bool``
+ """
+ request = '/managedZones/%s' % (zone.id)
+ response = self.connection.request(request, method='DELETE')
+ return response.success()
+
+ def delete_record(self, record):
+ """
+ Delete a record.
+
+ :param record: Record to delete.
+ :type record: :class:`Record`
+
+ :rtype: ``bool``
+ """
+ data = {
+ 'deletions': [
+ {
+ 'name': record.name,
+ 'type': record.type,
+ 'rrdatas': record.data['rrdatas'],
+ 'ttl': record.data['ttl']
+ }
+ ]
+ }
+ request = '/managedZones/%s/changes' % (record.zone.id)
+ response = self.connection.request(request, method='POST',
+ data=data)
+ return response.success()
+
+ def _get_more(self, rtype, **kwargs):
+ last_key = None
+ exhausted = False
+ while not exhausted:
+ items, last_key, exhausted = self._get_data(rtype, last_key,
+ **kwargs)
+ for item in items:
+ yield item
+
+ def _get_data(self, rtype, last_key, **kwargs):
+ params = {}
+
+ if last_key:
+ params['pageToken'] = last_key
+
+ if rtype == 'zones':
+ request = '/managedZones'
+ transform_func = self._to_zones
+ r_key = 'managedZones'
+ elif rtype == 'records':
+ zone = kwargs['zone']
+ request = '/managedZones/%s/rrsets' % (zone.id)
+ transform_func = self._to_records
+ r_key = 'rrsets'
+
+ response = self.connection.request(request, method='GET',
+ params=params,)
+
+ if response.success():
+ nextpage = response.object.get('nextPageToken', None)
+ items = transform_func(response.object.get(r_key), **kwargs)
+ exhausted = False if nextpage is not None else True
+ return items, nextpage, exhausted
+ else:
+ return [], None, True
+
+ def _ex_connection_class_kwargs(self):
+ return {'auth_type': self.auth_type,
+ 'project': self.project,
+ 'scopes': self.scopes}
+
+ def _to_zones(self, response):
+ zones = []
+ for r in response:
+ zones.append(self._to_zone(r))
+ return zones
+
+ def _to_zone(self, r):
+ extra = {}
+
+ if 'description' in r:
+ extra['description'] = r.get('description')
+
+ extra['creationTime'] = r.get('creationTime')
+ extra['nameServers'] = r.get('nameServers')
+ extra['id'] = r.get('id')
+
+ return Zone(id=r['name'], domain=r['dnsName'],
+ type='master', ttl=0, driver=self, extra=extra)
+
+ def _to_records(self, response, zone):
+ records = []
+ for r in response:
+ records.append(self._to_record(r, zone))
+ return records
+
+ def _to_record(self, r, zone):
+ record_id = '%s:%s' % (r['type'], r['name'])
+ return Record(id=record_id, name=r['name'],
+ type=r['type'], data=r, zone=zone,
+ driver=self, extra={})
+
+ def _cleanup_domain(self, domain):
+ # name can only contain lower case alphanumeric characters and hyphens
+ domain = re.sub(r'[^a-zA-Z0-9-]', '-', domain)
+ if domain[-1] == '-':
+ domain = domain[:-1]
+ return domain
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/hostvirtual.py b/awx/lib/site-packages/libcloud/dns/drivers/hostvirtual.py
new file mode 100644
index 0000000000..1d9eec2ce6
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/hostvirtual.py
@@ -0,0 +1,243 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'HostVirtualDNSDriver'
+]
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.misc import merge_valid_keys, get_new_obj
+from libcloud.common.hostvirtual import HostVirtualResponse
+from libcloud.common.hostvirtual import HostVirtualConnection
+from libcloud.compute.drivers.hostvirtual import API_ROOT
+from libcloud.dns.types import Provider, RecordType
+from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
+from libcloud.dns.base import DNSDriver, Zone, Record
+
+try:
+ import simplejson as json
+except:
+ import json
+
+VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl']
+
+
+class HostVirtualDNSResponse(HostVirtualResponse):
+ def parse_error(self):
+ context = self.connection.context
+ status = int(self.status)
+
+ if status == httplib.NOT_FOUND:
+ if context['resource'] == 'zone':
+ raise ZoneDoesNotExistError(value='', driver=self,
+ zone_id=context['id'])
+ elif context['resource'] == 'record':
+ raise RecordDoesNotExistError(value='', driver=self,
+ record_id=context['id'])
+
+ super(HostVirtualDNSResponse, self).parse_error()
+ return self.body
+
+
+class HostVirtualDNSConnection(HostVirtualConnection):
+ responseCls = HostVirtualDNSResponse
+
+
+class HostVirtualDNSDriver(DNSDriver):
+ type = Provider.HOSTVIRTUAL
+ name = 'Host Virtual DNS'
+ website = 'http://www.vr.org/'
+ connectionCls = HostVirtualDNSConnection
+
+ RECORD_TYPE_MAP = {
+ RecordType.A: 'A',
+ RecordType.AAAA: 'AAAA',
+ RecordType.CNAME: 'CNAME',
+ RecordType.MX: 'MX',
+ RecordType.NS: 'SPF',
+ RecordType.SRV: 'SRV',
+ RecordType.TXT: 'TXT',
+ }
+
+ def __init__(self, key, secure=True, host=None, port=None):
+ super(HostVirtualDNSDriver, self).__init__(key=key, secure=secure,
+ host=host, port=port)
+
+ def _to_zones(self, items):
+ zones = []
+ for item in items:
+ zones.append(self._to_zone(item))
+ return zones
+
+ def _to_zone(self, item):
+ extra = {}
+ if 'records' in item:
+ extra['records'] = item['records']
+ if item['type'] == 'NATIVE':
+ item['type'] = 'master'
+ zone = Zone(id=item['id'], domain=item['name'],
+ type=item['type'], ttl=item['ttl'],
+ driver=self, extra=extra)
+ return zone
+
+ def _to_records(self, items, zone=None):
+ records = []
+
+ for item in items:
+ records.append(self._to_record(item=item, zone=zone))
+ return records
+
+ def _to_record(self, item, zone=None):
+ extra = {'ttl': item['ttl']}
+ type = self._string_to_record_type(item['type'])
+ record = Record(id=item['id'], name=item['name'],
+ type=type, data=item['content'],
+ zone=zone, driver=self, extra=extra)
+ return record
+
+ def list_zones(self):
+ result = self.connection.request(
+ API_ROOT + '/dns/zones/').object
+ zones = self._to_zones(result)
+ return zones
+
+ def list_records(self, zone):
+ params = {'id': zone.id}
+ self.connection.set_context({'resource': 'zone', 'id': zone.id})
+ result = self.connection.request(
+ API_ROOT + '/dns/records/', params=params).object
+ records = self._to_records(items=result, zone=zone)
+ return records
+
+ def get_zone(self, zone_id):
+ params = {'id': zone_id}
+ self.connection.set_context({'resource': 'zone', 'id': zone_id})
+ result = self.connection.request(
+ API_ROOT + '/dns/zone/', params=params).object
+ if 'id' not in result:
+ raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)
+ zone = self._to_zone(result)
+ return zone
+
+ def get_record(self, zone_id, record_id):
+ zone = self.get_zone(zone_id=zone_id)
+ params = {'id': record_id}
+ self.connection.set_context({'resource': 'record', 'id': record_id})
+ result = self.connection.request(
+ API_ROOT + '/dns/record/', params=params).object
+ if 'id' not in result:
+ raise RecordDoesNotExistError(value='',
+ driver=self, record_id=record_id)
+ record = self._to_record(item=result, zone=zone)
+ return record
+
+ def delete_zone(self, zone):
+ params = {'zone_id': zone.id}
+ self.connection.set_context({'resource': 'zone', 'id': zone.id})
+ result = self.connection.request(
+ API_ROOT + '/dns/zone/', params=params, method='DELETE').object
+ return bool(result)
+
+ def delete_record(self, record):
+ params = {'id': record.id}
+ self.connection.set_context({'resource': 'record', 'id': record.id})
+ result = self.connection.request(
+ API_ROOT + '/dns/record/', params=params, method='DELETE').object
+
+ return bool(result)
+
+ def create_zone(self, domain, type='NATIVE', ttl=None, extra=None):
+ if type == 'master':
+ type = 'NATIVE'
+ elif type == 'slave':
+ type = 'SLAVE'
+ params = {'name': domain, 'type': type, 'ttl': ttl}
+ result = self.connection.request(
+ API_ROOT + '/dns/zone/',
+ data=json.dumps(params), method='POST').object
+ extra = {
+ 'soa': result['soa'],
+ 'ns': result['ns']
+ }
+ zone = Zone(id=result['id'], domain=domain,
+ type=type, ttl=ttl, extra=extra, driver=self)
+ return zone
+
+ def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
+ params = {'id': zone.id}
+ if domain:
+ params['name'] = domain
+ if type:
+ params['type'] = type
+ self.connection.set_context({'resource': 'zone', 'id': zone.id})
+ self.connection.request(API_ROOT + '/dns/zone/',
+ data=json.dumps(params), method='PUT').object
+ updated_zone = get_new_obj(
+ obj=zone, klass=Zone,
+ attributes={
+ 'domain': domain,
+ 'type': type,
+ 'ttl': ttl,
+ 'extra': extra
+ })
+ return updated_zone
+
+ def create_record(self, name, zone, type, data, extra=None):
+ params = {
+ 'name': name,
+ 'type': self.RECORD_TYPE_MAP[type],
+ 'domain_id': zone.id,
+ 'content': data
+ }
+ merged = merge_valid_keys(
+ params=params,
+ valid_keys=VALID_RECORD_EXTRA_PARAMS,
+ extra=extra
+ )
+ self.connection.set_context({'resource': 'zone', 'id': zone.id})
+ result = self.connection.request(
+ API_ROOT + '/dns/record/',
+ data=json.dumps(params), method='POST').object
+ record = Record(id=result['id'], name=name,
+ type=type, data=data,
+ extra=merged, zone=zone, driver=self)
+ return record
+
+ def update_record(self, record, name=None, type=None,
+ data=None, extra=None):
+ params = {
+ 'domain_id': record.zone.id,
+ 'record_id': record.id
+ }
+ if name:
+ params['name'] = name
+ if data:
+ params['content'] = data
+ if type is not None:
+ params['type'] = self.RECORD_TYPE_MAP[type]
+ merged = merge_valid_keys(
+ params=params,
+ valid_keys=VALID_RECORD_EXTRA_PARAMS,
+ extra=extra
+ )
+ self.connection.set_context({'resource': 'record', 'id': record.id})
+ self.connection.request(API_ROOT + '/dns/record/',
+ data=json.dumps(params), method='PUT').object
+ updated_record = get_new_obj(
+ obj=record, klass=Record, attributes={
+ 'name': name, 'data': data,
+ 'type': type,
+ 'extra': merged
+ })
+ return updated_record
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/linode.py b/awx/lib/site-packages/libcloud/dns/drivers/linode.py
new file mode 100644
index 0000000000..f0fdbdc756
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/linode.py
@@ -0,0 +1,272 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'LinodeDNSDriver'
+]
+
+from libcloud.utils.misc import merge_valid_keys, get_new_obj
+from libcloud.common.linode import (API_ROOT, LinodeException,
+ LinodeConnection, LinodeResponse)
+from libcloud.dns.types import Provider, RecordType
+from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
+from libcloud.dns.base import DNSDriver, Zone, Record
+
+
+VALID_ZONE_EXTRA_PARAMS = ['SOA_Email', 'Refresh_sec', 'Retry_sec',
+ 'Expire_sec', 'status', 'master_ips']
+
+VALID_RECORD_EXTRA_PARAMS = ['Priority', 'Weight', 'Port', 'Protocol',
+ 'TTL_sec']
+
+
+class LinodeDNSResponse(LinodeResponse):
+ def _make_excp(self, error):
+ result = super(LinodeDNSResponse, self)._make_excp(error)
+ if isinstance(result, LinodeException) and result.code == 5:
+ context = self.connection.context
+
+ if context['resource'] == 'zone':
+ result = ZoneDoesNotExistError(value='',
+ driver=self.connection.driver,
+ zone_id=context['id'])
+
+ elif context['resource'] == 'record':
+ result = RecordDoesNotExistError(value='',
+ driver=self.connection.driver,
+ record_id=context['id'])
+ return result
+
+
+class LinodeDNSConnection(LinodeConnection):
+ responseCls = LinodeDNSResponse
+
+
+class LinodeDNSDriver(DNSDriver):
+ type = Provider.LINODE
+ name = 'Linode DNS'
+ website = 'http://www.linode.com/'
+ connectionCls = LinodeDNSConnection
+
+ RECORD_TYPE_MAP = {
+ RecordType.NS: 'NS',
+ RecordType.MX: 'MX',
+ RecordType.A: 'A',
+ RecordType.AAAA: 'AAAA',
+ RecordType.CNAME: 'CNAME',
+ RecordType.TXT: 'TXT',
+ RecordType.SRV: 'SRV',
+ }
+
+ def list_zones(self):
+ params = {'api_action': 'domain.list'}
+ data = self.connection.request(API_ROOT, params=params).objects[0]
+ zones = self._to_zones(data)
+ return zones
+
+ def list_records(self, zone):
+ params = {'api_action': 'domain.resource.list', 'DOMAINID': zone.id}
+
+ self.connection.set_context(context={'resource': 'zone',
+ 'id': zone.id})
+ data = self.connection.request(API_ROOT, params=params).objects[0]
+ records = self._to_records(items=data, zone=zone)
+ return records
+
+ def get_zone(self, zone_id):
+ params = {'api_action': 'domain.list', 'DomainID': zone_id}
+ self.connection.set_context(context={'resource': 'zone',
+ 'id': zone_id})
+ data = self.connection.request(API_ROOT, params=params).objects[0]
+ zones = self._to_zones(data)
+
+ if len(zones) != 1:
+ raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)
+
+ return zones[0]
+
+ def get_record(self, zone_id, record_id):
+ zone = self.get_zone(zone_id=zone_id)
+ params = {'api_action': 'domain.resource.list', 'DomainID': zone_id,
+ 'ResourceID': record_id}
+ self.connection.set_context(context={'resource': 'record',
+ 'id': record_id})
+ data = self.connection.request(API_ROOT, params=params).objects[0]
+ records = self._to_records(items=data, zone=zone)
+
+ if len(records) != 1:
+ raise RecordDoesNotExistError(value='', driver=self,
+ record_id=record_id)
+
+ return records[0]
+
+ def create_zone(self, domain, type='master', ttl=None, extra=None):
+ """
+ Create a new zone.
+
+ API docs: http://www.linode.com/api/dns/domain.create
+ """
+ params = {'api_action': 'domain.create', 'Type': type,
+ 'Domain': domain}
+
+ if ttl:
+ params['TTL_sec'] = ttl
+
+ merged = merge_valid_keys(params=params,
+ valid_keys=VALID_ZONE_EXTRA_PARAMS,
+ extra=extra)
+ data = self.connection.request(API_ROOT, params=params).objects[0]
+ zone = Zone(id=data['DomainID'], domain=domain, type=type, ttl=ttl,
+ extra=merged, driver=self)
+ return zone
+
+ def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
+ """
+ Update an existing zone.
+
+ API docs: http://www.linode.com/api/dns/domain.update
+ """
+ params = {'api_action': 'domain.update', 'DomainID': zone.id}
+
+ if type:
+ params['Type'] = type
+
+ if domain:
+ params['Domain'] = domain
+
+ if ttl:
+ params['TTL_sec'] = ttl
+
+ merged = merge_valid_keys(params=params,
+ valid_keys=VALID_ZONE_EXTRA_PARAMS,
+ extra=extra)
+ self.connection.request(API_ROOT, params=params).objects[0]
+ updated_zone = get_new_obj(obj=zone, klass=Zone,
+ attributes={'domain': domain,
+ 'type': type, 'ttl': ttl,
+ 'extra': merged})
+ return updated_zone
+
+ def create_record(self, name, zone, type, data, extra=None):
+ """
+ Create a new record.
+
+ API docs: http://www.linode.com/api/dns/domain.resource.create
+ """
+ params = {'api_action': 'domain.resource.create', 'DomainID': zone.id,
+ 'Name': name, 'Target': data,
+ 'Type': self.RECORD_TYPE_MAP[type]}
+ merged = merge_valid_keys(params=params,
+ valid_keys=VALID_RECORD_EXTRA_PARAMS,
+ extra=extra)
+
+ result = self.connection.request(API_ROOT, params=params).objects[0]
+ record = Record(id=result['ResourceID'], name=name, type=type,
+ data=data, extra=merged, zone=zone, driver=self)
+ return record
+
+ def update_record(self, record, name=None, type=None, data=None,
+ extra=None):
+ """
+ Update an existing record.
+
+ API docs: http://www.linode.com/api/dns/domain.resource.update
+ """
+ params = {'api_action': 'domain.resource.update',
+ 'ResourceID': record.id, 'DomainID': record.zone.id}
+
+ if name:
+ params['Name'] = name
+
+ if data:
+ params['Target'] = data
+
+ if type is not None:
+ params['Type'] = self.RECORD_TYPE_MAP[type]
+
+ merged = merge_valid_keys(params=params,
+ valid_keys=VALID_RECORD_EXTRA_PARAMS,
+ extra=extra)
+
+ self.connection.request(API_ROOT, params=params).objects[0]
+ updated_record = get_new_obj(obj=record, klass=Record,
+ attributes={'name': name, 'data': data,
+ 'type': type,
+ 'extra': merged})
+ return updated_record
+
+ def delete_zone(self, zone):
+ params = {'api_action': 'domain.delete', 'DomainID': zone.id}
+
+ self.connection.set_context(context={'resource': 'zone',
+ 'id': zone.id})
+ data = self.connection.request(API_ROOT, params=params).objects[0]
+
+ return 'DomainID' in data
+
+ def delete_record(self, record):
+ params = {'api_action': 'domain.resource.delete',
+ 'DomainID': record.zone.id, 'ResourceID': record.id}
+
+ self.connection.set_context(context={'resource': 'record',
+ 'id': record.id})
+ data = self.connection.request(API_ROOT, params=params).objects[0]
+
+ return 'ResourceID' in data
+
+ def _to_zones(self, items):
+ """
+ Convert a list of items to the Zone objects.
+ """
+ zones = []
+
+ for item in items:
+ zones.append(self._to_zone(item))
+
+ return zones
+
+ def _to_zone(self, item):
+ """
+ Build an Zone object from the item dictionary.
+ """
+ extra = {'SOA_Email': item['SOA_EMAIL'], 'status': item['STATUS'],
+ 'description': item['DESCRIPTION']}
+ zone = Zone(id=item['DOMAINID'], domain=item['DOMAIN'],
+ type=item['TYPE'], ttl=item['TTL_SEC'], driver=self,
+ extra=extra)
+ return zone
+
+ def _to_records(self, items, zone=None):
+ """
+ Convert a list of items to the Record objects.
+ """
+ records = []
+
+ for item in items:
+ records.append(self._to_record(item=item, zone=zone))
+
+ return records
+
+ def _to_record(self, item, zone=None):
+ """
+ Build a Record object from the item dictionary.
+ """
+ extra = {'protocol': item['PROTOCOL'], 'ttl_sec': item['TTL_SEC'],
+ 'port': item['PORT'], 'weight': item['WEIGHT']}
+ type = self._string_to_record_type(item['TYPE'])
+ record = Record(id=item['RESOURCEID'], name=item['NAME'], type=type,
+ data=item['TARGET'], zone=zone, driver=self,
+ extra=extra)
+ return record
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/rackspace.py b/awx/lib/site-packages/libcloud/dns/drivers/rackspace.py
new file mode 100644
index 0000000000..d71f4c5975
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/rackspace.py
@@ -0,0 +1,450 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from libcloud.common.openstack import OpenStackDriverMixin
+
+__all__ = [
+ 'RackspaceUSDNSDriver',
+ 'RackspaceUKDNSDriver'
+]
+
+from libcloud.utils.py3 import httplib
+import copy
+
+from libcloud.common.base import PollingConnection
+from libcloud.common.types import LibcloudError
+from libcloud.utils.misc import merge_valid_keys, get_new_obj
+from libcloud.common.rackspace import AUTH_URL
+from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection
+from libcloud.compute.drivers.openstack import OpenStack_1_1_Response
+
+from libcloud.dns.types import Provider, RecordType
+from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
+from libcloud.dns.base import DNSDriver, Zone, Record
+
# 'extra' dictionary keys which are passed through to the Rackspace API
# for zones and records respectively.
VALID_ZONE_EXTRA_PARAMS = ['email', 'comment', 'ns1']
VALID_RECORD_EXTRA_PARAMS = ['ttl', 'comment', 'priority']
+
+
class RackspaceDNSResponse(OpenStack_1_1_Response):
    """
    Rackspace DNS Response class.
    """

    def parse_error(self):
        """
        Parse an error response body.

        :raises ZoneDoesNotExistError: on 404 when the connection context
            marks the request as a zone operation.
        :raises RecordDoesNotExistError: on 404 when the connection context
            marks the request as a record operation.
        :raises LibcloudError: if the body has no recognizable error format.
        :return: A formatted error string for recognized error bodies.
        """
        status = int(self.status)
        context = self.connection.context
        body = self.parse_body()

        if status == httplib.NOT_FOUND:
            if context['resource'] == 'zone':
                raise ZoneDoesNotExistError(value='', driver=self,
                                            zone_id=context['id'])
            elif context['resource'] == 'record':
                raise RecordDoesNotExistError(value='', driver=self,
                                              record_id=context['id'])
        if body:
            # Bug fix: the original condition was
            # "'code' and 'message' in body", which parses as
            # "'code' and ('message' in body)" and therefore only checked
            # for the 'message' key.
            if 'code' in body and 'message' in body:
                # 'details' is not always present, so don't KeyError on it.
                err = '%s - %s (%s)' % (body['code'], body['message'],
                                        body.get('details', ''))
                return err
            elif 'validationErrors' in body:
                errors = [m for m in body['validationErrors']['messages']]
                err = 'Validation errors: %s' % ', '.join(errors)
                return err

        raise LibcloudError('Unexpected status code: %s' % (status))
+
+
class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection):
    """
    Rackspace DNS Connection class.

    Mutating requests return an asynchronous job; ``PollingConnection``
    polls ``/status/<jobId>`` until the job completes.
    """

    responseCls = RackspaceDNSResponse
    XML_NAMESPACE = None
    poll_interval = 2.5
    timeout = 30

    auth_url = AUTH_URL
    _auth_version = '2.0'

    def __init__(self, *args, **kwargs):
        self.region = kwargs.pop('region', None)
        super(RackspaceDNSConnection, self).__init__(*args, **kwargs)

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        Build the request used to poll the async job started by the
        original request.
        """
        job_id = response.object['jobId']
        kwargs = {'action': '/status/%s' % (job_id),
                  'params': {'showDetails': True}}
        return kwargs

    def has_completed(self, response):
        """
        Return ``True`` once the async job has finished.

        :raises LibcloudError: if the job ended in the ``ERROR`` state.
        """
        status = response.object['status']
        if status == 'ERROR':
            data = response.object['error']

            # Bug fix: the original condition was
            # "'code' and 'message' in data", which parses as
            # "'code' and ('message' in data)" and therefore only checked
            # for the 'message' key.
            if 'code' in data and 'message' in data:
                message = '%s - %s (%s)' % (data['code'], data['message'],
                                            data.get('details', ''))
            else:
                message = data['message']

            raise LibcloudError(message,
                                driver=self.driver)

        return status == 'COMPLETED'

    def get_endpoint(self):
        """
        Look up the cloudDNS endpoint in the service catalog and adjust it
        for legacy accounts whose catalog only lists the other region.
        """
        if '2.0' in self._auth_version:
            ep = self.service_catalog.get_endpoint(name='cloudDNS',
                                                   service_type='rax:dns',
                                                   region=None)
        else:
            raise LibcloudError("Auth version %s not supported" %
                                (self._auth_version))

        public_url = ep.get('publicURL', None)

        # This is a nasty hack, but because of how global auth and old
        # accounts work, there is no way around it.
        if self.region == 'us':
            # Old UK account, which only has us endpoint in the catalog
            public_url = public_url.replace('https://lon.dns.api',
                                            'https://dns.api')
        if self.region == 'uk':
            # Old US account, which only has uk endpoint in the catalog
            public_url = public_url.replace('https://dns.api',
                                            'https://lon.dns.api')

        return public_url
+
+
class RackspaceDNSDriver(DNSDriver, OpenStackDriverMixin):
    """
    Driver for the Rackspace Cloud DNS API.

    All mutating operations (create / update / delete) are asynchronous on
    the Rackspace side, so they go through ``connection.async_request``,
    which polls until the job finishes (see ``RackspaceDNSConnection``).
    """

    name = 'Rackspace DNS'
    website = 'http://www.rackspace.com/'
    type = Provider.RACKSPACE
    connectionCls = RackspaceDNSConnection

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us', **kwargs):
        # Only the two legacy Rackspace regions exist for DNS.
        if region not in ['us', 'uk']:
            raise ValueError('Invalid region: %s' % (region))

        # NOTE(review): 'secure' is accepted but not forwarded to the base
        # class - confirm whether that is intentional.
        OpenStackDriverMixin.__init__(self, **kwargs)
        super(RackspaceDNSDriver, self).__init__(key=key, secret=secret,
                                                 host=host, port=port,
                                                 region=region)

    # Maps libcloud RecordType constants to the API's type strings.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }

    def iterate_zones(self):
        """
        Yield all zones, transparently following the API's offset/limit
        pagination (100 items per request).
        """
        offset = 0
        limit = 100
        while True:
            params = {
                'limit': limit,
                'offset': offset,
            }
            response = self.connection.request(
                action='/domains', params=params).object
            zones_list = response['domains']
            for item in zones_list:
                yield self._to_zone(item)

            if _rackspace_result_has_more(response, len(zones_list), limit):
                offset += limit
            else:
                break

    def iterate_records(self, zone):
        """
        Yield all records in the provided zone, following pagination.

        Pagination metadata lives under the 'recordsList' key of the
        domain detail response.
        """
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        offset = 0
        limit = 100
        while True:
            params = {
                'showRecord': True,
                'limit': limit,
                'offset': offset,
            }
            response = self.connection.request(
                action='/domains/%s' % (zone.id), params=params).object
            records_list = response['recordsList']
            records = records_list['records']
            for item in records:
                record = self._to_record(data=item, zone=zone)
                yield record

            if _rackspace_result_has_more(records_list, len(records), limit):
                offset += limit
            else:
                break

    def get_zone(self, zone_id):
        """
        Return a single Zone; a 404 surfaces as ZoneDoesNotExistError via
        the context set here.
        """
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        response = self.connection.request(action='/domains/%s' % (zone_id))
        zone = self._to_zone(data=response.object)
        return zone

    def get_record(self, zone_id, record_id):
        """
        Return a single Record; a 404 surfaces as RecordDoesNotExistError
        (or ZoneDoesNotExistError from the zone lookup).
        """
        zone = self.get_zone(zone_id=zone_id)
        self.connection.set_context({'resource': 'record', 'id': record_id})
        response = self.connection.request(action='/domains/%s/records/%s' %
                                           (zone_id, record_id)).object
        record = self._to_record(data=response, zone=zone)
        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param extra: Must contain an 'email' key; may contain 'comment'.
        :raises ValueError: if 'email' is missing from ``extra``.
        """
        extra = extra if extra else {}

        # Email address is required
        if 'email' not in extra:
            raise ValueError('"email" key must be present in extra dictionary')

        payload = {'name': domain, 'emailAddress': extra['email'],
                   'recordsList': {'records': []}}

        if ttl:
            payload['ttl'] = ttl

        if 'comment' in extra:
            payload['comment'] = extra['comment']

        data = {'domains': [payload]}
        response = self.connection.async_request(action='/domains',
                                                 method='POST', data=data)
        zone = self._to_zone(data=response.object['response']['domains'][0])
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone and return a new Zone object reflecting
        the changes (the input object is not mutated).

        :raises LibcloudError: if ``domain`` is provided - the domain name
            cannot be changed through the API.
        """
        # Only ttl, comment and email address can be changed
        extra = extra if extra else {}

        if domain:
            raise LibcloudError('Domain cannot be changed', driver=self)

        data = {}

        if ttl:
            data['ttl'] = int(ttl)

        if 'email' in extra:
            data['emailAddress'] = extra['email']

        if 'comment' in extra:
            data['comment'] = extra['comment']

        type = type if type else zone.type
        ttl = ttl if ttl else zone.ttl

        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        self.connection.async_request(action='/domains/%s' % (zone.id),
                                      method='PUT', data=data)
        # Build the returned object locally instead of re-fetching it.
        merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        updated_zone = get_new_obj(obj=zone, klass=Zone,
                                   attributes={'type': type,
                                               'ttl': ttl,
                                               'extra': merged})
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record in the provided zone.

        :param extra: May contain 'ttl' and 'priority'.
        """
        # Name must be a FQDN - e.g. if domain is "foo.com" then a record
        # name is "bar.foo.com"
        extra = extra if extra else {}

        name = self._to_full_record_name(domain=zone.domain, name=name)
        data = {'name': name, 'type': self.RECORD_TYPE_MAP[type],
                'data': data}

        if 'ttl' in extra:
            data['ttl'] = int(extra['ttl'])

        if 'priority' in extra:
            data['priority'] = int(extra['priority'])

        payload = {'records': [data]}
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        response = self.connection.async_request(action='/domains/%s/records'
                                                 % (zone.id), data=payload,
                                                 method='POST').object
        record = self._to_record(data=response['response']['records'][0],
                                 zone=zone)
        return record

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """
        Update an existing record and return a new Record object reflecting
        the changes (the input object is not mutated).
        """
        # Only data, ttl, and comment attributes can be modified, but name
        # attribute must always be present.
        extra = extra if extra else {}

        name = self._to_full_record_name(domain=record.zone.domain,
                                         name=record.name)
        payload = {'name': name}

        if data:
            payload['data'] = data

        if 'ttl' in extra:
            payload['ttl'] = extra['ttl']

        if 'comment' in extra:
            payload['comment'] = extra['comment']

        type = type if type is not None else record.type
        data = data if data else record.data

        self.connection.set_context({'resource': 'record', 'id': record.id})
        self.connection.async_request(action='/domains/%s/records/%s' %
                                      (record.zone.id, record.id),
                                      method='PUT', data=payload)

        # Build the returned object locally instead of re-fetching it.
        merged = merge_valid_keys(params=copy.deepcopy(record.extra),
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)
        updated_record = get_new_obj(obj=record, klass=Record,
                                     attributes={'type': type,
                                                 'data': data,
                                                 'driver': self,
                                                 'extra': merged})
        return updated_record

    def delete_zone(self, zone):
        """
        Delete the provided zone. Returns ``True`` (errors raise).
        """
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        self.connection.async_request(action='/domains/%s' % (zone.id),
                                      method='DELETE')
        return True

    def delete_record(self, record):
        """
        Delete the provided record. Returns ``True`` (errors raise).
        """
        self.connection.set_context({'resource': 'record', 'id': record.id})
        self.connection.async_request(action='/domains/%s/records/%s' %
                                      (record.zone.id, record.id),
                                      method='DELETE')
        return True

    def _to_zone(self, data):
        """
        Build a Zone object from the API response dictionary.
        """
        id = data['id']
        domain = data['name']
        # The API does not expose slave zones through this endpoint, so the
        # zone type is always reported as 'master'.
        type = 'master'
        ttl = data.get('ttl', 0)
        extra = {}

        if 'emailAddress' in data:
            extra['email'] = data['emailAddress']

        if 'comment' in data:
            extra['comment'] = data['comment']

        zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl),
                    driver=self, extra=extra)
        return zone

    def _to_record(self, data, zone):
        """
        Build a Record object from the API response dictionary.

        The record name is stored relative to the zone; the FQDN is kept
        in ``extra['fqdn']``.
        """
        id = data['id']
        fqdn = data['name']
        name = self._to_partial_record_name(domain=zone.domain, name=fqdn)
        type = self._string_to_record_type(data['type'])
        record_data = data['data']
        extra = {'fqdn': fqdn}

        for key in VALID_RECORD_EXTRA_PARAMS:
            if key in data:
                extra[key] = data[key]

        record = Record(id=str(id), name=name, type=type, data=record_data,
                        zone=zone, driver=self, extra=extra)
        return record

    def _to_full_record_name(self, domain, name):
        """
        Build a FQDN from a domain and record name.

        :param domain: Domain name.
        :type domain: ``str``

        :param name: Record name.
        :type name: ``str``
        """
        if name:
            name = '%s.%s' % (name, domain)
        else:
            # "root" records use the bare domain as their name.
            name = domain

        return name

    def _to_partial_record_name(self, domain, name):
        """
        Remove domain portion from the record name.

        :param domain: Domain name.
        :type domain: ``str``

        :param name: Full record name (fqdn).
        :type name: ``str``
        """
        if name == domain:
            # Map "root" record names to None to be consistent with other
            # drivers
            return None

        # Strip domain portion
        # NOTE(review): str.replace removes every occurrence of ".domain",
        # not just the trailing one - confirm names cannot embed the domain.
        name = name.replace('.%s' % (domain), '')
        return name

    def _ex_connection_class_kwargs(self):
        # Forward the OpenStack auth kwargs plus the selected region to the
        # connection class.
        kwargs = self.openstack_connection_kwargs()
        kwargs['region'] = self.region
        return kwargs
+
+
class RackspaceUSDNSDriver(RackspaceDNSDriver):
    """
    Rackspace DNS driver pinned to the US endpoint.
    """
    name = 'Rackspace DNS (US)'
    type = Provider.RACKSPACE_US

    def __init__(self, *args, **kwargs):
        # Force the US region regardless of what the caller passed.
        kwargs.update(region='us')
        super(RackspaceUSDNSDriver, self).__init__(*args, **kwargs)
+
+
class RackspaceUKDNSDriver(RackspaceDNSDriver):
    """
    Rackspace DNS driver pinned to the UK (London) endpoint.
    """
    name = 'Rackspace DNS (UK)'
    type = Provider.RACKSPACE_UK

    def __init__(self, *args, **kwargs):
        # Force the UK region regardless of what the caller passed.
        kwargs.update(region='uk')
        super(RackspaceUKDNSDriver, self).__init__(*args, **kwargs)
+
+
+def _rackspace_result_has_more(response, result_length, limit):
+ # If rackspace returns less than the limit, then we've reached the end of
+ # the result set.
+ if result_length < limit:
+ return False
+
+ # Paginated results return links to the previous and next sets of data, but
+ # 'next' only exists when there is more to get.
+ for item in response.get('links', ()):
+ if item['rel'] == 'next':
+ return True
+ return False
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/route53.py b/awx/lib/site-packages/libcloud/dns/drivers/route53.py
new file mode 100644
index 0000000000..f1a8f34920
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/route53.py
@@ -0,0 +1,527 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'Route53DNSDriver'
+]
+
+import base64
+import hmac
+import datetime
+import uuid
+import copy
+from libcloud.utils.py3 import httplib
+
+from hashlib import sha1
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.utils.py3 import b, urlencode
+
+from libcloud.utils.xml import findtext, findall, fixxpath
+from libcloud.dns.types import Provider, RecordType
+from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
+from libcloud.dns.base import DNSDriver, Zone, Record
+from libcloud.common.types import LibcloudError
+from libcloud.common.aws import AWSGenericResponse
+from libcloud.common.base import ConnectionUserAndKey
+
+
# Route53 API version, endpoint and derived request path prefix.
API_VERSION = '2012-02-29'
API_HOST = 'route53.amazonaws.com'
API_ROOT = '/%s/' % (API_VERSION)

# XML namespace used in both request and response documents.
NAMESPACE = 'https://%s/doc%s' % (API_HOST, API_ROOT)
+
+
class InvalidChangeBatch(LibcloudError):
    # Raised when Route53 rejects a ChangeResourceRecordSets batch, e.g.
    # when trying to DELETE a record that does not exist.
    pass
+
+
class Route53DNSResponse(AWSGenericResponse):
    """
    Amazon Route53 response class.
    """

    # XML namespace and xpath used by AWSGenericResponse to locate the
    # error element in an error response body.
    namespace = NAMESPACE
    xpath = 'Error'

    # Maps AWS error codes to the libcloud exceptions raised for them.
    exceptions = {
        'NoSuchHostedZone': ZoneDoesNotExistError,
        'InvalidChangeBatch': InvalidChangeBatch,
    }
+
+
class Route53Connection(ConnectionUserAndKey):
    """
    Connection class which signs each request with the AWS3-HTTPS scheme
    (HMAC-SHA1 over the request date).
    """

    host = API_HOST
    responseCls = Route53DNSResponse

    def pre_connect_hook(self, params, headers):
        """
        Add the Date header and the X-Amzn-Authorization signature header
        before the request is sent.
        """
        now = datetime.datetime.utcnow()
        time_string = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
        headers['Date'] = time_string

        signature = self._get_aws_auth_b64(self.key, time_string)
        auth_params = {'AWSAccessKeyId': self.user_id,
                       'Signature': signature,
                       'Algorithm': 'HmacSHA1'}
        auth_string = ','.join('%s=%s' % (key, value)
                               for key, value in auth_params.items())
        headers['X-Amzn-Authorization'] = 'AWS3-HTTPS ' + auth_string

        return params, headers

    def _get_aws_auth_b64(self, secret_key, time_string):
        # Sign the date string with the secret key and return the
        # base64-encoded HMAC-SHA1 digest as text.
        digest = hmac.new(b(secret_key), b(time_string),
                          digestmod=sha1).digest()
        return base64.b64encode(digest).decode('utf-8')
+
+
class Route53DNSDriver(DNSDriver):
    """
    Driver for the Amazon Route53 DNS service.

    Route53 has no per-record identifier, so this driver synthesizes
    record ids of the form "<TYPE>:<name>".
    """

    type = Provider.ROUTE53
    name = 'Route53 DNS'
    website = 'http://aws.amazon.com/route53/'
    connectionCls = Route53Connection

    # Maps libcloud RecordType constants to the API's type strings.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }

    def iterate_zones(self):
        """
        Return a generator which yields all the zones, following
        pagination.
        """
        return self._get_more('zones')

    def iterate_records(self, zone):
        """
        Return a generator which yields all the records in ``zone``,
        following pagination.
        """
        return self._get_more('records', zone=zone)

    def get_zone(self, zone_id):
        """
        Return a single Zone.

        :raises ZoneDoesNotExistError: on a NoSuchHostedZone response.
        """
        self.connection.set_context({'zone_id': zone_id})
        uri = API_ROOT + 'hostedzone/' + zone_id
        data = self.connection.request(uri).object
        elem = findall(element=data, xpath='HostedZone',
                       namespace=NAMESPACE)[0]
        return self._to_zone(elem)

    def get_record(self, zone_id, record_id):
        """
        Return a single Record.

        :param record_id: Synthetic id of the form "<TYPE>:<name>" as
            produced by this driver.
        :raises RecordDoesNotExistError: if no exact match exists.
        """
        zone = self.get_zone(zone_id=zone_id)
        record_type, name = record_id.split(':', 1)
        if name:
            full_name = ".".join((name, zone.domain))
        else:
            # "root" records use the bare domain as their name.
            full_name = zone.domain
        self.connection.set_context({'zone_id': zone_id})
        params = urlencode({
            'name': full_name,
            'type': record_type,
            'maxitems': '1'
        })
        uri = API_ROOT + 'hostedzone/' + zone_id + '/rrset?' + params
        data = self.connection.request(uri).object

        record = self._to_records(data=data, zone=zone)[0]

        # A cute aspect of the /rrset filters is that they are more pagination
        # hints than filters!!
        # So will return a result even if its not what you asked for.
        record_type_num = self._string_to_record_type(record_type)
        if record.name != name or record.type != record_type_num:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new hosted zone.

        :param extra: May contain a 'Comment' key which is stored in the
            zone's HostedZoneConfig.
        """
        zone = ET.Element('CreateHostedZoneRequest', {'xmlns': NAMESPACE})
        ET.SubElement(zone, 'Name').text = domain
        # CallerReference must be unique per request; a random UUID is.
        ET.SubElement(zone, 'CallerReference').text = str(uuid.uuid4())

        if extra and 'Comment' in extra:
            hzg = ET.SubElement(zone, 'HostedZoneConfig')
            ET.SubElement(hzg, 'Comment').text = extra['Comment']

        uri = API_ROOT + 'hostedzone'
        data = ET.tostring(zone)
        rsp = self.connection.request(uri, method='POST', data=data).object

        elem = findall(element=rsp, xpath='HostedZone', namespace=NAMESPACE)[0]
        return self._to_zone(elem=elem)

    def delete_zone(self, zone, ex_delete_records=False):
        """
        Delete a zone.

        :param ex_delete_records: Also delete all the zone's non-NS/SOA
            records first (Route53 refuses to delete a non-empty zone).
        :rtype: ``bool``
        """
        self.connection.set_context({'zone_id': zone.id})

        if ex_delete_records:
            self.ex_delete_all_records(zone=zone)

        uri = API_ROOT + 'hostedzone/%s' % (zone.id)
        response = self.connection.request(uri, method='DELETE')
        return response.status in [httplib.OK]

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record in the provided zone.
        """
        extra = extra or {}
        batch = [('CREATE', name, type, data, extra)]
        self._post_changeset(zone, batch)
        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        return Record(id=id, name=name, type=type, data=data, zone=zone,
                      driver=self, extra=extra)

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """
        Update an existing record and return a new Record object
        reflecting the changes.
        """
        name = name or record.name
        type = type if type is not None else record.type
        # Fix: the original defaulted extra twice ("extra = extra or
        # record.extra" followed by "if not extra: extra = record.extra");
        # a single defaulting expression is equivalent.
        extra = extra or record.extra

        # Multiple value records need to be handled specially - we need to
        # pass values for other records as well
        multiple_value_record = record.extra.get('_multi_value', False)
        other_records = record.extra.get('_other_records', [])

        if multiple_value_record and other_records:
            self._update_multi_value_record(record=record, name=name,
                                            type=type, data=data,
                                            extra=extra)
        else:
            self._update_single_value_record(record=record, name=name,
                                             type=type, data=data,
                                             extra=extra)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        return Record(id=id, name=name, type=type, data=data, zone=record.zone,
                      driver=self, extra=extra)

    def delete_record(self, record):
        """
        Delete a record.

        :raises RecordDoesNotExistError: if Route53 rejects the DELETE
            change batch (record gone already).
        :rtype: ``bool``
        """
        try:
            batch = [('DELETE', record.name, record.type, record.data,
                      record.extra)]
            self._post_changeset(record.zone, batch)
        except InvalidChangeBatch:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record.id)
        return True

    def ex_create_multi_value_record(self, name, zone, type, data, extra=None):
        """
        Create a record with multiple values with a single call.

        :param data: Values as a multi-line string (one value per line).

        :return: A list of created records.
        :rtype: ``list`` of :class:`libcloud.dns.base.Record`
        """
        extra = extra or {}

        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'CREATE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')
        ET.SubElement(rrs, 'Name').text = name + '.' + zone.domain
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type]
        ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        # Value is provided as a multi line string
        values = [value.strip() for value in data.split('\n') if
                  value.strip()]

        for value in values:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = value

        uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': zone.id})
        self.connection.request(uri, method='POST', data=data)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))

        records = []
        for value in values:
            record = Record(id=id, name=name, type=type, data=value, zone=zone,
                            driver=self, extra=extra)
            records.append(record)

        # Bug fix: the original returned only the last record ("return
        # record"), contradicting the documented list return type.
        return records

    def ex_delete_all_records(self, zone):
        """
        Remove all the records for the provided zone.

        NS and SOA records are skipped - Route53 does not allow deleting
        them for a hosted zone.

        :param zone: Zone to delete records for.
        :type zone: :class:`Zone`
        """
        deletions = []
        for r in zone.list_records():
            if r.type in (RecordType.NS, RecordType.SOA):
                continue
            deletions.append(('DELETE', r.name, r.type, r.data, r.extra))

        if deletions:
            self._post_changeset(zone, deletions)

    def _update_single_value_record(self, record, name=None, type=None,
                                    data=None, extra=None):
        # Route53 has no in-place update: delete the old record set and
        # re-create it in one atomic change batch.
        batch = [
            ('DELETE', record.name, record.type, record.data, record.extra),
            ('CREATE', name, type, data, extra)
        ]

        return self._post_changeset(record.zone, batch)

    def _update_multi_value_record(self, record, name=None, type=None,
                                   data=None, extra=None):
        # For a multi-value record set, the whole set must be deleted and
        # re-created; the sibling values come from extra['_other_records'].
        other_records = record.extra.get('_other_records', [])

        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        # Delete existing records
        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'DELETE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')
        ET.SubElement(rrs, 'Name').text = record.name + '.' + \
            record.zone.domain
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[record.type]
        ET.SubElement(rrs, 'TTL').text = str(record.extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        rrec = ET.SubElement(rrecs, 'ResourceRecord')
        ET.SubElement(rrec, 'Value').text = record.data

        for other_record in other_records:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = other_record['data']

        # Re-create new (updated) records. Since we are updating a multi value
        # record, only a single record is updated and others are left as is.
        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'CREATE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')
        ET.SubElement(rrs, 'Name').text = name + '.' + record.zone.domain
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type]
        ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        rrec = ET.SubElement(rrecs, 'ResourceRecord')
        ET.SubElement(rrec, 'Value').text = data

        for other_record in other_records:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = other_record['data']

        uri = API_ROOT + 'hostedzone/' + record.zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': record.zone.id})
        response = self.connection.request(uri, method='POST', data=data)

        return response.status == httplib.OK

    def _post_changeset(self, zone, changes_list):
        # Serialize a list of (action, name, type, data, extra) tuples into
        # a single ChangeResourceRecordSets request.
        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        for action, name, type_, data, extra in changes_list:
            change = ET.SubElement(changes, 'Change')
            ET.SubElement(change, 'Action').text = action

            rrs = ET.SubElement(change, 'ResourceRecordSet')
            ET.SubElement(rrs, 'Name').text = name + '.' + zone.domain
            ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type_]
            ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

            rrecs = ET.SubElement(rrs, 'ResourceRecords')
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = data

        uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': zone.id})
        response = self.connection.request(uri, method='POST', data=data)

        return response.status == httplib.OK

    def _to_zones(self, data):
        """
        Convert a ListHostedZones response document to Zone objects.
        """
        zones = []
        for element in data.findall(fixxpath(xpath='HostedZones/HostedZone',
                                             namespace=NAMESPACE)):
            zones.append(self._to_zone(element))

        return zones

    def _to_zone(self, elem):
        """
        Build a Zone object from a HostedZone XML element.
        """
        name = findtext(element=elem, xpath='Name', namespace=NAMESPACE)
        id = findtext(element=elem, xpath='Id',
                      namespace=NAMESPACE).replace('/hostedzone/', '')
        comment = findtext(element=elem, xpath='Config/Comment',
                           namespace=NAMESPACE)
        resource_record_count = int(findtext(element=elem,
                                             xpath='ResourceRecordSetCount',
                                             namespace=NAMESPACE))

        extra = {'Comment': comment, 'ResourceRecordSetCount':
                 resource_record_count}

        # Route53 does not expose a zone type or TTL at this level.
        zone = Zone(id=id, domain=name, type='master', ttl=0, driver=self,
                    extra=extra)
        return zone

    def _to_records(self, data, zone):
        """
        Convert a ListResourceRecordSets response document to Record
        objects, expanding multi-value record sets into one Record per
        value and cross-linking siblings via extra['_other_records'].
        """
        records = []
        elems = data.findall(
            fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
                     namespace=NAMESPACE))
        for elem in elems:
            record_set = elem.findall(fixxpath(
                xpath='ResourceRecords/ResourceRecord',
                namespace=NAMESPACE))
            record_count = len(record_set)
            multiple_value_record = (record_count > 1)

            record_set_records = []

            for index, record in enumerate(record_set):
                # Need to special handling for records with multiple values for
                # update to work correctly
                record = self._to_record(elem=elem, zone=zone, index=index)
                record.extra['_multi_value'] = multiple_value_record

                if multiple_value_record:
                    record.extra['_other_records'] = []

                record_set_records.append(record)

            # Store reference to other records so update works correctly
            if multiple_value_record:
                for index in range(0, len(record_set_records)):
                    record = record_set_records[index]

                    for other_index, other_record in \
                            enumerate(record_set_records):
                        if index == other_index:
                            # Skip current record
                            continue

                        extra = copy.deepcopy(other_record.extra)
                        extra.pop('_multi_value')
                        extra.pop('_other_records')

                        item = {'name': other_record.name,
                                'data': other_record.data,
                                'type': other_record.type,
                                'extra': extra}
                        record.extra['_other_records'].append(item)

            records.extend(record_set_records)

        return records

    def _to_record(self, elem, zone, index=0):
        """
        Build a Record object from a ResourceRecordSet XML element,
        selecting the value at ``index`` within the set.
        """
        name = findtext(element=elem, xpath='Name',
                        namespace=NAMESPACE)
        # Strip the trailing ".<zone domain>" to get the relative name.
        name = name[:-len(zone.domain) - 1]

        type = self._string_to_record_type(findtext(element=elem, xpath='Type',
                                                    namespace=NAMESPACE))
        ttl = int(findtext(element=elem, xpath='TTL', namespace=NAMESPACE))

        value_elem = elem.findall(
            fixxpath(xpath='ResourceRecords/ResourceRecord',
                     namespace=NAMESPACE))[index]
        data = findtext(element=(value_elem), xpath='Value',
                        namespace=NAMESPACE)

        extra = {'ttl': ttl}

        # Bug fix: "type" holds a RecordType constant (the result of
        # _string_to_record_type), not the raw API string, so the original
        # comparisons against 'MX' / 'SRV' could never match and the
        # priority/weight/port were silently left inside "data".
        if type == RecordType.MX:
            # Route53 stores the priority inside the value: "<prio> <host>"
            split = data.split()
            priority, data = split
            extra['priority'] = int(priority)
        elif type == RecordType.SRV:
            # Value layout: "<prio> <weight> <port> <target>"
            split = data.split()
            priority, weight, port, data = split
            extra['priority'] = int(priority)
            extra['weight'] = int(weight)
            extra['port'] = int(port)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        record = Record(id=id, name=name, type=type, data=data, zone=zone,
                        driver=self, extra=extra)
        return record

    def _get_more(self, rtype, **kwargs):
        # Generator which keeps fetching pages until the API reports the
        # result set is exhausted.
        exhausted = False
        last_key = None
        while not exhausted:
            items, last_key, exhausted = self._get_data(rtype, last_key,
                                                        **kwargs)
            for item in items:
                yield item

    def _get_data(self, rtype, last_key, **kwargs):
        """
        Fetch one page of zones or records.

        :return: tuple of (items, next-page key, exhausted flag).
        """
        params = {}
        if last_key:
            params['name'] = last_key
        path = API_ROOT + 'hostedzone'

        if rtype == 'zones':
            response = self.connection.request(path, params=params)
            transform_func = self._to_zones
        elif rtype == 'records':
            zone = kwargs['zone']
            path += '/%s/rrset' % (zone.id)
            self.connection.set_context({'zone_id': zone.id})
            response = self.connection.request(path, params=params)
            transform_func = self._to_records

        if response.status == httplib.OK:
            is_truncated = findtext(element=response.object,
                                    xpath='IsTruncated',
                                    namespace=NAMESPACE)
            exhausted = is_truncated != 'true'
            last_key = findtext(element=response.object,
                                xpath='NextRecordName',
                                namespace=NAMESPACE)
            items = transform_func(data=response.object, **kwargs)
            return items, last_key, exhausted
        else:
            return [], None, True
diff --git a/awx/lib/site-packages/libcloud/dns/drivers/zerigo.py b/awx/lib/site-packages/libcloud/dns/drivers/zerigo.py
new file mode 100644
index 0000000000..0af7a9f453
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/drivers/zerigo.py
@@ -0,0 +1,484 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'ZerigoDNSDriver'
+]
+
+
+import copy
+import base64
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import b
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+from libcloud.utils.misc import merge_valid_keys, get_new_obj
+from libcloud.utils.xml import findtext, findall
+from libcloud.common.base import XmlResponse, ConnectionUserAndKey
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.common.types import MalformedResponseError
+from libcloud.dns.types import Provider, RecordType
+from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
+from libcloud.dns.base import DNSDriver, Zone, Record
+
+API_HOST = 'ns.zerigo.com'
+API_VERSION = '1.1'
+API_ROOT = '/api/%s/' % (API_VERSION)
+
+VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers']
+VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority']
+
+# Number of items per page (maximum limit is 1000)
+ITEMS_PER_PAGE = 100
+
+
+class ZerigoError(LibcloudError):
+ def __init__(self, code, errors):
+ self.code = code
+ self.errors = errors or []
+
+ def __str__(self):
+ return 'Errors: %s' % (', '.join(self.errors))
+
+ def __repr__(self):
+ return ('' % (
+ self.code, len(self.errors)))
+
+
+class ZerigoDNSResponse(XmlResponse):
+ def success(self):
+ return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
+
+ def parse_error(self):
+ status = int(self.status)
+
+ if status == 401:
+ if not self.body:
+ raise InvalidCredsError(str(self.status) + ': ' + self.error)
+ else:
+ raise InvalidCredsError(self.body)
+ elif status == 404:
+ context = self.connection.context
+ if context['resource'] == 'zone':
+ raise ZoneDoesNotExistError(value='', driver=self,
+ zone_id=context['id'])
+ elif context['resource'] == 'record':
+ raise RecordDoesNotExistError(value='', driver=self,
+ record_id=context['id'])
+ elif status != 503:
+ try:
+ body = ET.XML(self.body)
+ except:
+ raise MalformedResponseError('Failed to parse XML',
+ body=self.body)
+
+ errors = []
+ for error in findall(element=body, xpath='error'):
+ errors.append(error.text)
+
+ raise ZerigoError(code=status, errors=errors)
+
+ return self.body
+
+
+class ZerigoDNSConnection(ConnectionUserAndKey):
+ host = API_HOST
+ secure = True
+ responseCls = ZerigoDNSResponse
+
+ def add_default_headers(self, headers):
+ auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
+ headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8'))
+ return headers
+
+ def request(self, action, params=None, data='', headers=None,
+ method='GET'):
+ if not headers:
+ headers = {}
+ if not params:
+ params = {}
+
+ if method in ("POST", "PUT"):
+ headers = {'Content-Type': 'application/xml; charset=UTF-8'}
+ return super(ZerigoDNSConnection, self).request(action=action,
+ params=params,
+ data=data,
+ method=method,
+ headers=headers)
+
+
+class ZerigoDNSDriver(DNSDriver):
+ type = Provider.ZERIGO
+ name = 'Zerigo DNS'
+ website = 'http://www.zerigo.com/'
+ connectionCls = ZerigoDNSConnection
+
+ RECORD_TYPE_MAP = {
+ RecordType.A: 'A',
+ RecordType.AAAA: 'AAAA',
+ RecordType.CNAME: 'CNAME',
+ RecordType.GEO: 'GEO',
+ RecordType.MX: 'MX',
+ RecordType.NAPTR: 'NAPTR',
+ RecordType.NS: 'NS',
+ RecordType.PTR: 'PTR',
+ RecordType.REDIRECT: 'REDIRECT',
+ RecordType.SPF: 'SPF',
+ RecordType.SRV: 'SRV',
+ RecordType.TXT: 'TXT',
+ RecordType.URL: 'URL',
+ }
+
+ def iterate_zones(self):
+ return self._get_more('zones')
+
+ def iterate_records(self, zone):
+ return self._get_more('records', zone=zone)
+
+ def get_zone(self, zone_id):
+ path = API_ROOT + 'zones/%s.xml' % (zone_id)
+ self.connection.set_context({'resource': 'zone', 'id': zone_id})
+ data = self.connection.request(path).object
+ zone = self._to_zone(elem=data)
+ return zone
+
+ def get_record(self, zone_id, record_id):
+ zone = self.get_zone(zone_id=zone_id)
+ self.connection.set_context({'resource': 'record', 'id': record_id})
+ path = API_ROOT + 'hosts/%s.xml' % (record_id)
+ data = self.connection.request(path).object
+ record = self._to_record(elem=data, zone=zone)
+ return record
+
+ def create_zone(self, domain, type='master', ttl=None, extra=None):
+ """
+ Create a new zone.
+
+ Provider API docs:
+ https://www.zerigo.com/docs/apis/dns/1.1/zones/create
+
+ @inherits: :class:`DNSDriver.create_zone`
+ """
+ path = API_ROOT + 'zones.xml'
+ zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
+ extra=extra)
+ data = self.connection.request(action=path,
+ data=ET.tostring(zone_elem),
+ method='POST').object
+ zone = self._to_zone(elem=data)
+ return zone
+
+ def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
+ """
+ Update an existing zone.
+
+ Provider API docs:
+ https://www.zerigo.com/docs/apis/dns/1.1/zones/update
+
+ @inherits: :class:`DNSDriver.update_zone`
+ """
+ if domain:
+ raise LibcloudError('Domain cannot be changed', driver=self)
+
+ path = API_ROOT + 'zones/%s.xml' % (zone.id)
+ zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
+ extra=extra)
+ response = self.connection.request(action=path,
+ data=ET.tostring(zone_elem),
+ method='PUT')
+ assert response.status == httplib.OK
+
+ merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
+ valid_keys=VALID_ZONE_EXTRA_PARAMS,
+ extra=extra)
+ updated_zone = get_new_obj(obj=zone, klass=Zone,
+ attributes={'type': type,
+ 'ttl': ttl,
+ 'extra': merged})
+ return updated_zone
+
+ def create_record(self, name, zone, type, data, extra=None):
+ """
+ Create a new record.
+
+ Provider API docs:
+ https://www.zerigo.com/docs/apis/dns/1.1/hosts/create
+
+ @inherits: :class:`DNSDriver.create_record`
+ """
+ path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
+ record_elem = self._to_record_elem(name=name, type=type, data=data,
+ extra=extra)
+ response = self.connection.request(action=path,
+ data=ET.tostring(record_elem),
+ method='POST')
+ assert response.status == httplib.CREATED
+ record = self._to_record(elem=response.object, zone=zone)
+ return record
+
+ def update_record(self, record, name=None, type=None, data=None,
+ extra=None):
+ path = API_ROOT + 'hosts/%s.xml' % (record.id)
+ record_elem = self._to_record_elem(name=name, type=type, data=data,
+ extra=extra)
+ response = self.connection.request(action=path,
+ data=ET.tostring(record_elem),
+ method='PUT')
+ assert response.status == httplib.OK
+
+ merged = merge_valid_keys(params=copy.deepcopy(record.extra),
+ valid_keys=VALID_RECORD_EXTRA_PARAMS,
+ extra=extra)
+ updated_record = get_new_obj(obj=record, klass=Record,
+ attributes={'type': type,
+ 'data': data,
+ 'extra': merged})
+ return updated_record
+
+ def delete_zone(self, zone):
+ path = API_ROOT + 'zones/%s.xml' % (zone.id)
+ self.connection.set_context({'resource': 'zone', 'id': zone.id})
+ response = self.connection.request(action=path, method='DELETE')
+ return response.status == httplib.OK
+
+ def delete_record(self, record):
+ path = API_ROOT + 'hosts/%s.xml' % (record.id)
+ self.connection.set_context({'resource': 'record', 'id': record.id})
+ response = self.connection.request(action=path, method='DELETE')
+ return response.status == httplib.OK
+
+ def ex_get_zone_by_domain(self, domain):
+ """
+ Retrieve a zone object by the domain name.
+
+ :param domain: The domain which should be used
+ :type domain: ``str``
+
+ :rtype: :class:`Zone`
+ """
+ path = API_ROOT + 'zones/%s.xml' % (domain)
+ self.connection.set_context({'resource': 'zone', 'id': domain})
+ data = self.connection.request(path).object
+ zone = self._to_zone(elem=data)
+ return zone
+
+ def ex_force_slave_axfr(self, zone):
+ """
+ Force a zone transfer.
+
+ :param zone: Zone which should be used.
+ :type zone: :class:`Zone`
+
+ :rtype: :class:`Zone`
+ """
+ path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id)
+ self.connection.set_context({'resource': 'zone', 'id': zone.id})
+ response = self.connection.request(path, method='POST')
+ assert response.status == httplib.ACCEPTED
+ return zone
+
+ def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None):
+ zone_elem = ET.Element('zone', {})
+
+ if domain:
+ domain_elem = ET.SubElement(zone_elem, 'domain')
+ domain_elem.text = domain
+
+ if type:
+ ns_type_elem = ET.SubElement(zone_elem, 'ns-type')
+
+ if type == 'master':
+ ns_type_elem.text = 'pri_sec'
+ elif type == 'slave':
+ if not extra or 'ns1' not in extra:
+ raise LibcloudError('ns1 extra attribute is required ' +
+ 'when zone type is slave', driver=self)
+
+ ns_type_elem.text = 'sec'
+ ns1_elem = ET.SubElement(zone_elem, 'ns1')
+ ns1_elem.text = extra['ns1']
+ elif type == 'std_master':
+ # TODO: Each driver should provide supported zone types
+ # Slave name servers are elsewhere
+ if not extra or 'slave-nameservers' not in extra:
+ raise LibcloudError('slave-nameservers extra ' +
+ 'attribute is required whenzone ' +
+ 'type is std_master', driver=self)
+
+ ns_type_elem.text = 'pri'
+ slave_nameservers_elem = ET.SubElement(zone_elem,
+ 'slave-nameservers')
+ slave_nameservers_elem.text = extra['slave-nameservers']
+
+ if ttl:
+ default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl')
+ default_ttl_elem.text = str(ttl)
+
+ if extra and 'tag-list' in extra:
+ tags = extra['tag-list']
+
+ tags_elem = ET.SubElement(zone_elem, 'tag-list')
+ tags_elem.text = ' '.join(tags)
+
+ return zone_elem
+
+ def _to_record_elem(self, name=None, type=None, data=None, extra=None):
+ record_elem = ET.Element('host', {})
+
+ if name:
+ name_elem = ET.SubElement(record_elem, 'hostname')
+ name_elem.text = name
+
+ if type is not None:
+ type_elem = ET.SubElement(record_elem, 'host-type')
+ type_elem.text = self.RECORD_TYPE_MAP[type]
+
+ if data:
+ data_elem = ET.SubElement(record_elem, 'data')
+ data_elem.text = data
+
+ if extra:
+ if 'ttl' in extra:
+ ttl_elem = ET.SubElement(record_elem, 'ttl',
+ {'type': 'integer'})
+ ttl_elem.text = str(extra['ttl'])
+
+ if 'priority' in extra:
+ # Only MX and SRV records support priority
+ priority_elem = ET.SubElement(record_elem, 'priority',
+ {'type': 'integer'})
+
+ priority_elem.text = str(extra['priority'])
+
+ if 'notes' in extra:
+ notes_elem = ET.SubElement(record_elem, 'notes')
+ notes_elem.text = extra['notes']
+
+ return record_elem
+
+ def _to_zones(self, elem):
+ zones = []
+
+ for item in findall(element=elem, xpath='zone'):
+ zone = self._to_zone(elem=item)
+ zones.append(zone)
+
+ return zones
+
+ def _to_zone(self, elem):
+ id = findtext(element=elem, xpath='id')
+ domain = findtext(element=elem, xpath='domain')
+ type = findtext(element=elem, xpath='ns-type')
+ type = 'master' if type.find('pri') == 0 else 'slave'
+ ttl = findtext(element=elem, xpath='default-ttl')
+
+ hostmaster = findtext(element=elem, xpath='hostmaster')
+ custom_ns = findtext(element=elem, xpath='custom-ns')
+ custom_nameservers = findtext(element=elem, xpath='custom-nameservers')
+ notes = findtext(element=elem, xpath='notes')
+ nx_ttl = findtext(element=elem, xpath='nx-ttl')
+ slave_nameservers = findtext(element=elem, xpath='slave-nameservers')
+ tags = findtext(element=elem, xpath='tag-list')
+ tags = tags.split(' ') if tags else []
+
+ extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns,
+ 'custom-nameservers': custom_nameservers, 'notes': notes,
+ 'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers,
+ 'tags': tags}
+ zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl),
+ driver=self, extra=extra)
+ return zone
+
+ def _to_records(self, elem, zone):
+ records = []
+
+ for item in findall(element=elem, xpath='host'):
+ record = self._to_record(elem=item, zone=zone)
+ records.append(record)
+
+ return records
+
+ def _to_record(self, elem, zone):
+ id = findtext(element=elem, xpath='id')
+ name = findtext(element=elem, xpath='hostname')
+ type = findtext(element=elem, xpath='host-type')
+ type = self._string_to_record_type(type)
+ data = findtext(element=elem, xpath='data')
+
+ notes = findtext(element=elem, xpath='notes', no_text_value=None)
+ state = findtext(element=elem, xpath='state', no_text_value=None)
+ fqdn = findtext(element=elem, xpath='fqdn', no_text_value=None)
+ priority = findtext(element=elem, xpath='priority', no_text_value=None)
+ ttl = findtext(element=elem, xpath='ttl', no_text_value=None)
+
+ if not name:
+ name = None
+
+ if ttl:
+ ttl = int(ttl)
+
+ extra = {'notes': notes, 'state': state, 'fqdn': fqdn,
+ 'priority': priority, 'ttl': ttl}
+
+ record = Record(id=id, name=name, type=type, data=data,
+ zone=zone, driver=self, extra=extra)
+ return record
+
+ def _get_more(self, rtype, **kwargs):
+ exhausted = False
+ last_key = None
+
+ while not exhausted:
+ items, last_key, exhausted = self._get_data(rtype, last_key,
+ **kwargs)
+
+ for item in items:
+ yield item
+
+ def _get_data(self, rtype, last_key, **kwargs):
+ # Note: last_key in this case really is a "last_page".
+ # TODO: Update base driver and change last_key to something more
+ # generic - e.g. marker
+ params = {}
+ params['per_page'] = ITEMS_PER_PAGE
+ params['page'] = last_key + 1 if last_key else 1
+
+ if rtype == 'zones':
+ path = API_ROOT + 'zones.xml'
+ response = self.connection.request(path)
+ transform_func = self._to_zones
+ elif rtype == 'records':
+ zone = kwargs['zone']
+ path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
+ self.connection.set_context({'resource': 'zone', 'id': zone.id})
+ response = self.connection.request(path, params=params)
+ transform_func = self._to_records
+
+ exhausted = False
+ result_count = int(response.headers.get('x-query-count', 0))
+
+ if (params['page'] * ITEMS_PER_PAGE) >= result_count:
+ exhausted = True
+
+ if response.status == httplib.OK:
+ items = transform_func(elem=response.object, **kwargs)
+ return items, params['page'], exhausted
+ else:
+ return [], None, True
diff --git a/awx/lib/site-packages/libcloud/dns/providers.py b/awx/lib/site-packages/libcloud/dns/providers.py
new file mode 100644
index 0000000000..64483a45ab
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/providers.py
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.utils.misc import get_driver as get_provider_driver
+from libcloud.utils.misc import set_driver as set_provider_driver
+from libcloud.dns.types import Provider
+
+DRIVERS = {
+ Provider.DUMMY:
+ ('libcloud.dns.drivers.dummy', 'DummyDNSDriver'),
+ Provider.LINODE:
+ ('libcloud.dns.drivers.linode', 'LinodeDNSDriver'),
+ Provider.ZERIGO:
+ ('libcloud.dns.drivers.zerigo', 'ZerigoDNSDriver'),
+ Provider.RACKSPACE:
+ ('libcloud.dns.drivers.rackspace', 'RackspaceDNSDriver'),
+ Provider.HOSTVIRTUAL:
+ ('libcloud.dns.drivers.hostvirtual', 'HostVirtualDNSDriver'),
+ Provider.ROUTE53:
+ ('libcloud.dns.drivers.route53', 'Route53DNSDriver'),
+ Provider.GANDI:
+ ('libcloud.dns.drivers.gandi', 'GandiDNSDriver'),
+ Provider.GOOGLE: ('libcloud.dns.drivers.google', 'GoogleDNSDriver'),
+ # Deprecated
+ Provider.RACKSPACE_US:
+ ('libcloud.dns.drivers.rackspace', 'RackspaceUSDNSDriver'),
+ Provider.RACKSPACE_UK:
+ ('libcloud.dns.drivers.rackspace', 'RackspaceUKDNSDriver')
+}
+
+
+def get_driver(provider):
+ return get_provider_driver(DRIVERS, provider)
+
+
+def set_driver(provider, module, klass):
+ return set_provider_driver(DRIVERS, provider, module, klass)
diff --git a/awx/lib/site-packages/libcloud/dns/types.py b/awx/lib/site-packages/libcloud/dns/types.py
new file mode 100644
index 0000000000..3b3a79e60f
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/dns/types.py
@@ -0,0 +1,115 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.common.types import LibcloudError
+
+__all__ = [
+ 'Provider',
+ 'RecordType',
+ 'ZoneError',
+ 'ZoneDoesNotExistError',
+ 'ZoneAlreadyExistsError',
+ 'RecordError',
+ 'RecordDoesNotExistError',
+ 'RecordAlreadyExistsError'
+]
+
+
+class Provider(object):
+ DUMMY = 'dummy'
+ LINODE = 'linode'
+ RACKSPACE = 'rackspace'
+ ZERIGO = 'zerigo'
+ ROUTE53 = 'route53'
+ HOSTVIRTUAL = 'hostvirtual'
+ GANDI = 'gandi'
+ GOOGLE = 'google'
+
+ # Deprecated
+ RACKSPACE_US = 'rackspace_us'
+ RACKSPACE_UK = 'rackspace_uk'
+
+
+class RecordType(object):
+ """
+ DNS record type.
+ """
+ A = 'A'
+ AAAA = 'AAAA'
+ MX = 'MX'
+ NS = 'NS'
+ CNAME = 'CNAME'
+ DNAME = 'DNAME'
+ TXT = 'TXT'
+ PTR = 'PTR'
+ SOA = 'SOA'
+ SPF = 'SPF'
+ SRV = 'SRV'
+ PTR = 'PTR'
+ NAPTR = 'NAPTR'
+ REDIRECT = 'REDIRECT'
+ GEO = 'GEO'
+ URL = 'URL'
+ WKS = 'WKS'
+ LOC = 'LOC'
+
+
+class ZoneError(LibcloudError):
+ error_type = 'ZoneError'
+ kwargs = ('zone_id', )
+
+ def __init__(self, value, driver, zone_id):
+ self.zone_id = zone_id
+ super(ZoneError, self).__init__(value=value, driver=driver)
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return ('<%s in %s, zone_id=%s, value=%s>' %
+ (self.error_type, repr(self.driver),
+ self.zone_id, self.value))
+
+
+class ZoneDoesNotExistError(ZoneError):
+ error_type = 'ZoneDoesNotExistError'
+
+
+class ZoneAlreadyExistsError(ZoneError):
+ error_type = 'ZoneAlreadyExistsError'
+
+
+class RecordError(LibcloudError):
+ error_type = 'RecordError'
+
+ def __init__(self, value, driver, record_id):
+ self.record_id = record_id
+ super(RecordError, self).__init__(value=value, driver=driver)
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __repr__(self):
+ return ('<%s in %s, record_id=%s, value=%s>' %
+ (self.error_type, repr(self.driver),
+ self.record_id, self.value))
+
+
+class RecordDoesNotExistError(RecordError):
+ error_type = 'RecordDoesNotExistError'
+
+
+class RecordAlreadyExistsError(RecordError):
+ error_type = 'RecordAlreadyExistsError'
diff --git a/awx/lib/site-packages/libcloud/httplib_ssl.py b/awx/lib/site-packages/libcloud/httplib_ssl.py
new file mode 100644
index 0000000000..29136ef201
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/httplib_ssl.py
@@ -0,0 +1,158 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Subclass for httplib.HTTPSConnection with optional certificate name
+verification, depending on libcloud.security settings.
+"""
+import os
+import re
+import socket
+import ssl
+import warnings
+
+import libcloud.security
+from libcloud.utils.py3 import httplib
+
+
+class LibcloudHTTPSConnection(httplib.HTTPSConnection):
+ """
+ LibcloudHTTPSConnection
+
+ Subclass of HTTPSConnection which verifies certificate names
+ if and only if CA certificates are available.
+ """
+ verify = True # verify by default
+ ca_cert = None # no default CA Certificate
+
+ def __init__(self, *args, **kwargs):
+ """
+ Constructor
+ """
+ self._setup_verify()
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+
+ def _setup_verify(self):
+ """
+ Setup Verify SSL or not
+
+ Reads security module's VERIFY_SSL_CERT and toggles whether
+ the class overrides the connect() class method or runs the
+ inherited httplib.HTTPSConnection connect()
+ """
+ self.verify = libcloud.security.VERIFY_SSL_CERT
+
+ if self.verify:
+ self._setup_ca_cert()
+ else:
+ warnings.warn(libcloud.security.VERIFY_SSL_DISABLED_MSG)
+
+ def _setup_ca_cert(self):
+ """
+ Setup CA Certs
+
+ Search in CA_CERTS_PATH for valid candidates and
+ return first match. Otherwise, complain about certs
+ not being available.
+ """
+ if not self.verify:
+ return
+
+ ca_certs_available = [cert
+ for cert in libcloud.security.CA_CERTS_PATH
+ if os.path.exists(cert) and os.path.isfile(cert)]
+ if ca_certs_available:
+ # use first available certificate
+ self.ca_cert = ca_certs_available[0]
+ else:
+ raise RuntimeError(
+ libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG)
+
+ def connect(self):
+ """
+ Connect
+
+ Checks if verification is toggled; if not, just call
+ httplib.HTTPSConnection's connect
+ """
+ if not self.verify:
+ return httplib.HTTPSConnection.connect(self)
+
+ # otherwise, create a connection and verify the hostname
+ # use socket.create_connection (in 2.6+) if possible
+ if getattr(socket, 'create_connection', None):
+ sock = socket.create_connection((self.host, self.port),
+ self.timeout)
+ else:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.connect((self.host, self.port))
+ self.sock = ssl.wrap_socket(sock,
+ self.key_file,
+ self.cert_file,
+ cert_reqs=ssl.CERT_REQUIRED,
+ ca_certs=self.ca_cert,
+ ssl_version=ssl.PROTOCOL_TLSv1)
+ cert = self.sock.getpeercert()
+ if not self._verify_hostname(self.host, cert):
+ raise ssl.SSLError('Failed to verify hostname')
+
+ def _verify_hostname(self, hostname, cert):
+ """
+ Verify hostname against peer cert
+
+ Check both commonName and entries in subjectAltName, using a
+ rudimentary glob to dns regex check to find matches
+ """
+ common_name = self._get_common_name(cert)
+ alt_names = self._get_subject_alt_names(cert)
+
+ # replace * with alphanumeric and dash
+ # replace . with literal .
+ # http://www.dns.net/dnsrd/trick.html#legal-hostnames
+ valid_patterns = [
+ re.compile('^' + pattern.replace(r".", r"\.")
+ .replace(r"*", r"[0-9A-Za-z\-]+") + '$')
+ for pattern in (set(common_name) | set(alt_names))]
+
+ return any(
+ pattern.search(hostname)
+ for pattern in valid_patterns
+ )
+
+ def _get_subject_alt_names(self, cert):
+ """
+ Get SubjectAltNames
+
+ Retrieve 'subjectAltName' attributes from cert data structure
+ """
+ if 'subjectAltName' not in cert:
+ values = []
+ else:
+ values = [value
+ for field, value in cert['subjectAltName']
+ if field == 'DNS']
+ return values
+
+ def _get_common_name(self, cert):
+ """
+ Get Common Name
+
+ Retrieve 'commonName' attribute from cert data structure
+ """
+ if 'subject' not in cert:
+ return None
+ values = [value[0][1]
+ for value in cert['subject']
+ if value[0][0] == 'commonName']
+ return values
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/__init__.py b/awx/lib/site-packages/libcloud/loadbalancer/__init__.py
new file mode 100644
index 0000000000..70891776ff
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/__init__.py
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Module for working with Load Balancers
+"""
+
+__all__ = [
+ 'base',
+ 'providers',
+ 'types',
+ 'drivers'
+]
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/base.py b/awx/lib/site-packages/libcloud/loadbalancer/base.py
new file mode 100644
index 0000000000..b785af0232
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/base.py
@@ -0,0 +1,346 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.common.base import ConnectionKey, BaseDriver
+from libcloud.common.types import LibcloudError
+
+__all__ = [
+ 'Member',
+ 'LoadBalancer',
+ 'Algorithm',
+ 'Driver',
+ 'DEFAULT_ALGORITHM'
+]
+
+
+class Member(object):
+ """
+ Represents a load balancer member.
+ """
+
+ def __init__(self, id, ip, port, balancer=None, extra=None):
+ """
+ :param id: Member ID.
+ :type id: ``str``
+
+ :param ip: IP address of this member.
+ :param ip: ``str``
+
+ :param port: Port of this member
+ :param port: ``str``
+
+ :param balancer: Balancer this member is attached to. (optional)
+ :param balancer: :class:`.LoadBalancer`
+
+ :param extra: Provider specific attributes.
+ :type extra: ``dict``
+ """
+ self.id = str(id) if id else None
+ self.ip = ip
+ self.port = port
+ self.balancer = balancer
+ self.extra = extra or {}
+
+ def __repr__(self):
+ return ('' % (self.id,
+ self.ip, self.port))
+
+
+class LoadBalancer(object):
+ """
+ Provide a common interface for handling Load Balancers.
+ """
+
+ def __init__(self, id, name, state, ip, port, driver, extra=None):
+ """
+ :param id: Load balancer ID.
+ :type id: ``str``
+
+ :param name: Load balancer name.
+ :type name: ``str``
+
+ :param state: State this loadbalancer is in.
+ :type state: :class:`libcloud.loadbalancer.types.State`
+
+ :param ip: IP address of this loadbalancer.
+ :type ip: ``str``
+
+ :param port: Port of this loadbalancer.
+ :type port: ``int``
+
+ :param driver: Driver this loadbalancer belongs to.
+ :type driver: :class:`.Driver`
+
+ :param extra: Provier specific attributes. (optional)
+ :type extra: ``dict``
+ """
+ self.id = str(id) if id else None
+ self.name = name
+ self.state = state
+ self.ip = ip
+ self.port = port
+ self.driver = driver
+ self.extra = extra or {}
+
+ def attach_compute_node(self, node):
+ return self.driver.balancer_attach_compute_node(balancer=self,
+ node=node)
+
+ def attach_member(self, member):
+ return self.driver.balancer_attach_member(balancer=self,
+ member=member)
+
+ def detach_member(self, member):
+ return self.driver.balancer_detach_member(balancer=self,
+ member=member)
+
+ def list_members(self):
+ return self.driver.balancer_list_members(balancer=self)
+
+ def destroy(self):
+ return self.driver.destroy_balancer(balancer=self)
+
+ def __repr__(self):
+ return ('' % (self.id,
+ self.name, self.state))
+
+
+class Algorithm(object):
+ """
+ Represents a load balancing algorithm.
+ """
+
+ RANDOM = 0
+ ROUND_ROBIN = 1
+ LEAST_CONNECTIONS = 2
+ WEIGHTED_ROUND_ROBIN = 3
+ WEIGHTED_LEAST_CONNECTIONS = 4
+
+DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN
+
+
+class Driver(BaseDriver):
+ """
+ A base Driver class to derive from
+
+ This class is always subclassed by a specific driver.
+ """
+
+ name = None
+ website = None
+
+ connectionCls = ConnectionKey
+ _ALGORITHM_TO_VALUE_MAP = {}
+ _VALUE_TO_ALGORITHM_MAP = {}
+
+ def __init__(self, key, secret=None, secure=True, host=None,
+ port=None, **kwargs):
+ super(Driver, self).__init__(key=key, secret=secret, secure=secure,
+ host=host, port=port, **kwargs)
+
+ def list_protocols(self):
+ """
+ Return a list of supported protocols.
+
+ :rtype: ``list`` of ``str``
+ """
+ raise NotImplementedError(
+ 'list_protocols not implemented for this driver')
+
+ def list_balancers(self):
+ """
+ List all loadbalancers
+
+ :rtype: ``list`` of :class:`LoadBalancer`
+ """
+ raise NotImplementedError(
+ 'list_balancers not implemented for this driver')
+
+ def create_balancer(self, name, port, protocol, algorithm, members):
+ """
+ Create a new load balancer instance
+
+ :param name: Name of the new load balancer (required)
+ :type name: ``str``
+
+ :param port: Port the load balancer should listen on, defaults to 80
+ :type port: ``str``
+
+ :param protocol: Loadbalancer protocol, defaults to http.
+ :type protocol: ``str``
+
+ :param members: list of Members to attach to balancer
+ :type members: ``list`` of :class:`Member`
+
+ :param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN.
+ :type algorithm: :class:`Algorithm`
+
+ :rtype: :class:`LoadBalancer`
+ """
+ raise NotImplementedError(
+ 'create_balancer not implemented for this driver')
+
+ def destroy_balancer(self, balancer):
+ """
+ Destroy a load balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :return: ``True`` if the destroy was successful, otherwise ``False``.
+ :rtype: ``bool``
+ """
+
+ raise NotImplementedError(
+ 'destroy_balancer not implemented for this driver')
+
+ def get_balancer(self, balancer_id):
+ """
+ Return a :class:`LoadBalancer` object.
+
+ :param balancer_id: id of a load balancer you want to fetch
+ :type balancer_id: ``str``
+
+ :rtype: :class:`LoadBalancer`
+ """
+
+ raise NotImplementedError(
+ 'get_balancer not implemented for this driver')
+
+ def update_balancer(self, balancer, **kwargs):
+ """
+ Sets the name, algorithm, protocol, or port on a load balancer.
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param name: New load balancer name
+ :type name: ``str``
+
+ :param algorithm: New load balancer algorithm
+ :type algorithm: :class:`Algorithm`
+
+ :param protocol: New load balancer protocol
+ :type protocol: ``str``
+
+ :param port: New load balancer port
+ :type port: ``int``
+
+ :rtype: :class:`LoadBalancer`
+ """
+ raise NotImplementedError(
+ 'update_balancer not implemented for this driver')
+
+ def balancer_attach_compute_node(self, balancer, node):
+ """
+ Attach a compute node as a member to the load balancer.
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param node: Node to join to the balancer
+ :type node: :class:`Node`
+
+ :return: Member after joining the balancer.
+ :rtype: :class:`Member`
+ """
+
+ member = Member(id=None, ip=node.public_ips[0], port=balancer.port)
+ return self.balancer_attach_member(balancer, member)
+
+ def balancer_attach_member(self, balancer, member):
+ """
+ Attach a member to balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param member: Member to join to the balancer
+ :type member: :class:`Member`
+
+ :return: Member after joining the balancer.
+ :rtype: :class:`Member`
+ """
+
+ raise NotImplementedError(
+ 'balancer_attach_member not implemented for this driver')
+
+ def balancer_detach_member(self, balancer, member):
+ """
+ Detach member from balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param member: Member which should be used
+ :type member: :class:`Member`
+
+ :return: ``True`` if member detach was successful, otherwise ``False``.
+ :rtype: ``bool``
+ """
+
+ raise NotImplementedError(
+ 'balancer_detach_member not implemented for this driver')
+
+ def balancer_list_members(self, balancer):
+ """
+ Return list of members attached to balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :rtype: ``list`` of :class:`Member`
+ """
+
+ raise NotImplementedError(
+ 'balancer_list_members not implemented for this driver')
+
+ def list_supported_algorithms(self):
+ """
+ Return algorithms supported by this driver.
+
+ :rtype: ``list`` of ``str``
+ """
+ return list(self._ALGORITHM_TO_VALUE_MAP.keys())
+
+ def _value_to_algorithm(self, value):
+ """
+ Return :class`Algorithm` based on the value.
+
+ :param value: Algorithm name (e.g. http, tcp, ...).
+ :type value: ``str``
+
+ @rype :class:`Algorithm`
+ """
+ try:
+ return self._VALUE_TO_ALGORITHM_MAP[value]
+ except KeyError:
+ raise LibcloudError(value='Invalid value: %s' % (value),
+ driver=self)
+
+ def _algorithm_to_value(self, algorithm):
+ """
+ Return string value for the provided algorithm.
+
+ :param value: Algorithm enum.
+ :type value: :class:`Algorithm`
+
+ @rype ``str``
+ """
+ try:
+ return self._ALGORITHM_TO_VALUE_MAP[algorithm]
+ except KeyError:
+ raise LibcloudError(value='Invalid algorithm: %s' % (algorithm),
+ driver=self)
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/__init__.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/__init__.py
new file mode 100644
index 0000000000..f4fdb8666a
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/__init__.py
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'rackspace',
+ 'gogrid'
+]
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/brightbox.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/brightbox.py
new file mode 100644
index 0000000000..22310d4daa
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/brightbox.py
@@ -0,0 +1,136 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from libcloud.utils.py3 import httplib
+from libcloud.common.brightbox import BrightboxConnection
+from libcloud.loadbalancer.base import Driver, Algorithm, Member
+from libcloud.loadbalancer.base import LoadBalancer
+from libcloud.loadbalancer.types import State
+from libcloud.utils.misc import reverse_dict
+
+API_VERSION = '1.0'
+
+
+class BrightboxLBDriver(Driver):
+ connectionCls = BrightboxConnection
+
+ name = 'Brightbox'
+ website = 'http://www.brightbox.co.uk/'
+
+ LB_STATE_MAP = {
+ 'creating': State.PENDING,
+ 'active': State.RUNNING,
+ 'deleting': State.UNKNOWN,
+ 'deleted': State.UNKNOWN,
+ 'failing': State.UNKNOWN,
+ 'failed': State.UNKNOWN,
+ }
+
+ _VALUE_TO_ALGORITHM_MAP = {
+ 'round-robin': Algorithm.ROUND_ROBIN,
+ 'least-connections': Algorithm.LEAST_CONNECTIONS
+ }
+
+ _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
+
+ def list_protocols(self):
+ return ['tcp', 'http']
+
+ def list_balancers(self):
+ data = self.connection.request('/%s/load_balancers' % API_VERSION) \
+ .object
+
+ return list(map(self._to_balancer, data))
+
+ def create_balancer(self, name, port, protocol, algorithm, members):
+ response = self._post(
+ '/%s/load_balancers' % API_VERSION,
+ {'name': name,
+ 'nodes': list(map(self._member_to_node, members)),
+ 'policy': self._algorithm_to_value(algorithm),
+ 'listeners': [{'in': port, 'out': port, 'protocol': protocol}],
+ 'healthcheck': {'type': protocol, 'port': port}}
+ )
+
+ return self._to_balancer(response.object)
+
+ def destroy_balancer(self, balancer):
+ response = self.connection.request('/%s/load_balancers/%s' %
+ (API_VERSION, balancer.id),
+ method='DELETE')
+
+ return response.status == httplib.ACCEPTED
+
+ def get_balancer(self, balancer_id):
+ data = self.connection.request(
+ '/%s/load_balancers/%s' % (API_VERSION, balancer_id)).object
+ return self._to_balancer(data)
+
+ def balancer_attach_compute_node(self, balancer, node):
+ return self.balancer_attach_member(balancer, node)
+
+ def balancer_attach_member(self, balancer, member):
+ path = '/%s/load_balancers/%s/add_nodes' % (API_VERSION, balancer.id)
+
+ self._post(path, {'nodes': [self._member_to_node(member)]})
+
+ return member
+
+ def balancer_detach_member(self, balancer, member):
+ path = '/%s/load_balancers/%s/remove_nodes' % (API_VERSION,
+ balancer.id)
+
+ response = self._post(path, {'nodes': [self._member_to_node(member)]})
+
+ return response.status == httplib.ACCEPTED
+
+ def balancer_list_members(self, balancer):
+ path = '/%s/load_balancers/%s' % (API_VERSION, balancer.id)
+
+ data = self.connection.request(path).object
+
+ func = lambda data: self._node_to_member(data, balancer)
+ return list(map(func, data['nodes']))
+
+ def _post(self, path, data={}):
+ headers = {'Content-Type': 'application/json'}
+
+ return self.connection.request(path, data=data, headers=headers,
+ method='POST')
+
+ def _to_balancer(self, data):
+ return LoadBalancer(
+ id=data['id'],
+ name=data['name'],
+ state=self.LB_STATE_MAP.get(data['status'], State.UNKNOWN),
+ ip=self._public_ip(data),
+ port=data['listeners'][0]['in'],
+ driver=self.connection.driver
+ )
+
+ def _member_to_node(self, member):
+ return {'node': member.id}
+
+ def _node_to_member(self, data, balancer):
+ return Member(id=data['id'], ip=None, port=None, balancer=balancer)
+
+ def _public_ip(self, data):
+ if len(data['cloud_ips']) > 0:
+ ip = data['cloud_ips'][0]['public_ip']
+ else:
+ ip = None
+
+ return ip
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/cloudstack.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/cloudstack.py
new file mode 100644
index 0000000000..ee88824308
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/cloudstack.py
@@ -0,0 +1,178 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.common.cloudstack import CloudStackDriverMixIn
+from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
+from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
+from libcloud.loadbalancer.types import Provider
+from libcloud.loadbalancer.types import State
+from libcloud.utils.misc import reverse_dict
+
+
+class CloudStackLBDriver(CloudStackDriverMixIn, Driver):
+ """Driver for CloudStack load balancers."""
+
+ api_name = 'cloudstack_lb'
+ name = 'CloudStack'
+ website = 'http://cloudstack.org/'
+ type = Provider.CLOUDSTACK
+
+ _VALUE_TO_ALGORITHM_MAP = {
+ 'roundrobin': Algorithm.ROUND_ROBIN,
+ 'leastconn': Algorithm.LEAST_CONNECTIONS
+ }
+ _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
+
+ LB_STATE_MAP = {
+ 'Active': State.RUNNING,
+ }
+
+ def __init__(self, key, secret=None, secure=True, host=None,
+ path=None, port=None, *args, **kwargs):
+ """
+ @inherits: :class:`Driver.__init__`
+ """
+ host = host if host else self.host
+ path = path if path else self.path
+
+ if path is not None:
+ self.path = path
+
+ if host is not None:
+ self.host = host
+
+ if (self.type == Provider.CLOUDSTACK) and (not host or not path):
+ raise Exception('When instantiating CloudStack driver directly ' +
+ 'you also need to provide host and path argument')
+
+ super(CloudStackLBDriver, self).__init__(key=key, secret=secret,
+ secure=secure,
+ host=host, port=port)
+
+ def list_protocols(self):
+ """
+ We don't actually have any protocol awareness beyond TCP.
+
+ :rtype: ``list`` of ``str``
+ """
+ return ['tcp']
+
+ def list_balancers(self):
+ balancers = self._sync_request(command='listLoadBalancerRules',
+ method='GET')
+ balancers = balancers.get('loadbalancerrule', [])
+ return [self._to_balancer(balancer) for balancer in balancers]
+
+ def get_balancer(self, balancer_id):
+ balancer = self._sync_request(command='listLoadBalancerRules',
+ params={'id': balancer_id},
+ method='GET')
+ balancer = balancer.get('loadbalancerrule', [])
+ if not balancer:
+ raise Exception("no such load balancer: " + str(balancer_id))
+ return self._to_balancer(balancer[0])
+
+ def create_balancer(self, name, members, protocol='http', port=80,
+ algorithm=DEFAULT_ALGORITHM, location=None,
+ private_port=None):
+ """
+ @inherits: :class:`Driver.create_balancer`
+
+ :param location: Location
+ :type location: :class:`NodeLocation`
+
+ :param private_port: Private port
+ :type private_port: ``int``
+ """
+ if location is None:
+ locations = self._sync_request(command='listZones', method='GET')
+ location = locations['zone'][0]['id']
+ else:
+ location = location.id
+ if private_port is None:
+ private_port = port
+
+ result = self._async_request(command='associateIpAddress',
+ params={'zoneid': location},
+ method='GET')
+ public_ip = result['ipaddress']
+
+ result = self._sync_request(
+ command='createLoadBalancerRule',
+ params={'algorithm': self._ALGORITHM_TO_VALUE_MAP[algorithm],
+ 'name': name,
+ 'privateport': private_port,
+ 'publicport': port,
+ 'publicipid': public_ip['id']},
+ method='GET')
+
+ balancer = self._to_balancer(result['loadbalancer'])
+
+ for member in members:
+ balancer.attach_member(member)
+
+ return balancer
+
+ def destroy_balancer(self, balancer):
+ self._async_request(command='deleteLoadBalancerRule',
+ params={'id': balancer.id},
+ method='GET')
+ self._async_request(command='disassociateIpAddress',
+ params={'id': balancer.ex_public_ip_id},
+ method='GET')
+
+ def balancer_attach_member(self, balancer, member):
+ member.port = balancer.ex_private_port
+ self._async_request(command='assignToLoadBalancerRule',
+ params={'id': balancer.id,
+ 'virtualmachineids': member.id},
+ method='GET')
+ return True
+
+ def balancer_detach_member(self, balancer, member):
+ self._async_request(command='removeFromLoadBalancerRule',
+ params={'id': balancer.id,
+ 'virtualmachineids': member.id},
+ method='GET')
+ return True
+
+ def balancer_list_members(self, balancer):
+ members = self._sync_request(command='listLoadBalancerRuleInstances',
+ params={'id': balancer.id},
+ method='GET')
+ members = members['loadbalancerruleinstance']
+ return [self._to_member(m, balancer.ex_private_port, balancer)
+ for m in members]
+
+ def _to_balancer(self, obj):
+ balancer = LoadBalancer(
+ id=obj['id'],
+ name=obj['name'],
+ state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN),
+ ip=obj['publicip'],
+ port=obj['publicport'],
+ driver=self.connection.driver
+ )
+ balancer.ex_private_port = obj['privateport']
+ balancer.ex_public_ip_id = obj['publicipid']
+ return balancer
+
+ def _to_member(self, obj, port, balancer):
+ return Member(
+ id=obj['id'],
+ ip=obj['nic'][0]['ipaddress'],
+ port=port,
+ balancer=balancer
+ )
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/elb.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/elb.py
new file mode 100644
index 0000000000..73a3c72d6b
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/elb.py
@@ -0,0 +1,350 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'ElasticLBDriver'
+]
+
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.xml import findtext, findall
+from libcloud.loadbalancer.types import State
+from libcloud.loadbalancer.base import Driver, LoadBalancer, Member
+from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection
+
+
+VERSION = '2012-06-01'
+HOST = 'elasticloadbalancing.%s.amazonaws.com'
+ROOT = '/%s/' % (VERSION)
+NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, )
+
+
+class ELBResponse(AWSGenericResponse):
+ """
+ Amazon ELB response class.
+ """
+ namespace = NS
+ exceptions = {}
+ xpath = 'Error'
+
+
+class ELBConnection(SignedAWSConnection):
+ version = VERSION
+ host = HOST
+ responseCls = ELBResponse
+
+
+class ElasticLBDriver(Driver):
+ name = 'Amazon Elastic Load Balancing'
+ website = 'http://aws.amazon.com/elasticloadbalancing/'
+ connectionCls = ELBConnection
+
+ def __init__(self, access_id, secret, region):
+ super(ElasticLBDriver, self).__init__(access_id, secret)
+ self.region = region
+ self.connection.host = HOST % (region)
+
+ def list_protocols(self):
+ return ['tcp', 'ssl', 'http', 'https']
+
+ def list_balancers(self):
+ params = {'Action': 'DescribeLoadBalancers'}
+ data = self.connection.request(ROOT, params=params).object
+ return self._to_balancers(data)
+
+ def create_balancer(self, name, port, protocol, algorithm, members,
+ ex_members_availability_zones=None):
+ if ex_members_availability_zones is None:
+ ex_members_availability_zones = ['a']
+
+ params = {
+ 'Action': 'CreateLoadBalancer',
+ 'LoadBalancerName': name,
+ 'Listeners.member.1.InstancePort': str(port),
+ 'Listeners.member.1.InstanceProtocol': protocol.upper(),
+ 'Listeners.member.1.LoadBalancerPort': str(port),
+ 'Listeners.member.1.Protocol': protocol.upper(),
+ }
+
+ for i, z in enumerate(ex_members_availability_zones):
+ zone = ''.join((self.region, z))
+ params['AvailabilityZones.member.%d' % (i + 1)] = zone
+
+ data = self.connection.request(ROOT, params=params).object
+
+ balancer = LoadBalancer(
+ id=name,
+ name=name,
+ state=State.PENDING,
+ ip=findtext(element=data, xpath='DNSName', namespace=NS),
+ port=port,
+ driver=self.connection.driver
+ )
+ balancer._members = []
+ return balancer
+
+ def destroy_balancer(self, balancer):
+ params = {
+ 'Action': 'DeleteLoadBalancer',
+ 'LoadBalancerName': balancer.id
+ }
+ self.connection.request(ROOT, params=params)
+ return True
+
+ def get_balancer(self, balancer_id):
+ params = {
+ 'Action': 'DescribeLoadBalancers',
+ 'LoadBalancerNames.member.1': balancer_id
+ }
+ data = self.connection.request(ROOT, params=params).object
+ return self._to_balancers(data)[0]
+
+ def balancer_attach_compute_node(self, balancer, node):
+ params = {
+ 'Action': 'RegisterInstancesWithLoadBalancer',
+ 'LoadBalancerName': balancer.id,
+ 'Instances.member.1.InstanceId': node.id
+ }
+ self.connection.request(ROOT, params=params)
+ balancer._members.append(Member(node.id, None, None, balancer=self))
+
+ def balancer_detach_member(self, balancer, member):
+ params = {
+ 'Action': 'DeregisterInstancesFromLoadBalancer',
+ 'LoadBalancerName': balancer.id,
+ 'Instances.member.1.InstanceId': member.id
+ }
+ self.connection.request(ROOT, params=params)
+ balancer._members = [m for m in balancer._members if m.id != member.id]
+ return True
+
+ def balancer_list_members(self, balancer):
+ return balancer._members
+
+ def ex_list_balancer_policies(self, balancer):
+ """
+ Return a list of policy description string.
+
+ :rtype: ``list`` of ``str``
+ """
+ params = {
+ 'Action': 'DescribeLoadBalancerPolicies',
+ 'LoadBalancerName': balancer.id
+ }
+
+ data = self.connection.request(ROOT, params=params).object
+ return self._to_policies(data)
+
+ def ex_list_balancer_policy_types(self):
+ """
+ Return a list of policy type description string.
+
+ :rtype: ``list`` of ``str``
+ """
+ params = {'Action': 'DescribeLoadBalancerPolicyTypes'}
+
+ data = self.connection.request(ROOT, params=params).object
+ return self._to_policy_types(data)
+
+ def ex_create_balancer_policy(self, name, policy_name, policy_type,
+ policy_attributes=None):
+ """
+ Create a new load balancer policy
+
+ :param name: Balancer name to create the policy for
+ :type name: ``str``
+
+ :param policy_name: policy to be created
+ :type policy_name: ``str``
+
+ :param policy_type: policy type being used to create policy.
+ :type policy_type: ``str``
+
+ :param policy_attributes: Each list contain values, ['AttributeName',
+ 'value']
+ :type policy_attributes: ``PolicyAttribute list``
+ """
+ params = {
+ 'Action': 'CreateLoadBalancerPolicy',
+ 'LoadBalancerName': name,
+ 'PolicyName': policy_name,
+ 'PolicyTypeName': policy_type
+ }
+
+ if policy_attributes is not None:
+ for index, (name, value) in enumerate(
+ policy_attributes.iteritems(), 1):
+ params['PolicyAttributes.member.%d. \
+ AttributeName' % (index)] = name
+ params['PolicyAttributes.member.%d. \
+ AttributeValue' % (index)] = value
+
+ response = self.connection.request(ROOT, params=params)
+ return response.status == httplib.OK
+
+ def ex_delete_balancer_policy(self, name, policy_name):
+ """
+ Delete a load balancer policy
+
+ :param name: balancer name for which policy will be deleted
+ :type name: ``str``
+
+ :param policy_name: The Mnemonic name for the policy being deleted
+ :type policy_name: ``str``
+ """
+ params = {
+ 'Action': 'DeleteLoadBalancerPolicy',
+ 'LoadBalancerName': name,
+ 'PolicyName': policy_name
+ }
+
+ response = self.connection.request(ROOT, params=params)
+ return response.status == httplib.OK
+
+ def ex_set_balancer_policies_listener(self, name, port, policies):
+ """
+ Associates, updates, or disables a policy with a listener on
+ the load balancer
+
+        :param name: balancer name to set policies for listener
+ :type name: ``str``
+
+ :param port: port to use
+ :type port: ``str``
+
+ :param policies: List of policies to be associated with the balancer
+ :type policies: ``string list``
+ """
+ params = {
+ 'Action': 'SetLoadBalancerPoliciesOfListener',
+ 'LoadBalancerName': name,
+ 'LoadBalancerPort': str(port)
+ }
+
+ if policies:
+ params = self._create_list_params(params, policies,
+ 'PolicyNames.member.%d')
+
+ response = self.connection.request(ROOT, params=params)
+ return response.status == httplib.OK
+
+ def ex_set_balancer_policies_backend_server(self, name, instance_port,
+ policies):
+ """
+ Replaces the current set of policies associated with a port on
+ which the back-end server is listening with a new set of policies
+
+ :param name: balancer name to set policies of backend server
+ :type name: ``str``
+
+ :param instance_port: Instance Port
+ :type instance_port: ``int``
+
+ :param policies: List of policies to be associated with the balancer
+        :type policies: ``string list``
+ """
+ params = {
+ 'Action': 'SetLoadBalancerPoliciesForBackendServer',
+ 'LoadBalancerName': name,
+ 'InstancePort': str(instance_port)
+ }
+
+ if policies:
+ params = self._create_list_params(params, policies,
+ 'PolicyNames.member.%d')
+
+ response = self.connection.request(ROOT, params=params)
+ return response.status == httplib.OK
+
+ def ex_create_balancer_listeners(self, name, listeners=None):
+ """
+ Creates one or more listeners on a load balancer for the specified port
+
+ :param name: The mnemonic name associated with the load balancer
+ :type name: ``str``
+
+ :param listeners: Each tuple contain values, (LoadBalancerPortNumber,
+ InstancePortNumber, Protocol,[SSLCertificateId])
+        :type listeners: ``list of tuple``
+ """
+ params = {
+ 'Action': 'CreateLoadBalancerListeners',
+ 'LoadBalancerName': name
+ }
+
+ for index, listener in enumerate(listeners):
+ i = index + 1
+ protocol = listener[2].upper()
+ params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
+ params['Listeners.member.%d.InstancePort' % i] = listener[1]
+ params['Listeners.member.%d.Protocol' % i] = listener[2]
+ if protocol == 'HTTPS' or protocol == 'SSL':
+ params['Listeners.member.%d. \
+ SSLCertificateId' % i] = listener[3]
+ else:
+ return False
+
+ response = self.connection.request(ROOT, params=params)
+ return response.status == httplib.OK
+
+ def _to_policies(self, data):
+ xpath = 'DescribeLoadBalancerPoliciesResult/PolicyDescriptions/member'
+ return [findtext(element=el, xpath='PolicyName', namespace=NS)
+ for el in findall(element=data, xpath=xpath, namespace=NS)]
+
+ def _to_policy_types(self, data):
+ xpath = 'DescribeLoadBalancerPolicyTypesResult/'
+ xpath += 'PolicyTypeDescriptions/member'
+ return [findtext(element=el, xpath='PolicyTypeName', namespace=NS)
+ for el in findall(element=data, xpath=xpath, namespace=NS)]
+
+ def _to_balancers(self, data):
+ xpath = 'DescribeLoadBalancersResult/LoadBalancerDescriptions/member'
+ return [self._to_balancer(el)
+ for el in findall(element=data, xpath=xpath, namespace=NS)]
+
+ def _to_balancer(self, el):
+ name = findtext(element=el, xpath='LoadBalancerName', namespace=NS)
+ dns_name = findtext(el, xpath='DNSName', namespace=NS)
+ port = findtext(el, xpath='LoadBalancerPort', namespace=NS)
+
+ balancer = LoadBalancer(
+ id=name,
+ name=name,
+ state=State.UNKNOWN,
+ ip=dns_name,
+ port=port,
+ driver=self.connection.driver
+ )
+
+ xpath = 'Instances/member/InstanceId'
+ members = findall(element=el, xpath=xpath, namespace=NS)
+ balancer._members = []
+
+ for m in members:
+ balancer._members.append(Member(m.text, None, None,
+ balancer=balancer))
+
+ return balancer
+
+ def _create_list_params(self, params, items, label):
+ """
+ return parameter list
+ """
+ if isinstance(items, str):
+ items = [items]
+ for index, item in enumerate(items):
+ params[label % (index + 1)] = item
+ return params
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/gce.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/gce.py
new file mode 100644
index 0000000000..09a32fdb6d
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/gce.py
@@ -0,0 +1,362 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+try:
+ import simplejson as json
+except ImportError:
+ import json # NOQA
+
+from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
+from libcloud.compute.drivers.gce import GCEConnection, GCENodeDriver
+
+# GCE doesn't actually give you an algorithm choice, but this is here simply as
+# the closest match. The actual algorithm is described here:
+# https://developers.google.com/compute/docs/load-balancing/#overview
+DEFAULT_ALGORITHM = Algorithm.RANDOM
+
+
+class GCELBDriver(Driver):
+ connectionCls = GCEConnection
+ apiname = 'googleapis'
+ name = 'Google Compute Engine Load Balancer'
+ website = 'https://cloud.google.com/'
+
+ _VALUE_TO_ALGORITHM_MAP = {
+ 'RANDOM': Algorithm.RANDOM
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ if kwargs.get('gce_driver'):
+ self.gce = kwargs['gce_driver']
+ else:
+ self.gce = GCENodeDriver(*args, **kwargs)
+
+ self.connection = self.gce.connection
+
+ def _get_node_from_ip(self, ip):
+ """
+ Return the node object that matches a given public IP address.
+
+ :param ip: Public IP address to search for
+ :type ip: ``str``
+
+ :return: Node object that has the given IP, or None if not found.
+ :rtype: :class:`Node` or None
+ """
+ all_nodes = self.gce.list_nodes(ex_zone='all')
+ for node in all_nodes:
+ if ip in node.public_ips:
+ return node
+ return None
+
+ def list_protocols(self):
+ """
+ Return a list of supported protocols.
+
+ For GCE, this is simply a hardcoded list.
+
+ :rtype: ``list`` of ``str``
+ """
+ return ['TCP', 'UDP']
+
+ def list_balancers(self, ex_region=None):
+ """
+ List all loadbalancers
+
+ :keyword ex_region: The region to return balancers from. If None,
+ will default to self.region. If 'all', will
+ return all balancers.
+ :type ex_region: ``str`` or :class:`GCERegion` or ``None``
+
+ :rtype: ``list`` of :class:`LoadBalancer`
+ """
+ balancers = []
+ for fwr in self.gce.ex_list_forwarding_rules(region=ex_region):
+ balancers.append(self._forwarding_rule_to_loadbalancer(fwr))
+ return balancers
+
+ def create_balancer(self, name, port, protocol, algorithm, members,
+ ex_region=None, ex_healthchecks=None, ex_address=None):
+ """
+ Create a new load balancer instance.
+
+ For GCE, this means creating a forwarding rule and a matching target
+ pool, then adding the members to the target pool.
+
+ :param name: Name of the new load balancer (required)
+ :type name: ``str``
+
+ :param port: Port or range of ports the load balancer should listen
+ on, defaults to all ports. Examples: '80', '5000-5999'
+ :type port: ``str``
+
+ :param protocol: Load balancer protocol. Should be 'tcp' or 'udp',
+ defaults to 'tcp'.
+ :type protocol: ``str``
+
+ :param members: List of Members to attach to balancer. Can be Member
+ objects or Node objects. Node objects are preferred
+ for GCE, but Member objects are accepted to comply
+ with the established libcloud API. Note that the
+ 'port' attribute of the members is ignored.
+ :type members: ``list`` of :class:`Member` or :class:`Node`
+
+ :param algorithm: Load balancing algorithm. Ignored for GCE which
+ uses a hashing-based algorithm.
+ :type algorithm: :class:`Algorithm` or ``None``
+
+ :keyword ex_region: Optional region to create the load balancer in.
+ Defaults to the default region of the GCE Node
+ Driver.
+ :type ex_region: C{GCERegion} or ``str``
+
+ :keyword ex_healthchecks: Optional list of healthcheck objects or
+ names to add to the load balancer.
+ :type ex_healthchecks: ``list`` of :class:`GCEHealthCheck` or
+ ``str``
+
+ :keyword ex_address: Optional static address object to be assigned to
+ the load balancer.
+ :type ex_address: C{GCEAddress}
+
+ :return: LoadBalancer object
+ :rtype: :class:`LoadBalancer`
+ """
+ node_list = []
+ for member in members:
+ # Member object
+ if hasattr(member, 'ip'):
+ if member.extra.get('node'):
+ node_list.append(member.extra['node'])
+ else:
+ node_list.append(self._get_node_from_ip(member.ip))
+ # Node object
+ elif hasattr(member, 'name'):
+ node_list.append(member)
+ # Assume it's a node name otherwise
+ else:
+ node_list.append(self.gce.ex_get_node(member, 'all'))
+
+ # Create Target Pool
+ tp_name = '%s-tp' % name
+ targetpool = self.gce.ex_create_targetpool(
+ tp_name, region=ex_region, healthchecks=ex_healthchecks,
+ nodes=node_list)
+
+ # Create the Forwarding rule, but if it fails, delete the target pool.
+ try:
+ forwarding_rule = self.gce.ex_create_forwarding_rule(
+ name, targetpool, region=ex_region, protocol=protocol,
+ port_range=port, address=ex_address)
+ except:
+ targetpool.destroy()
+ raise
+
+ # Reformat forwarding rule to LoadBalancer object
+ return self._forwarding_rule_to_loadbalancer(forwarding_rule)
+
+ def destroy_balancer(self, balancer):
+ """
+ Destroy a load balancer.
+
+ For GCE, this means destroying the associated forwarding rule, then
+ destroying the target pool that was attached to the forwarding rule.
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ destroy = balancer.extra['forwarding_rule'].destroy()
+ if destroy:
+ tp_destroy = balancer.extra['targetpool'].destroy()
+ return tp_destroy
+ else:
+ return destroy
+
+ def get_balancer(self, balancer_id):
+ """
+ Return a :class:`LoadBalancer` object.
+
+ :param balancer_id: Name of load balancer you wish to fetch. For GCE,
+ this is the name of the associated forwarding
+ rule.
+        :type balancer_id: ``str``
+
+ :rtype: :class:`LoadBalancer`
+ """
+ fwr = self.gce.ex_get_forwarding_rule(balancer_id)
+ return self._forwarding_rule_to_loadbalancer(fwr)
+
+ def balancer_attach_compute_node(self, balancer, node):
+ """
+ Attach a compute node as a member to the load balancer.
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param node: Node to join to the balancer
+ :type node: :class:`Node`
+
+ :return: Member after joining the balancer.
+ :rtype: :class:`Member`
+ """
+ add_node = balancer.extra['targetpool'].add_node(node)
+ if add_node:
+ return self._node_to_member(node, balancer)
+
+ def balancer_attach_member(self, balancer, member):
+ """
+ Attach a member to balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param member: Member to join to the balancer
+ :type member: :class:`Member`
+
+ :return: Member after joining the balancer.
+ :rtype: :class:`Member`
+ """
+ node = member.extra.get('node') or self._get_node_from_ip(member.ip)
+ add_node = balancer.extra['targetpool'].add_node(node)
+ if add_node:
+ return self._node_to_member(node, balancer)
+
+ def balancer_detach_member(self, balancer, member):
+ """
+ Detach member from balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param member: Member which should be used
+ :type member: :class:`Member`
+
+ :return: True if member detach was successful, otherwise False
+ :rtype: ``bool``
+ """
+ node = member.extra.get('node') or self._get_node_from_ip(member.ip)
+ remove_node = balancer.extra['targetpool'].remove_node(node)
+ return remove_node
+
+ def balancer_list_members(self, balancer):
+ """
+ Return list of members attached to balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :rtype: ``list`` of :class:`Member`
+ """
+ return [self._node_to_member(n, balancer) for n in
+ balancer.extra['targetpool'].nodes]
+
+ def ex_create_healthcheck(self, *args, **kwargs):
+ return self.gce.ex_create_healthcheck(*args, **kwargs)
+
+ def ex_list_healthchecks(self):
+ return self.gce.ex_list_healthchecks()
+
+ def ex_balancer_attach_healthcheck(self, balancer, healthcheck):
+ """
+ Attach a healthcheck to balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param healthcheck: Healthcheck to add
+ :type healthcheck: :class:`GCEHealthCheck`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ return balancer.extra['targetpool'].add_healthcheck(healthcheck)
+
+ def ex_balancer_detach_healthcheck(self, balancer, healthcheck):
+ """
+        Detach healthcheck from balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :param healthcheck: Healthcheck to remove
+ :type healthcheck: :class:`GCEHealthCheck`
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ return balancer.extra['targetpool'].remove_healthcheck(healthcheck)
+
+ def ex_balancer_list_healthchecks(self, balancer):
+ """
+ Return list of healthchecks attached to balancer
+
+ :param balancer: LoadBalancer which should be used
+ :type balancer: :class:`LoadBalancer`
+
+ :rtype: ``list`` of :class:`HealthChecks`
+ """
+ return balancer.extra['healthchecks']
+
+ def _node_to_member(self, node, balancer):
+ """
+ Return a Member object based on a Node.
+
+ :param node: Node object
+ :type node: :class:`Node`
+
+ :keyword balancer: The balancer the member is attached to.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Member object
+ :rtype: :class:`Member`
+ """
+ # A balancer can have a node as a member, even if the node doesn't
+ # exist. In this case, 'node' is simply a string to where the resource
+ # would be found if it was there.
+ if hasattr(node, 'name'):
+ member_id = node.name
+ member_ip = node.public_ips[0]
+ else:
+ member_id = node
+ member_ip = None
+
+ extra = {'node': node}
+ return Member(id=member_id, ip=member_ip, port=balancer.port,
+ balancer=balancer, extra=extra)
+
+ def _forwarding_rule_to_loadbalancer(self, forwarding_rule):
+ """
+ Return a Load Balancer object based on a GCEForwardingRule object.
+
+ :param forwarding_rule: ForwardingRule object
+ :type forwarding_rule: :class:`GCEForwardingRule`
+
+ :return: LoadBalancer object
+ :rtype: :class:`LoadBalancer`
+ """
+ extra = {}
+ extra['forwarding_rule'] = forwarding_rule
+ extra['targetpool'] = forwarding_rule.targetpool
+ extra['healthchecks'] = forwarding_rule.targetpool.healthchecks
+
+ return LoadBalancer(id=forwarding_rule.id,
+ name=forwarding_rule.name, state=None,
+ ip=forwarding_rule.address,
+ port=forwarding_rule.extra['portRange'],
+ driver=self, extra=extra)
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/gogrid.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/gogrid.py
new file mode 100644
index 0000000000..201ad03125
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/gogrid.py
@@ -0,0 +1,239 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import time
+
+from libcloud.utils.py3 import httplib
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.misc import reverse_dict
+from libcloud.common.types import LibcloudError
+from libcloud.common.gogrid import GoGridConnection, GoGridResponse,\
+ BaseGoGridDriver
+from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
+from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
+from libcloud.loadbalancer.types import State, LibcloudLBImmutableError
+
+
+class GoGridLBResponse(GoGridResponse):
+    """
+    Response class for the GoGrid load-balancer API.
+
+    Translates GoGrid's generic HTTP 500 "unexpected server error" for the
+    load balancer add call into a more actionable error message.
+    """
+    def success(self):
+        if self.status == httplib.INTERNAL_SERVER_ERROR:
+            # Hack, but at least this error message is more useful than
+            # "unexpected server error"
+            body = json.loads(self.body)
+            if body['method'] == '/grid/loadbalancer/add' and \
+               len(body['list']) >= 1 and \
+               body['list'][0]['message'].find(
+                   'unexpected server error') != -1:
+                # Fixed typo in the user-facing message:
+                # "mostly likely" -> "most likely".
+                raise LibcloudError(
+                    value='You most likely tried to add a member with an IP'
+                    ' address not assigned to your account', driver=self)
+        return super(GoGridLBResponse, self).success()
+
+
+class GoGridLBConnection(GoGridConnection):
+    """
+    Connection class for the GoGrid load-balancer driver.
+    """
+    # Override the response class so 500s from the LB API get the more
+    # helpful error translation in GoGridLBResponse.
+    responseCls = GoGridLBResponse
+
+
+class GoGridLBDriver(BaseGoGridDriver, Driver):
+    """
+    GoGrid load-balancer driver.
+    """
+    connectionCls = GoGridLBConnection
+    api_name = 'gogrid_lb'
+    name = 'GoGrid LB'
+    website = 'http://www.gogrid.com/'
+
+    # Map of GoGrid state names to the generic libcloud State constants.
+    LB_STATE_MAP = {'On': State.RUNNING,
+                    'Unknown': State.UNKNOWN}
+    _VALUE_TO_ALGORITHM_MAP = {
+        'round robin': Algorithm.ROUND_ROBIN,
+        'least connect': Algorithm.LEAST_CONNECTIONS
+    }
+    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
+
+    def __init__(self, *args, **kwargs):
+        """
+        @inherits: :class:`Driver.__init__`
+        """
+        super(GoGridLBDriver, self).__init__(*args, **kwargs)
+
+    def list_protocols(self):
+        # GoGrid only supports http
+        return ['http']
+
+    def list_balancers(self):
+        return self._to_balancers(
+            self.connection.request('/api/grid/loadbalancer/list').object)
+
+    def ex_create_balancer_nowait(self, name, members, protocol='http',
+                                  port=80, algorithm=DEFAULT_ALGORITHM):
+        """
+        @inherits: :class:`Driver.create_balancer`
+        """
+        algorithm = self._algorithm_to_value(algorithm)
+
+        params = {'name': name,
+                  'loadbalancer.type': algorithm,
+                  'virtualip.ip': self._get_first_ip(),
+                  'virtualip.port': port}
+        params.update(self._members_to_params(members))
+
+        resp = self.connection.request('/api/grid/loadbalancer/add',
+                                       method='GET',
+                                       params=params)
+        return self._to_balancers(resp.object)[0]
+
+    def create_balancer(self, name, members, protocol='http', port=80,
+                        algorithm=DEFAULT_ALGORITHM):
+        balancer = self.ex_create_balancer_nowait(name, members, protocol,
+                                                  port, algorithm)
+
+        # Poll for up to 20 minutes, every 30 seconds, until GoGrid has
+        # assigned an id to the newly created balancer.
+        timeout = 60 * 20
+        waittime = 0
+        interval = 2 * 15
+
+        if balancer.id is not None:
+            return balancer
+        else:
+            while waittime < timeout:
+                balancers = self.list_balancers()
+
+                # Match the freshly created balancer by name; return as soon
+                # as it shows up with a real id.
+                for i in balancers:
+                    if i.name == balancer.name and i.id is not None:
+                        return i
+
+                waittime += interval
+                time.sleep(interval)
+
+        raise Exception('Failed to get id')
+
+    def destroy_balancer(self, balancer):
+        try:
+            resp = self.connection.request(
+                '/api/grid/loadbalancer/delete', method='POST',
+                params={'id': balancer.id})
+        except Exception:
+            # sys.exc_info() keeps this compatible with both py2 and py3
+            # single-codebase style used throughout libcloud.
+            e = sys.exc_info()[1]
+            if "Update request for LoadBalancer" in str(e):
+                # A pending update makes the balancer temporarily immutable.
+                raise LibcloudLBImmutableError(
+                    "Cannot delete immutable object", GoGridLBDriver)
+            else:
+                raise
+
+        return resp.status == 200
+
+    def get_balancer(self, **kwargs):
+        params = {}
+
+        # Prefer lookup by name (ex_balancer_name); fall back to balancer_id.
+        try:
+            params['name'] = kwargs['ex_balancer_name']
+        except KeyError:
+            balancer_id = kwargs['balancer_id']
+            params['id'] = balancer_id
+
+        resp = self.connection.request('/api/grid/loadbalancer/get',
+                                       params=params)
+
+        return self._to_balancers(resp.object)[0]
+
+    def balancer_attach_member(self, balancer, member):
+        # GoGrid has no incremental attach call: re-submit the full member
+        # list with the new member included.
+        members = self.balancer_list_members(balancer)
+        members.append(member)
+
+        params = {"id": balancer.id}
+
+        params.update(self._members_to_params(members))
+
+        resp = self._update_balancer(params)
+        # Return the Member object for the ip we just attached.
+        return [m for m in
+                self._to_members(resp.object["list"][0]["realiplist"],
+                                 balancer)
+                if m.ip == member.ip][0]
+
+    def balancer_detach_member(self, balancer, member):
+        # As with attach, detaching means re-submitting the member list
+        # without the member to be removed.
+        members = self.balancer_list_members(balancer)
+
+        remaining_members = [n for n in members if n.id != member.id]
+
+        params = {"id": balancer.id}
+        params.update(self._members_to_params(remaining_members))
+
+        resp = self._update_balancer(params)
+
+        return resp.status == 200
+
+    def balancer_list_members(self, balancer):
+        resp = self.connection.request('/api/grid/loadbalancer/get',
+                                       params={'id': balancer.id})
+        return self._to_members(resp.object["list"][0]["realiplist"], balancer)
+
+    def _update_balancer(self, params):
+        """
+        Issue the loadbalancer/edit call, translating a pending-update
+        failure into LibcloudLBImmutableError.
+        """
+        try:
+            return self.connection.request('/api/grid/loadbalancer/edit',
+                                           method='POST',
+                                           params=params)
+        except Exception:
+            e = sys.exc_info()[1]
+            if "Update already pending" in str(e):
+                raise LibcloudLBImmutableError(
+                    "Balancer is immutable", GoGridLBDriver)
+
+        raise LibcloudError(value='Exception: %s' % str(e), driver=self)
+
+    def _members_to_params(self, members):
+        """
+        Helper method to convert list of :class:`Member` objects
+        to GET params.
+
+        """
+
+        params = {}
+
+        # Members are encoded as indexed realiplist.N.ip / realiplist.N.port
+        # query parameters.
+        i = 0
+        for member in members:
+            params["realiplist.%s.ip" % i] = member.ip
+            params["realiplist.%s.port" % i] = member.port
+            i += 1
+
+        return params
+
+    def _to_balancers(self, object):
+        # NOTE(review): the 'object' parameter shadows the builtin of the
+        # same name; kept for API/response symmetry with _to_members.
+        return [self._to_balancer(el) for el in object["list"]]
+
+    def _to_balancer(self, el):
+        # Unknown state names fall back to State.UNKNOWN.
+        lb = LoadBalancer(id=el.get("id"),
+                          name=el["name"],
+                          state=self.LB_STATE_MAP.get(
+                              el["state"]["name"], State.UNKNOWN),
+                          ip=el["virtualip"]["ip"]["ip"],
+                          port=el["virtualip"]["port"],
+                          driver=self.connection.driver)
+        return lb
+
+    def _to_members(self, object, balancer=None):
+        return [self._to_member(el, balancer) for el in object]
+
+    def _to_member(self, el, balancer=None):
+        member = Member(id=el["ip"]["id"],
+                        ip=el["ip"]["ip"],
+                        port=el["port"],
+                        balancer=balancer)
+        return member
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/ninefold.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/ninefold.py
new file mode 100644
index 0000000000..cb28f6c6cc
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/ninefold.py
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.loadbalancer.providers import Provider
+
+from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver
+
+
+class NinefoldLBDriver(CloudStackLBDriver):
+    "Driver for load balancers on Ninefold's Compute platform."
+
+    # Ninefold exposes the CloudStack API; only the endpoint and branding
+    # differ from the generic CloudStack LB driver.
+    host = 'api.ninefold.com'
+    path = '/compute/v1.0/'
+
+    type = Provider.NINEFOLD
+    name = 'Ninefold LB'
+    website = 'http://ninefold.com/'
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/drivers/rackspace.py b/awx/lib/site-packages/libcloud/loadbalancer/drivers/rackspace.py
new file mode 100644
index 0000000000..bf2a13c617
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/drivers/rackspace.py
@@ -0,0 +1,1530 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.misc import reverse_dict
+from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
+from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
+from libcloud.compute.drivers.rackspace import RackspaceConnection
+from libcloud.common.types import LibcloudError
+from libcloud.common.base import JsonResponse, PollingConnection
+from libcloud.loadbalancer.types import State, MemberCondition
+from libcloud.common.openstack import OpenStackDriverMixin
+from libcloud.common.rackspace import AUTH_URL
+
+# Maps the short region code accepted by RackspaceLBDriver(region=...) to
+# the service-catalog lookup arguments for that region's cloudLoadBalancers
+# endpoint.
+ENDPOINT_ARGS_MAP = {
+    'dfw': {'service_type': 'rax:load-balancer',
+            'name': 'cloudLoadBalancers',
+            'region': 'DFW'},
+    'ord': {'service_type': 'rax:load-balancer',
+            'name': 'cloudLoadBalancers',
+            'region': 'ORD'},
+    'iad': {'service_type': 'rax:load-balancer',
+            'name': 'cloudLoadBalancers',
+            'region': 'IAD'},
+    'lon': {'service_type': 'rax:load-balancer',
+            'name': 'cloudLoadBalancers',
+            'region': 'LON'},
+    'syd': {'service_type': 'rax:load-balancer',
+            'name': 'cloudLoadBalancers',
+            'region': 'SYD'},
+    'hkg': {'service_type': 'rax:load-balancer',
+            'name': 'cloudLoadBalancers',
+            'region': 'HKG'},
+
+}
+
+
+class RackspaceResponse(JsonResponse):
+    """
+    Response class for the Rackspace load-balancer API.
+    """
+    def parse_body(self):
+        # Some calls (e.g. DELETE) legitimately return an empty body.
+        if not self.body:
+            return None
+        return super(RackspaceResponse, self).parse_body()
+
+    def success(self):
+        # Any 2xx status counts as success.
+        return 200 <= int(self.status) <= 299
+
+
+class RackspaceHealthMonitor(object):
+    """
+    :param type: type of load balancer. currently CONNECT (connection
+                 monitoring), HTTP, HTTPS (connection and HTTP
+                 monitoring) are supported.
+    :type type: ``str``
+
+    :param delay: minimum seconds to wait before executing the health
+                  monitor. (Must be between 1 and 3600)
+    :type delay: ``int``
+
+    :param timeout: maximum seconds to wait when establishing a
+                    connection before timing out. (Must be between 1
+                    and 3600)
+    :type timeout: ``int``
+
+    :param attempts_before_deactivation: Number of monitor failures
+                                         before removing a node from
+                                         rotation. (Must be between 1
+                                         and 10)
+    :type attempts_before_deactivation: ``int``
+    """
+
+    def __init__(self, type, delay, timeout, attempts_before_deactivation):
+        self.type = type
+        self.delay = delay
+        self.timeout = timeout
+        self.attempts_before_deactivation = attempts_before_deactivation
+
+    def __repr__(self):
+        # Bug fix: the format template was an empty string, so applying
+        # '%' with four arguments raised TypeError whenever repr() ran.
+        return ('<RackspaceHealthMonitor: type=%s, delay=%d, timeout=%d, '
+                'attempts_before_deactivation=%d>' %
+                (self.type, self.delay, self.timeout,
+                 self.attempts_before_deactivation))
+
+    def _to_dict(self):
+        # Serialize to the attribute names the Rackspace API expects.
+        return {
+            'type': self.type,
+            'delay': self.delay,
+            'timeout': self.timeout,
+            'attemptsBeforeDeactivation': self.attempts_before_deactivation
+        }
+
+
+class RackspaceHTTPHealthMonitor(RackspaceHealthMonitor):
+    """
+    A HTTP health monitor adds extra features to a Rackspace health monitor.
+
+    :param path: the HTTP path to monitor.
+    :type path: ``str``
+
+    :param body_regex: Regular expression used to evaluate the body of
+                       the HTTP response.
+    :type body_regex: ``str``
+
+    :param status_regex: Regular expression used to evaluate the HTTP
+                         status code of the response.
+    :type status_regex: ``str``
+    """
+
+    def __init__(self, type, delay, timeout, attempts_before_deactivation,
+                 path, body_regex, status_regex):
+        super(RackspaceHTTPHealthMonitor, self).__init__(
+            type, delay, timeout, attempts_before_deactivation)
+        self.path = path
+        self.body_regex = body_regex
+        self.status_regex = status_regex
+
+    def __repr__(self):
+        # Bug fix: the format template was an empty string, so applying
+        # '%' with seven arguments raised TypeError whenever repr() ran.
+        return ('<RackspaceHTTPHealthMonitor: type=%s, delay=%d, timeout=%d, '
+                'attempts_before_deactivation=%d, path=%s, body_regex=%s, '
+                'status_regex=%s>' %
+                (self.type, self.delay, self.timeout,
+                 self.attempts_before_deactivation, self.path, self.body_regex,
+                 self.status_regex))
+
+    def _to_dict(self):
+        super_dict = super(RackspaceHTTPHealthMonitor, self)._to_dict()
+        super_dict['path'] = self.path
+        super_dict['statusRegex'] = self.status_regex
+
+        # bodyRegex is optional; omit it entirely when not set.
+        if self.body_regex:
+            super_dict['bodyRegex'] = self.body_regex
+
+        return super_dict
+
+
+class RackspaceConnectionThrottle(object):
+    """
+    :param min_connections: Minimum number of connections per IP address
+                            before applying throttling.
+    :type min_connections: ``int``
+
+    :param max_connections: Maximum number of connections per IP address.
+                            (Must be between 0 and 100000, 0 allows an
+                            unlimited number of connections.)
+    :type max_connections: ``int``
+
+    :param max_connection_rate: Maximum number of connections allowed
+                                from a single IP address within the
+                                given rate_interval_seconds. (Must be
+                                between 0 and 100000, 0 allows an
+                                unlimited number of connections.)
+    :type max_connection_rate: ``int``
+
+    :param rate_interval_seconds: Interval at which the
+                                  max_connection_rate is enforced.
+                                  (Must be between 1 and 3600.)
+    :type rate_interval_seconds: ``int``
+    """
+
+    def __init__(self, min_connections, max_connections,
+                 max_connection_rate, rate_interval_seconds):
+        self.min_connections = min_connections
+        self.max_connections = max_connections
+        self.max_connection_rate = max_connection_rate
+        self.rate_interval_seconds = rate_interval_seconds
+
+    def __repr__(self):
+        # Bug fix: the format template was an empty string, so applying
+        # '%' with four arguments raised TypeError whenever repr() ran.
+        return ('<RackspaceConnectionThrottle: min_connections=%d, '
+                'max_connections=%d, max_connection_rate=%d, '
+                'rate_interval_seconds=%d>' %
+                (self.min_connections, self.max_connections,
+                 self.max_connection_rate, self.rate_interval_seconds))
+
+    def _to_dict(self):
+        # Serialize to the attribute names the Rackspace API expects.
+        return {
+            'maxConnections': self.max_connections,
+            'minConnections': self.min_connections,
+            'maxConnectionRate': self.max_connection_rate,
+            'rateInterval': self.rate_interval_seconds
+        }
+
+
+class RackspaceAccessRuleType(object):
+    """
+    Constants for the two access rule types understood by the API.
+    """
+    ALLOW = 0
+    DENY = 1
+
+    # Maps the numeric constants above to the strings the API expects.
+    _RULE_TYPE_STRING_MAP = {
+        ALLOW: 'ALLOW',
+        DENY: 'DENY'
+    }
+
+
+class RackspaceAccessRule(object):
+    """
+    An access rule allows or denies traffic to a Load Balancer based on the
+    incoming IPs.
+
+    :param id: Unique identifier to refer to this rule by.
+    :type id: ``str``
+
+    :param rule_type: RackspaceAccessRuleType.ALLOW or
+                      RackspaceAccessRuleType.DENY.
+    :type rule_type: ``int``
+
+    :param address: IP address or cidr (can be IPv4 or IPv6).
+    :type address: ``str``
+    """
+
+    def __init__(self, id=None, rule_type=None, address=None):
+        self.id = id
+        self.rule_type = rule_type
+        self.address = address
+
+    def _to_dict(self):
+        type_string =\
+            RackspaceAccessRuleType._RULE_TYPE_STRING_MAP[self.rule_type]
+
+        as_dict = {
+            'type': type_string,
+            'address': self.address
+        }
+
+        # The id is assigned by the API, so it is only present on rules
+        # that already exist server-side.
+        if self.id is not None:
+            as_dict['id'] = self.id
+
+        return as_dict
+
+
+class RackspaceConnection(RackspaceConnection, PollingConnection):
+    """
+    Connection class for the Rackspace load-balancer API with polling
+    support for asynchronous (PENDING_*) operations.
+    """
+    # NOTE(review): this class deliberately shadows the imported compute
+    # RackspaceConnection; the name in the bases tuple still refers to the
+    # import, so subsequent references resolve to this subclass.
+    responseCls = RackspaceResponse
+    auth_url = AUTH_URL
+    poll_interval = 2
+    timeout = 80
+    cache_busting = True
+
+    def request(self, action, params=None, data='', headers=None,
+                method='GET'):
+        if not headers:
+            headers = {}
+        if not params:
+            params = {}
+
+        # Mutating requests carry JSON bodies.
+        if method in ('POST', 'PUT'):
+            headers['Content-Type'] = 'application/json'
+
+        return super(RackspaceConnection, self).request(
+            action=action, params=params,
+            data=data, method=method, headers=headers)
+
+    def get_poll_request_kwargs(self, response, context, request_kwargs):
+        # Poll the same resource with a plain GET.
+        return {'action': request_kwargs['action'],
+                'method': 'GET'}
+
+    def has_completed(self, response):
+        # ACTIVE terminates polling successfully; ERROR aborts it.
+        state = response.object['loadBalancer']['status']
+        if state == 'ERROR':
+            raise LibcloudError("Load balancer entered an ERROR state.",
+                                driver=self.driver)
+
+        return state == 'ACTIVE'
+
+    def encode_data(self, data):
+        # Data is already JSON-encoded by the callers; pass it through.
+        return data
+
+
+class RackspaceLBDriver(Driver, OpenStackDriverMixin):
+    """
+    Rackspace Cloud Load Balancers driver.
+    """
+    connectionCls = RackspaceConnection
+    api_name = 'rackspace_lb'
+    name = 'Rackspace LB'
+    website = 'http://www.rackspace.com/'
+
+    # Map of API status strings to the generic libcloud State constants.
+    LB_STATE_MAP = {
+        'ACTIVE': State.RUNNING,
+        'BUILD': State.PENDING,
+        'ERROR': State.ERROR,
+        'DELETED': State.DELETED,
+        'PENDING_UPDATE': State.PENDING,
+        'PENDING_DELETE': State.PENDING
+    }
+
+    LB_MEMBER_CONDITION_MAP = {
+        'ENABLED': MemberCondition.ENABLED,
+        'DISABLED': MemberCondition.DISABLED,
+        'DRAINING': MemberCondition.DRAINING
+    }
+
+    CONDITION_LB_MEMBER_MAP = reverse_dict(LB_MEMBER_CONDITION_MAP)
+
+    _VALUE_TO_ALGORITHM_MAP = {
+        'RANDOM': Algorithm.RANDOM,
+        'ROUND_ROBIN': Algorithm.ROUND_ROBIN,
+        'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS,
+        'WEIGHTED_ROUND_ROBIN': Algorithm.WEIGHTED_ROUND_ROBIN,
+        'WEIGHTED_LEAST_CONNECTIONS': Algorithm.WEIGHTED_LEAST_CONNECTIONS
+    }
+
+    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)
+
+    def __init__(self, key, secret=None, secure=True, host=None, port=None,
+                 region='ord', **kwargs):
+        ex_force_region = kwargs.pop('ex_force_region', None)
+        if ex_force_region:
+            # For backward compatibility
+            region = ex_force_region
+        OpenStackDriverMixin.__init__(self, **kwargs)
+        super(RackspaceLBDriver, self).__init__(key=key, secret=secret,
+                                                secure=secure, host=host,
+                                                port=port, region=region)
+
+    def _ex_connection_class_kwargs(self):
+        # Resolve the service-catalog arguments for the configured region
+        # and merge them with the generic OpenStack connection kwargs.
+        endpoint_args = ENDPOINT_ARGS_MAP[self.region]
+        kwargs = self.openstack_connection_kwargs()
+        kwargs['get_endpoint_args'] = endpoint_args
+        return kwargs
+
+    def list_protocols(self):
+        return self._to_protocols(
+            self.connection.request('/loadbalancers/protocols').object)
+
+    def ex_list_protocols_with_default_ports(self):
+        """
+        List protocols with default ports.
+
+        :rtype: ``list`` of ``tuple``
+        :return: A list of protocols with default ports included.
+        """
+        return self._to_protocols_with_default_ports(
+            self.connection.request('/loadbalancers/protocols').object)
+
+    def list_balancers(self, ex_member_address=None):
+        """
+        @inherits: :class:`Driver.list_balancers`
+
+        :param ex_member_address: Optional IP address of the attachment member.
+                                  If provided, only the load balancers which
+                                  have this member attached will be returned.
+        :type ex_member_address: ``str``
+        """
+        params = {}
+
+        if ex_member_address:
+            params['nodeaddress'] = ex_member_address
+
+        return self._to_balancers(
+            self.connection.request('/loadbalancers', params=params).object)
+
+    def create_balancer(self, name, members, protocol='http',
+                        port=80, algorithm=DEFAULT_ALGORITHM):
+        # Thin wrapper around ex_create_balancer using the default
+        # 'PUBLIC' virtual IP type.
+        return self.ex_create_balancer(name, members, protocol, port,
+                                       algorithm)
+
+    def ex_create_balancer(self, name, members, protocol='http',
+                           port=80, algorithm=DEFAULT_ALGORITHM, vip='PUBLIC'):
+        """
+        Creates a new load balancer instance
+
+        :param name: Name of the new load balancer (required)
+        :type name: ``str``
+
+        :param members: ``list`` of:class:`Member`s to attach to balancer
+        :type members: ``list`` of :class:`Member`
+
+        :param protocol: Loadbalancer protocol, defaults to http.
+        :type protocol: ``str``
+
+        :param port: Port the load balancer should listen on, defaults to 80
+        :type port: ``str``
+
+        :param algorithm: Load balancing algorithm, defaults to
+                          LBAlgorithm.ROUND_ROBIN
+        :type algorithm: :class:`Algorithm`
+
+        :param vip: Virtual ip type of PUBLIC, SERVICENET, or ID of a virtual
+                    ip
+        :type vip: ``str``
+
+        :rtype: :class:`LoadBalancer`
+        """
+        balancer_attrs = self._kwargs_to_mutable_attrs(
+            name=name,
+            protocol=protocol,
+            port=port,
+            algorithm=algorithm,
+            vip=vip)
+
+        balancer_attrs.update({
+            'nodes': [self._member_attributes(member) for member in members],
+        })
+        balancer_object = {"loadBalancer": balancer_attrs}
+
+        resp = self.connection.request('/loadbalancers',
+                                       method='POST',
+                                       data=json.dumps(balancer_object))
+        return self._to_balancer(resp.object['loadBalancer'])
+
+    def _member_attributes(self, member):
+        """
+        Convert a Member into the 'node' attribute dict the API expects.
+        """
+        member_attributes = {'address': member.ip,
+                             'port': member.port}
+
+        member_attributes.update(self._kwargs_to_mutable_member_attrs(
+            **member.extra))
+
+        # If the condition is not specified on the member, then it should be
+        # set to ENABLED by default
+        if 'condition' not in member_attributes:
+            member_attributes['condition'] =\
+                self.CONDITION_LB_MEMBER_MAP[MemberCondition.ENABLED]
+
+        return member_attributes
+
+    def destroy_balancer(self, balancer):
+        uri = '/loadbalancers/%s' % (balancer.id)
+        resp = self.connection.request(uri, method='DELETE')
+
+        # Deletion is asynchronous; 202 ACCEPTED means the request was queued.
+        return resp.status == httplib.ACCEPTED
+
+    def ex_destroy_balancers(self, balancers):
+        """
+        Destroys a list of Balancers (the API supports up to 10).
+
+        :param balancers: A list of Balancers to destroy.
+        :type balancers: ``list`` of :class:`LoadBalancer`
+
+        :return: Returns whether the destroy request was accepted.
+        :rtype: ``bool``
+        """
+        # Repeated ('id', ...) query parameters batch the deletions into a
+        # single DELETE request.
+        ids = [('id', balancer.id) for balancer in balancers]
+        resp = self.connection.request('/loadbalancers',
+                                       method='DELETE',
+                                       params=ids)
+
+        return resp.status == httplib.ACCEPTED
+
+    def get_balancer(self, balancer_id):
+        uri = '/loadbalancers/%s' % (balancer_id)
+        resp = self.connection.request(uri)
+
+        return self._to_balancer(resp.object["loadBalancer"])
+
+    def balancer_attach_member(self, balancer, member):
+        member_object = {"nodes": [self._member_attributes(member)]}
+
+        uri = '/loadbalancers/%s/nodes' % (balancer.id)
+        resp = self.connection.request(uri, method='POST',
+                                       data=json.dumps(member_object))
+        return self._to_members(resp.object, balancer)[0]
+
+    def ex_balancer_attach_members(self, balancer, members):
+        """
+        Attaches a list of members to a load balancer.
+
+        :param balancer: The Balancer to which members will be attached.
+        :type balancer: :class:`LoadBalancer`
+
+        :param members: A list of Members to attach.
+        :type members: ``list`` of :class:`Member`
+
+        :rtype: ``list`` of :class:`Member`
+        """
+        member_objects = {"nodes": [self._member_attributes(member) for member
+                                    in members]}
+
+        uri = '/loadbalancers/%s/nodes' % (balancer.id)
+        resp = self.connection.request(uri, method='POST',
+                                       data=json.dumps(member_objects))
+        return self._to_members(resp.object, balancer)
+
+    def balancer_detach_member(self, balancer, member):
+        # Loadbalancer always needs to have at least 1 member.
+        # Last member cannot be detached. You can only disable it or destroy
+        # the balancer.
+        uri = '/loadbalancers/%s/nodes/%s' % (balancer.id, member.id)
+        resp = self.connection.request(uri, method='DELETE')
+
+        return resp.status == httplib.ACCEPTED
+
+    def ex_balancer_detach_members(self, balancer, members):
+        """
+        Detaches a list of members from a balancer (the API supports up to 10).
+        This method blocks until the detach request has been processed and the
+        balancer is in a RUNNING state again.
+
+        :param balancer: The Balancer to detach members from.
+        :type balancer: :class:`LoadBalancer`
+
+        :param members: A list of Members to detach.
+        :type members: ``list`` of :class:`Member`
+
+        :return: Updated Balancer.
+        :rtype: :class:`LoadBalancer`
+        """
+        accepted = self.ex_balancer_detach_members_no_poll(balancer, members)
+
+        if not accepted:
+            msg = 'Detach members request was not accepted'
+            raise LibcloudError(msg, driver=self)
+
+        return self._get_updated_balancer(balancer)
+
+    def ex_balancer_detach_members_no_poll(self, balancer, members):
+        """
+        Detaches a list of members from a balancer (the API supports up to 10).
+        This method returns immediately.
+
+        :param balancer: The Balancer to detach members from.
+        :type balancer: :class:`LoadBalancer`
+
+        :param members: A list of Members to detach.
+        :type members: ``list`` of :class:`Member`
+
+        :return: Returns whether the detach request was accepted.
+        :rtype: ``bool``
+        """
+        uri = '/loadbalancers/%s/nodes' % (balancer.id)
+        ids = [('id', member.id) for member in members]
+        resp = self.connection.request(uri, method='DELETE', params=ids)
+
+        return resp.status == httplib.ACCEPTED
+
+    def balancer_list_members(self, balancer):
+        uri = '/loadbalancers/%s/nodes' % (balancer.id)
+        data = self.connection.request(uri).object
+        return self._to_members(data, balancer)
+
+    def update_balancer(self, balancer, **kwargs):
+        # async_request polls until the balancer leaves the PENDING_UPDATE
+        # state before returning.
+        attrs = self._kwargs_to_mutable_attrs(**kwargs)
+        resp = self.connection.async_request(
+            action='/loadbalancers/%s' % balancer.id,
+            method='PUT',
+            data=json.dumps(attrs))
+        return self._to_balancer(resp.object["loadBalancer"])
+
+    def ex_update_balancer_no_poll(self, balancer, **kwargs):
+        """
+        Update balancer no poll.
+
+        @inherits: :class:`Driver.update_balancer`
+        """
+        attrs = self._kwargs_to_mutable_attrs(**kwargs)
+        resp = self.connection.request(
+            action='/loadbalancers/%s' % balancer.id,
+            method='PUT',
+            data=json.dumps(attrs))
+        return resp.status == httplib.ACCEPTED
+
+    def ex_balancer_update_member(self, balancer, member, **kwargs):
+        """
+        Updates a Member's extra attributes for a Balancer. The attributes can
+        include 'weight' or 'condition'. This method blocks until the update
+        request has been processed and the balancer is in a RUNNING state
+        again.
+
+        :param balancer: Balancer to update the member on.
+        :type balancer: :class:`LoadBalancer`
+
+        :param member: Member which should be used
+        :type member: :class:`Member`
+
+        :keyword **kwargs: New attributes. Should contain either 'weight'
+        or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'.
+        or 'DRAINING'. 'weight' can be set to a positive integer between
+        1 and 100, with a higher weight indicating that the node will receive
+        more traffic (assuming the Balancer is using a weighted algorithm).
+        :type **kwargs: ``dict``
+
+        :return: Updated Member.
+        :rtype: :class:`Member`
+        """
+        accepted = self.ex_balancer_update_member_no_poll(
+            balancer, member, **kwargs)
+
+        if not accepted:
+            msg = 'Update member attributes was not accepted'
+            raise LibcloudError(msg, driver=self)
+
+        # Re-fetch the balancer and pick the updated member out of it.
+        balancer = self._get_updated_balancer(balancer)
+        members = balancer.extra['members']
+
+        updated_members = [m for m in members if m.id == member.id]
+
+        if not updated_members:
+            raise LibcloudError('Could not find updated member')
+
+        return updated_members[0]
+
+    def ex_balancer_update_member_no_poll(self, balancer, member, **kwargs):
+        """
+        Updates a Member's extra attributes for a Balancer. The attribute can
+        include 'weight' or 'condition'. This method returns immediately.
+
+        :param balancer: Balancer to update the member on.
+        :type balancer: :class:`LoadBalancer`
+
+        :param member: Member which should be used
+        :type member: :class:`Member`
+
+        :keyword **kwargs: New attributes. Should contain either 'weight'
+        or 'condition'. 'condition' can be set to 'ENABLED', 'DISABLED'.
+        or 'DRAINING'. 'weight' can be set to a positive integer between
+        1 and 100, with a higher weight indicating that the node will receive
+        more traffic (assuming the Balancer is using a weighted algorithm).
+        :type **kwargs: ``dict``
+
+        :return: Returns whether the update request was accepted.
+        :rtype: ``bool``
+        """
+        resp = self.connection.request(
+            action='/loadbalancers/%s/nodes/%s' % (balancer.id, member.id),
+            method='PUT',
+            data=json.dumps(self._kwargs_to_mutable_member_attrs(**kwargs))
+        )
+
+        return resp.status == httplib.ACCEPTED
+
+    def ex_list_algorithm_names(self):
+        """
+        Lists algorithms supported by the API. Returned as strings because
+        this list may change in the future.
+
+        :rtype: ``list`` of ``str``
+        """
+        response = self.connection.request('/loadbalancers/algorithms')
+        return [a["name"].upper() for a in response.object["algorithms"]]
+
+    def ex_get_balancer_error_page(self, balancer):
+        """
+        List error page configured for the specified load balancer.
+
+        :param balancer: Balancer which should be used
+        :type balancer: :class:`LoadBalancer`
+
+        :rtype: ``str``
+        """
+        uri = '/loadbalancers/%s/errorpage' % (balancer.id)
+        resp = self.connection.request(uri)
+
+        return resp.object["errorpage"]["content"]
+
+    def ex_balancer_access_list(self, balancer):
+        """
+        List the access list.
+
+        :param balancer: Balancer which should be used
+        :type balancer: :class:`LoadBalancer`
+
+        :rtype: ``list`` of :class:`RackspaceAccessRule`
+        """
+        uri = '/loadbalancers/%s/accesslist' % (balancer.id)
+        resp = self.connection.request(uri)
+
+        return [self._to_access_rule(el) for el in resp.object["accessList"]]
+
+    def _get_updated_balancer(self, balancer):
+        """
+        Updating a balancer's attributes puts a balancer into
+        'PENDING_UPDATE' status. Wait until the balancer is
+        back in 'ACTIVE' status and then return the individual
+        balancer details call.
+        """
+        resp = self.connection.async_request(
+            action='/loadbalancers/%s' % balancer.id,
+            method='GET')
+
+        return self._to_balancer(resp.object['loadBalancer'])
+
+    def ex_update_balancer_health_monitor(self, balancer, health_monitor):
+        """
+        Sets a Balancer's health monitor. This method blocks until the update
+        request has been processed and the balancer is in a RUNNING state
+        again.
+
+        :param balancer: Balancer to update.
+        :type balancer: :class:`LoadBalancer`
+
+        :param health_monitor: Health Monitor for the balancer.
+        :type health_monitor: :class:`RackspaceHealthMonitor`
+
+        :return: Updated Balancer.
+        :rtype: :class:`LoadBalancer`
+        """
+        accepted = self.ex_update_balancer_health_monitor_no_poll(
+            balancer, health_monitor)
+        if not accepted:
+            msg = 'Update health monitor request not accepted'
+            raise LibcloudError(msg, driver=self)
+
+        return self._get_updated_balancer(balancer)
+
+    def ex_update_balancer_health_monitor_no_poll(self, balancer,
+                                                  health_monitor):
+        """
+        Sets a Balancer's health monitor. This method returns immediately.
+
+        :param balancer: Balancer to update health monitor on.
+        :type balancer: :class:`LoadBalancer`
+
+        :param health_monitor: Health Monitor for the balancer.
+        :type health_monitor: :class:`RackspaceHealthMonitor`
+
+        :return: Returns whether the update request was accepted.
+        :rtype: ``bool``
+        """
+        uri = '/loadbalancers/%s/healthmonitor' % (balancer.id)
+
+        resp = self.connection.request(
+            uri, method='PUT', data=json.dumps(health_monitor._to_dict()))
+
+        return resp.status == httplib.ACCEPTED
+
+    def ex_disable_balancer_health_monitor(self, balancer):
+        """
+        Disables a Balancer's health monitor. This method blocks until the
+        disable request has been processed and the balancer is in a RUNNING
+        state again.
+
+        :param balancer: Balancer to disable health monitor on.
+        :type balancer: :class:`LoadBalancer`
+
+        :return: Updated Balancer.
+        :rtype: :class:`LoadBalancer`
+        """
+        if not self.ex_disable_balancer_health_monitor_no_poll(balancer):
+            msg = 'Disable health monitor request not accepted'
+            raise LibcloudError(msg, driver=self)
+
+        return self._get_updated_balancer(balancer)
+
+    def ex_disable_balancer_health_monitor_no_poll(self, balancer):
+        """
+        Disables a Balancer's health monitor. This method returns
+        immediately.
+
+        :param balancer: Balancer to disable health monitor on.
+        :type balancer: :class:`LoadBalancer`
+
+        :return: Returns whether the disable request was accepted.
+        :rtype: ``bool``
+        """
+        uri = '/loadbalancers/%s/healthmonitor' % (balancer.id)
+
+        # DELETE on the healthmonitor sub-resource disables monitoring.
+        resp = self.connection.request(uri,
+                                       method='DELETE')
+
+        return resp.status == httplib.ACCEPTED
+
+    def ex_update_balancer_connection_throttle(self, balancer,
+                                               connection_throttle):
+        """
+        Updates a Balancer's connection throttle. This method blocks until
+        the update request has been processed and the balancer is in a
+        RUNNING state again.
+
+        :param balancer: Balancer to update connection throttle on.
+        :type balancer: :class:`LoadBalancer`
+
+        :param connection_throttle: Connection Throttle for the balancer.
+        :type connection_throttle: :class:`RackspaceConnectionThrottle`
+
+        :return: Updated Balancer.
+        :rtype: :class:`LoadBalancer`
+        """
+        accepted = self.ex_update_balancer_connection_throttle_no_poll(
+            balancer, connection_throttle)
+
+        if not accepted:
+            msg = 'Update connection throttle request not accepted'
+            raise LibcloudError(msg, driver=self)
+
+        return self._get_updated_balancer(balancer)
+
+    def ex_update_balancer_connection_throttle_no_poll(self, balancer,
+                                                       connection_throttle):
+        """
+        Sets a Balancer's connection throttle. This method returns
+        immediately.
+
+        :param balancer: Balancer to update connection throttle on.
+        :type balancer: :class:`LoadBalancer`
+
+        :param connection_throttle: Connection Throttle for the balancer.
+        :type connection_throttle: :class:`RackspaceConnectionThrottle`
+
+        :return: Returns whether the update request was accepted.
+        :rtype: ``bool``
+        """
+        uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id)
+        resp = self.connection.request(
+            uri, method='PUT',
+            data=json.dumps(connection_throttle._to_dict()))
+
+        return resp.status == httplib.ACCEPTED
+
+ def ex_disable_balancer_connection_throttle(self, balancer):
+ """
+ Disables a Balancer's connection throttle. This method blocks until
+ the disable request has been processed and the balancer is in a RUNNING
+ state again.
+
+ :param balancer: Balancer to disable connection throttle on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ if not self.ex_disable_balancer_connection_throttle_no_poll(balancer):
+ msg = 'Disable connection throttle request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_disable_balancer_connection_throttle_no_poll(self, balancer):
+ """
+ Disables a Balancer's connection throttle. This method returns
+ immediately.
+
+ :param balancer: Balancer to disable connection throttle on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Returns whether the disable request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/connectionthrottle' % (balancer.id)
+ resp = self.connection.request(uri, method='DELETE')
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_enable_balancer_connection_logging(self, balancer):
+ """
+ Enables connection logging for a Balancer. This method blocks until
+ the enable request has been processed and the balancer is in a RUNNING
+ state again.
+
+ :param balancer: Balancer to enable connection logging on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ if not self.ex_enable_balancer_connection_logging_no_poll(balancer):
+ msg = 'Enable connection logging request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_enable_balancer_connection_logging_no_poll(self, balancer):
+ """
+ Enables connection logging for a Balancer. This method returns
+ immediately.
+
+ :param balancer: Balancer to enable connection logging on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Returns whether the enable request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/connectionlogging' % (balancer.id)
+
+ resp = self.connection.request(
+ uri, method='PUT',
+ data=json.dumps({'connectionLogging': {'enabled': True}})
+ )
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_disable_balancer_connection_logging(self, balancer):
+ """
+ Disables connection logging for a Balancer. This method blocks until
+ the enable request has been processed and the balancer is in a RUNNING
+ state again.
+
+ :param balancer: Balancer to disable connection logging on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ if not self.ex_disable_balancer_connection_logging_no_poll(balancer):
+ msg = 'Disable connection logging request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_disable_balancer_connection_logging_no_poll(self, balancer):
+ """
+ Disables connection logging for a Balancer. This method returns
+ immediately.
+
+ :param balancer: Balancer to disable connection logging on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Returns whether the disable request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/connectionlogging' % (balancer.id)
+ resp = self.connection.request(
+ uri, method='PUT',
+ data=json.dumps({'connectionLogging': {'enabled': False}})
+ )
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_enable_balancer_session_persistence(self, balancer):
+ """
+ Enables session persistence for a Balancer by setting the persistence
+ type to 'HTTP_COOKIE'. This method blocks until the enable request
+ has been processed and the balancer is in a RUNNING state again.
+
+ :param balancer: Balancer to enable session persistence on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ if not self.ex_enable_balancer_session_persistence_no_poll(balancer):
+ msg = 'Enable session persistence request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_enable_balancer_session_persistence_no_poll(self, balancer):
+ """
+ Enables session persistence for a Balancer by setting the persistence
+ type to 'HTTP_COOKIE'. This method returns immediately.
+
+ :param balancer: Balancer to enable session persistence on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Returns whether the enable request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id)
+ resp = self.connection.request(
+ uri, method='PUT',
+ data=json.dumps(
+ {'sessionPersistence': {'persistenceType': 'HTTP_COOKIE'}})
+ )
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_disable_balancer_session_persistence(self, balancer):
+ """
+ Disables session persistence for a Balancer. This method blocks until
+ the disable request has been processed and the balancer is in a RUNNING
+ state again.
+
+ :param balancer: Balancer to disable session persistence on.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ if not self.ex_disable_balancer_session_persistence_no_poll(balancer):
+ msg = 'Disable session persistence request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_disable_balancer_session_persistence_no_poll(self, balancer):
+ """
+ Disables session persistence for a Balancer. This method returns
+ immediately.
+
+ :param balancer: Balancer to disable session persistence for.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Returns whether the disable request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/sessionpersistence' % (balancer.id)
+ resp = self.connection.request(uri, method='DELETE')
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_update_balancer_error_page(self, balancer, page_content):
+ """
+ Updates a Balancer's custom error page. This method blocks until
+ the update request has been processed and the balancer is in a
+ RUNNING state again.
+
+ :param balancer: Balancer to update the custom error page for.
+ :type balancer: :class:`LoadBalancer`
+
+ :param page_content: HTML content for the custom error page.
+ :type page_content: ``str``
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ accepted = self.ex_update_balancer_error_page_no_poll(balancer,
+ page_content)
+ if not accepted:
+ msg = 'Update error page request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_update_balancer_error_page_no_poll(self, balancer, page_content):
+ """
+ Updates a Balancer's custom error page. This method returns
+ immediately.
+
+ :param balancer: Balancer to update the custom error page for.
+ :type balancer: :class:`LoadBalancer`
+
+ :param page_content: HTML content for the custom error page.
+ :type page_content: ``str``
+
+ :return: Returns whether the update request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/errorpage' % (balancer.id)
+ resp = self.connection.request(
+ uri, method='PUT',
+ data=json.dumps({'errorpage': {'content': page_content}})
+ )
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_disable_balancer_custom_error_page(self, balancer):
+ """
+ Disables a Balancer's custom error page, returning its error page to
+ the Rackspace-provided default. This method blocks until the disable
+ request has been processed and the balancer is in a RUNNING state
+ again.
+
+ :param balancer: Balancer to disable the custom error page for.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ if not self.ex_disable_balancer_custom_error_page_no_poll(balancer):
+ msg = 'Disable custom error page request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_disable_balancer_custom_error_page_no_poll(self, balancer):
+ """
+ Disables a Balancer's custom error page, returning its error page to
+ the Rackspace-provided default. This method returns immediately.
+
+ :param balancer: Balancer to disable the custom error page for.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Returns whether the disable request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/errorpage' % (balancer.id)
+ resp = self.connection.request(uri, method='DELETE')
+
+ # Load Balancer API currently returns 200 OK on custom error page
+ # delete.
+ return resp.status == httplib.OK or resp.status == httplib.ACCEPTED
+
+ def ex_create_balancer_access_rule(self, balancer, rule):
+ """
+ Adds an access rule to a Balancer's access list. This method blocks
+ until the update request has been processed and the balancer is in a
+ RUNNING state again.
+
+ :param balancer: Balancer to create the access rule for.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rule: Access Rule to add to the balancer.
+ :type rule: :class:`RackspaceAccessRule`
+
+ :return: The created access rule.
+ :rtype: :class:`RackspaceAccessRule`
+ """
+ accepted = self.ex_create_balancer_access_rule_no_poll(balancer, rule)
+ if not accepted:
+ msg = 'Create access rule not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ balancer = self._get_updated_balancer(balancer)
+ access_list = balancer.extra['accessList']
+
+ created_rule = self._find_matching_rule(rule, access_list)
+
+ if not created_rule:
+ raise LibcloudError('Could not find created rule')
+
+ return created_rule
+
+ def ex_create_balancer_access_rule_no_poll(self, balancer, rule):
+ """
+ Adds an access rule to a Balancer's access list. This method returns
+ immediately.
+
+ :param balancer: Balancer to create the access rule for.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rule: Access Rule to add to the balancer.
+ :type rule: :class:`RackspaceAccessRule`
+
+ :return: Returns whether the create request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/accesslist' % (balancer.id)
+ resp = self.connection.request(
+ uri, method='POST',
+ data=json.dumps({'networkItem': rule._to_dict()})
+ )
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_create_balancer_access_rules(self, balancer, rules):
+ """
+ Adds a list of access rules to a Balancer's access list. This method
+ blocks until the update request has been processed and the balancer is
+ in a RUNNING state again.
+
+ :param balancer: Balancer to create the access rule for.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rules: List of :class:`RackspaceAccessRule` to add to the
+ balancer.
+ :type rules: ``list`` of :class:`RackspaceAccessRule`
+
+ :return: The created access rules.
+ :rtype: :class:`RackspaceAccessRule`
+ """
+ accepted = self.ex_create_balancer_access_rules_no_poll(balancer,
+ rules)
+ if not accepted:
+ msg = 'Create access rules not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ balancer = self._get_updated_balancer(balancer)
+ access_list = balancer.extra['accessList']
+
+ created_rules = []
+ for r in rules:
+ matched_rule = self._find_matching_rule(r, access_list)
+ if matched_rule:
+ created_rules.append(matched_rule)
+
+ if len(created_rules) != len(rules):
+ raise LibcloudError('Could not find all created rules')
+
+ return created_rules
+
+ def _find_matching_rule(self, rule_to_find, access_list):
+ """
+ LB API does not return the ID for the newly created rules, so we have
+ to search the list to find the rule with a matching rule type and
+ address to return an object with the right identifier.it. The API
+ enforces rule type and address uniqueness.
+ """
+ for r in access_list:
+ if rule_to_find.rule_type == r.rule_type and\
+ rule_to_find.address == r.address:
+ return r
+
+ return None
+
+ def ex_create_balancer_access_rules_no_poll(self, balancer, rules):
+ """
+ Adds a list of access rules to a Balancer's access list. This method
+ returns immediately.
+
+ :param balancer: Balancer to create the access rule for.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rules: List of :class:`RackspaceAccessRule` to add to
+ the balancer.
+ :type rules: ``list`` of :class:`RackspaceAccessRule`
+
+ :return: Returns whether the create request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/accesslist' % (balancer.id)
+ resp = self.connection.request(
+ uri, method='POST',
+ data=json.dumps({'accessList':
+ [rule._to_dict() for rule in rules]})
+ )
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_destroy_balancer_access_rule(self, balancer, rule):
+ """
+ Removes an access rule from a Balancer's access list. This method
+ blocks until the update request has been processed and the balancer
+ is in a RUNNING state again.
+
+ :param balancer: Balancer to remove the access rule from.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rule: Access Rule to remove from the balancer.
+ :type rule: :class:`RackspaceAccessRule`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ accepted = self.ex_destroy_balancer_access_rule_no_poll(balancer, rule)
+ if not accepted:
+ msg = 'Delete access rule not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_destroy_balancer_access_rule_no_poll(self, balancer, rule):
+ """
+ Removes an access rule from a Balancer's access list. This method
+ returns immediately.
+
+ :param balancer: Balancer to remove the access rule from.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rule: Access Rule to remove from the balancer.
+ :type rule: :class:`RackspaceAccessRule`
+
+ :return: Returns whether the destroy request was accepted.
+ :rtype: ``bool``
+ """
+ uri = '/loadbalancers/%s/accesslist/%s' % (balancer.id, rule.id)
+ resp = self.connection.request(uri, method='DELETE')
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_destroy_balancer_access_rules(self, balancer, rules):
+ """
+ Removes a list of access rules from a Balancer's access list. This
+ method blocks until the update request has been processed and the
+ balancer is in a RUNNING state again.
+
+ :param balancer: Balancer to remove the access rules from.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rules: List of :class:`RackspaceAccessRule` objects to remove
+ from the balancer.
+ :type rules: ``list`` of :class:`RackspaceAccessRule`
+
+ :return: Updated Balancer.
+ :rtype: :class:`LoadBalancer`
+ """
+ accepted = self.ex_destroy_balancer_access_rules_no_poll(
+ balancer, rules)
+
+ if not accepted:
+ msg = 'Destroy access rules request not accepted'
+ raise LibcloudError(msg, driver=self)
+
+ return self._get_updated_balancer(balancer)
+
+ def ex_destroy_balancer_access_rules_no_poll(self, balancer, rules):
+ """
+ Removes a list of access rules from a Balancer's access list. This
+ method returns immediately.
+
+ :param balancer: Balancer to remove the access rules from.
+ :type balancer: :class:`LoadBalancer`
+
+ :param rules: List of :class:`RackspaceAccessRule` objects to remove
+ from the balancer.
+ :type rules: ``list`` of :class:`RackspaceAccessRule`
+
+ :return: Returns whether the destroy request was accepted.
+ :rtype: ``bool``
+ """
+ ids = [('id', rule.id) for rule in rules]
+ uri = '/loadbalancers/%s/accesslist' % balancer.id
+
+ resp = self.connection.request(uri,
+ method='DELETE',
+ params=ids)
+
+ return resp.status == httplib.ACCEPTED
+
+ def ex_list_current_usage(self, balancer):
+ """
+ Return current load balancer usage report.
+
+ :param balancer: Balancer to remove the access rules from.
+ :type balancer: :class:`LoadBalancer`
+
+ :return: Raw load balancer usage object.
+ :rtype: ``dict``
+ """
+ uri = '/loadbalancers/%s/usage/current' % (balancer.id)
+ resp = self.connection.request(uri, method='GET')
+
+ return resp.object
+
+ def _to_protocols(self, object):
+ protocols = []
+ for item in object["protocols"]:
+ protocols.append(item['name'].lower())
+ return protocols
+
+ def _to_protocols_with_default_ports(self, object):
+ protocols = []
+ for item in object["protocols"]:
+ name = item['name'].lower()
+ port = int(item['port'])
+ protocols.append((name, port))
+
+ return protocols
+
+ def _to_balancers(self, object):
+ return [self._to_balancer(el) for el in object["loadBalancers"]]
+
    def _to_balancer(self, el):
        """
        Convert a raw load balancer element into a :class:`LoadBalancer`.

        Only keys actually present in ``el`` are copied into ``extra``,
        so the amount of detail mirrors whatever the API returned (list
        vs. detail responses differ in completeness).
        """
        ip = None
        port = None
        sourceAddresses = {}

        if 'port' in el:
            port = el["port"]

        if 'sourceAddresses' in el:
            sourceAddresses = el['sourceAddresses']

        # Source addresses default to None when the element carried no
        # 'sourceAddresses' key (sourceAddresses stays an empty dict).
        extra = {
            "ipv6PublicSource": sourceAddresses.get("ipv6Public"),
            "ipv4PublicSource": sourceAddresses.get("ipv4Public"),
            "ipv4PrivateSource": sourceAddresses.get("ipv4Servicenet"),
        }

        if 'virtualIps' in el:
            # The first virtual IP is exposed as the balancer's address.
            ip = el['virtualIps'][0]['address']
            extra['virtualIps'] = el['virtualIps']

        if 'protocol' in el:
            extra['protocol'] = el['protocol']

        # Only map algorithms this driver knows about; unknown algorithm
        # values are silently dropped from extra.
        if 'algorithm' in el and \
           el["algorithm"] in self._VALUE_TO_ALGORITHM_MAP:
            extra["algorithm"] = self._value_to_algorithm(el["algorithm"])

        if 'healthMonitor' in el:
            health_monitor = self._to_health_monitor(el)
            if health_monitor:
                extra["healthMonitor"] = health_monitor

        if 'connectionThrottle' in el:
            extra["connectionThrottle"] = self._to_connection_throttle(el)

        if 'sessionPersistence' in el:
            persistence = el["sessionPersistence"]
            extra["sessionPersistenceType"] = \
                persistence.get("persistenceType")

        if 'connectionLogging' in el:
            logging = el["connectionLogging"]
            extra["connectionLoggingEnabled"] = logging.get("enabled")

        if 'nodes' in el:
            extra['members'] = self._to_members(el)

        if 'created' in el:
            extra['created'] = self._iso_to_datetime(el['created']['time'])

        if 'updated' in el:
            extra['updated'] = self._iso_to_datetime(el['updated']['time'])

        if 'accessList' in el:
            extra['accessList'] = [self._to_access_rule(rule)
                                   for rule in el['accessList']]

        # Unrecognized status strings map to State.UNKNOWN.
        return LoadBalancer(id=el["id"],
                            name=el["name"],
                            state=self.LB_STATE_MAP.get(
                                el["status"], State.UNKNOWN),
                            ip=ip,
                            port=port,
                            driver=self.connection.driver,
                            extra=extra)
+
+ def _to_members(self, object, balancer=None):
+ return [self._to_member(el, balancer) for el in object["nodes"]]
+
+ def _to_member(self, el, balancer=None):
+ extra = {}
+ if 'weight' in el:
+ extra['weight'] = el["weight"]
+
+ if 'condition' in el and\
+ el['condition'] in self.LB_MEMBER_CONDITION_MAP:
+ extra['condition'] =\
+ self.LB_MEMBER_CONDITION_MAP.get(el["condition"])
+
+ if 'status' in el:
+ extra['status'] = el["status"]
+
+ lbmember = Member(id=el["id"],
+ ip=el["address"],
+ port=el["port"],
+ balancer=balancer,
+ extra=extra)
+ return lbmember
+
+ def _protocol_to_value(self, protocol):
+ non_standard_protocols = {'imapv2': 'IMAPv2', 'imapv3': 'IMAPv3',
+ 'imapv4': 'IMAPv4'}
+ protocol_name = protocol.lower()
+
+ if protocol_name in non_standard_protocols:
+ protocol_value = non_standard_protocols[protocol_name]
+ else:
+ protocol_value = protocol.upper()
+
+ return protocol_value
+
+ def _kwargs_to_mutable_attrs(self, **attrs):
+ update_attrs = {}
+ if "name" in attrs:
+ update_attrs['name'] = attrs['name']
+
+ if "algorithm" in attrs:
+ algorithm_value = self._algorithm_to_value(attrs['algorithm'])
+ update_attrs['algorithm'] = algorithm_value
+
+ if "protocol" in attrs:
+ update_attrs['protocol'] =\
+ self._protocol_to_value(attrs['protocol'])
+
+ if "port" in attrs:
+ update_attrs['port'] = int(attrs['port'])
+
+ if "vip" in attrs:
+ if attrs['vip'] == 'PUBLIC' or attrs['vip'] == 'SERVICENET':
+ update_attrs['virtualIps'] = [{'type': attrs['vip']}]
+ else:
+ update_attrs['virtualIps'] = [{'id': attrs['vip']}]
+
+ return update_attrs
+
+ def _kwargs_to_mutable_member_attrs(self, **attrs):
+ update_attrs = {}
+ if 'condition' in attrs:
+ update_attrs['condition'] =\
+ self.CONDITION_LB_MEMBER_MAP.get(attrs['condition'])
+
+ if 'weight' in attrs:
+ update_attrs['weight'] = attrs['weight']
+
+ return update_attrs
+
    def _to_health_monitor(self, el):
        """
        Build a health monitor object from a balancer element's
        ``healthMonitor`` key.

        :return: :class:`RackspaceHealthMonitor` for CONNECT monitors,
            :class:`RackspaceHTTPHealthMonitor` for HTTP/HTTPS monitors,
            or ``None`` for unrecognized monitor types.
        """
        health_monitor_data = el["healthMonitor"]

        type = health_monitor_data.get("type")
        delay = health_monitor_data.get("delay")
        timeout = health_monitor_data.get("timeout")
        attempts_before_deactivation = \
            health_monitor_data.get("attemptsBeforeDeactivation")

        if type == "CONNECT":
            return RackspaceHealthMonitor(
                type=type, delay=delay, timeout=timeout,
                attempts_before_deactivation=attempts_before_deactivation)

        if type == "HTTP" or type == "HTTPS":
            # 'bodyRegex' defaults to '' rather than None because,
            # presumably, the monitor type treats it as an optional
            # pattern -- confirm against the HTTP monitor class.
            return RackspaceHTTPHealthMonitor(
                type=type, delay=delay, timeout=timeout,
                attempts_before_deactivation=attempts_before_deactivation,
                path=health_monitor_data.get("path"),
                status_regex=health_monitor_data.get("statusRegex"),
                body_regex=health_monitor_data.get("bodyRegex", ''))

        return None
+
    def _to_connection_throttle(self, el):
        """
        Build a :class:`RackspaceConnectionThrottle` from a balancer
        element's ``connectionThrottle`` key. Missing sub-keys become
        ``None``.
        """
        connection_throttle_data = el["connectionThrottle"]

        min_connections = connection_throttle_data.get("minConnections")
        max_connections = connection_throttle_data.get("maxConnections")
        max_connection_rate = connection_throttle_data.get("maxConnectionRate")
        rate_interval = connection_throttle_data.get("rateInterval")

        return RackspaceConnectionThrottle(
            min_connections=min_connections,
            max_connections=max_connections,
            max_connection_rate=max_connection_rate,
            rate_interval_seconds=rate_interval)
+
    def _to_access_rule(self, el):
        """
        Build a :class:`RackspaceAccessRule` from a raw access list
        element, mapping the API 'type' string to a rule type constant.
        """
        return RackspaceAccessRule(
            id=el.get("id"),
            rule_type=self._to_access_rule_type(el.get("type")),
            address=el.get("address"))
+
    def _to_access_rule_type(self, type):
        """
        Map the API access rule type string to a
        :class:`RackspaceAccessRuleType` constant.

        Returns ``None`` (implicitly) for any value other than 'ALLOW'
        or 'DENY'.
        """
        if type == "ALLOW":
            return RackspaceAccessRuleType.ALLOW
        elif type == "DENY":
            return RackspaceAccessRuleType.DENY
+
+ def _iso_to_datetime(self, isodate):
+ date_formats = ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S%z')
+ date = None
+
+ for date_format in date_formats:
+ try:
+ date = datetime.strptime(isodate, date_format)
+ except ValueError:
+ pass
+
+ if date:
+ break
+
+ return date
+
+
class RackspaceUKLBDriver(RackspaceLBDriver):
    """
    Rackspace load balancer driver pinned to the London ('lon') region.

    Listed under the deprecated provider constants in providers.py;
    prefer ``RackspaceLBDriver`` with an explicit ``region`` argument.
    """
    def __init__(self, *args, **kwargs):
        # Force the London region regardless of what the caller passed.
        kwargs['region'] = 'lon'
        super(RackspaceUKLBDriver, self).__init__(*args, **kwargs)
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/providers.py b/awx/lib/site-packages/libcloud/loadbalancer/providers.py
new file mode 100644
index 0000000000..f11e450500
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/providers.py
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.utils.misc import get_driver as get_provider_driver
+from libcloud.utils.misc import set_driver as set_provider_driver
+from libcloud.loadbalancer.types import Provider
+
__all__ = [
    "Provider",
    "DRIVERS",
    "get_driver",
]
# NOTE(review): set_driver is defined below but not exported here --
# confirm whether that is intentional.

# Maps a Provider constant to the (module path, class name) of the
# driver implementing it; drivers are resolved lazily by get_driver().
DRIVERS = {
    Provider.RACKSPACE:
    ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'),
    Provider.GOGRID:
    ('libcloud.loadbalancer.drivers.gogrid', 'GoGridLBDriver'),
    Provider.NINEFOLD:
    ('libcloud.loadbalancer.drivers.ninefold', 'NinefoldLBDriver'),
    Provider.BRIGHTBOX:
    ('libcloud.loadbalancer.drivers.brightbox', 'BrightboxLBDriver'),
    Provider.ELB:
    ('libcloud.loadbalancer.drivers.elb', 'ElasticLBDriver'),
    Provider.CLOUDSTACK:
    ('libcloud.loadbalancer.drivers.cloudstack', 'CloudStackLBDriver'),
    Provider.GCE:
    ('libcloud.loadbalancer.drivers.gce', 'GCELBDriver'),

    # Deprecated
    Provider.RACKSPACE_US:
    ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'),
    Provider.RACKSPACE_UK:
    ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceUKLBDriver'),
}
+
+
def get_driver(provider):
    """
    Return the load balancer driver class for the given provider.

    :param provider: One of the :class:`Provider` constants.
    :rtype: ``type``
    """
    return get_provider_driver(DRIVERS, provider)
+
+
def set_driver(provider, module, klass):
    """
    Register (or override) the driver for a provider.

    :param provider: Provider constant to register the driver under.
    :param module: Module path containing the driver class.
    :param klass: Name of the driver class.
    """
    return set_provider_driver(DRIVERS, provider, module, klass)
diff --git a/awx/lib/site-packages/libcloud/loadbalancer/types.py b/awx/lib/site-packages/libcloud/loadbalancer/types.py
new file mode 100644
index 0000000000..6be53e5430
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/loadbalancer/types.py
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Names exported via ``from libcloud.loadbalancer.types import *``.
# NOTE(review): MemberCondition is defined below but not exported --
# confirm whether that is intentional.
__all__ = [
    "Provider",
    "State",
    "LibcloudLBError",
    "LibcloudLBImmutableError",
]
+
+from libcloud.common.types import LibcloudError
+
+
class LibcloudLBError(LibcloudError):
    """Base error type for load balancer related failures."""
    pass
+
+
class LibcloudLBImmutableError(LibcloudLBError):
    """Raised when a load balancer cannot currently be modified."""
    pass
+
+
class Provider(object):
    """
    Constants identifying the supported load balancer providers.

    The RACKSPACE_US / RACKSPACE_UK constants are deprecated in favor of
    RACKSPACE (with a region argument on the driver).
    """
    RACKSPACE = 'rackspace'
    GOGRID = 'gogrid'
    NINEFOLD = 'ninefold'
    BRIGHTBOX = 'brightbox'
    ELB = 'elb'
    CLOUDSTACK = 'cloudstack'
    GCE = 'gce'

    # Deprecated
    RACKSPACE_US = 'rackspace_us'
    RACKSPACE_UK = 'rackspace_uk'
+
+
class State(object):
    """
    Standard states for a loadbalancer

    :cvar RUNNING: loadbalancer is running and ready to use
    :cvar PENDING: loadbalancer is being provisioned or updated
    :cvar UNKNOWN: loadbalancer state is unknown
    :cvar ERROR: loadbalancer is in an error state
    :cvar DELETED: loadbalancer has been deleted
    """

    RUNNING = 0
    PENDING = 1
    UNKNOWN = 2
    ERROR = 3
    DELETED = 4
+
+
class MemberCondition(object):
    """
    Each member of a load balancer can have an associated condition
    which determines its role within the load balancer.
    """
    # Exact traffic semantics of each condition are driver-specific --
    # confirm against the individual provider documentation.
    ENABLED = 0
    DISABLED = 1
    DRAINING = 2
diff --git a/awx/lib/site-packages/libcloud/pricing.py b/awx/lib/site-packages/libcloud/pricing.py
new file mode 100644
index 0000000000..ebe99a8aa5
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/pricing.py
@@ -0,0 +1,216 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import with_statement
+
+"""
+A class which handles loading the pricing files.
+"""
+
+import os.path
+from os.path import join as pjoin
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.connection import get_response_object
+
__all__ = [
    'get_pricing',
    'get_size_price',
    'set_pricing',
    'clear_pricing_data',
    'download_pricing_file'
]

# Default URL to the pricing file
DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' # NOQA

CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
# Pricing file bundled with the library.
DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json')
# User-provided pricing file; takes precedence over the bundled one.
CUSTOM_PRICING_FILE_PATH = os.path.expanduser('~/.libcloud/pricing.json')

# Pricing data cache: driver type -> driver name -> pricing dict.
PRICING_DATA = {
    'compute': {},
    'storage': {}
}

VALID_PRICING_DRIVER_TYPES = ['compute', 'storage']
+
+
def get_pricing_file_path(file_path=None):
    """
    Return the path of the pricing file to use.

    :type file_path: ``str``
    :param file_path: Explicit path to a pricing file. When provided it
        is returned as-is. (Previously this argument was accepted but
        silently ignored.)

    :rtype: ``str``
    :return: ``file_path`` when given, otherwise the custom pricing file
        (``~/.libcloud/pricing.json``) when it exists, else the bundled
        default pricing file.
    """
    if file_path:
        # Honor an explicitly requested pricing file.
        return file_path

    if os.path.exists(CUSTOM_PRICING_FILE_PATH) and \
       os.path.isfile(CUSTOM_PRICING_FILE_PATH):
        # Custom pricing file is available, use it
        return CUSTOM_PRICING_FILE_PATH

    return DEFAULT_PRICING_FILE_PATH
+
+
def get_pricing(driver_type, driver_name, pricing_file_path=None):
    """
    Return pricing for the provided driver.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name

    :type pricing_file_path: ``str``
    :param pricing_file_path: Custom path to a price file. If not provided
                              it uses a default path.

    :rtype: ``dict``
    :return: Dictionary with pricing where a key name is size ID and
             the value is a price.

    :raises AttributeError: On an invalid ``driver_type``.
    """
    if driver_type not in VALID_PRICING_DRIVER_TYPES:
        # Use %-interpolation here: the previous code passed the value as
        # a second AttributeError argument, rendering the message as a
        # tuple instead of a formatted string.
        raise AttributeError('Invalid driver type: %s' % (driver_type))

    # Serve from the in-memory cache when possible.
    if driver_name in PRICING_DATA[driver_type]:
        return PRICING_DATA[driver_type][driver_name]

    if not pricing_file_path:
        pricing_file_path = get_pricing_file_path(file_path=pricing_file_path)

    with open(pricing_file_path) as fp:
        content = fp.read()

    pricing_data = json.loads(content)
    size_pricing = pricing_data[driver_type][driver_name]

    # Cache everything that was just parsed. A distinct loop variable is
    # used so the driver_type argument isn't clobbered (the previous code
    # shadowed it).
    for cached_type in VALID_PRICING_DRIVER_TYPES:
        pricing = pricing_data.get(cached_type, None)
        if pricing:
            PRICING_DATA[cached_type] = pricing

    return size_pricing
+
+
def set_pricing(driver_type, driver_name, pricing):
    """
    Populate the driver pricing dictionary.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name

    :type pricing: ``dict``
    :param pricing: Dictionary where a key is a size ID and a value is a price.
    """

    # Overwrites any pricing previously cached for this driver.
    PRICING_DATA[driver_type][driver_name] = pricing
+
+
def get_size_price(driver_type, driver_name, size_id):
    """
    Return price for the provided size.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name

    :type size_id: ``str`` or ``int``
    :param size_id: Unique size ID (can be an integer or a string - depends
                    on the driver)

    :rtype: ``float``
    :return: Size price.
    """
    driver_pricing = get_pricing(driver_type=driver_type,
                                 driver_name=driver_name)
    return float(driver_pricing[size_id])
+
+
def invalidate_pricing_cache():
    """
    Invalidate pricing cache for all the drivers.
    """
    for segment in ('compute', 'storage'):
        PRICING_DATA[segment] = {}
+
+
def clear_pricing_data():
    """
    Invalidate pricing cache for all the drivers.

    Note: This method does the same thing as invalidate_pricing_cache and is
    here for backward compatibility reasons.
    """
    invalidate_pricing_cache()
+
+
def invalidate_module_pricing_cache(driver_type, driver_name):
    """
    Invalidate the cache for the specified driver.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name
    """
    # pop() is a no-op when the driver has no cached entry.
    PRICING_DATA[driver_type].pop(driver_name, None)
+
+
def download_pricing_file(file_url=DEFAULT_FILE_URL,
                          file_path=CUSTOM_PRICING_FILE_PATH):
    """
    Download pricing file from the file_url and save it to file_path.

    :type file_url: ``str``
    :param file_url: URL pointing to the pricing file.

    :type file_path: ``str``
    :param file_path: Path where a downloaded pricing file will be saved.

    :raises ValueError: If the target directory doesn't exist or
        ``file_path`` points to a directory.
    :raises Exception: If the downloaded body is not valid pricing data.
    """
    dir_name = os.path.dirname(file_path)

    if not os.path.exists(dir_name):
        # Verify a valid path is provided
        msg = ('Can\'t write to %s, directory %s, doesn\'t exist' %
               (file_path, dir_name))
        raise ValueError(msg)

    if os.path.exists(file_path) and os.path.isdir(file_path):
        msg = ('Can\'t write to %s file path because it\'s a'
               ' directory' % (file_path))
        raise ValueError(msg)

    response = get_response_object(file_url)
    body = response.body

    # Verify pricing file is valid. Catch ValueError rather than
    # json.decoder.JSONDecodeError: the latter does not exist in the
    # Python 2 stdlib json module (the import fallback above), so the
    # old except clause could itself fail with an AttributeError.
    # JSONDecodeError subclasses ValueError in both simplejson and
    # Python 3, so ValueError covers every json implementation in use.
    try:
        data = json.loads(body)
    except ValueError:
        msg = 'Provided URL doesn\'t contain valid pricing data'
        raise Exception(msg)

    if not data.get('updated', None):
        msg = 'Provided URL doesn\'t contain valid pricing data'
        raise Exception(msg)

    # No need to stream it since file is small
    with open(file_path, 'w') as file_handle:
        file_handle.write(body)
diff --git a/awx/lib/site-packages/libcloud/security.py b/awx/lib/site-packages/libcloud/security.py
new file mode 100644
index 0000000000..81d7a51f67
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/security.py
@@ -0,0 +1,80 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Security (SSL) Settings
+
+Usage:
+ import libcloud.security
+ libcloud.security.VERIFY_SSL_CERT = True
+
+ # Optional.
+ libcloud.security.CA_CERTS_PATH.append('/path/to/cacert.txt')
+"""
+
+import os
+
# Master switch: when True, drivers verify the server's SSL certificate
# on HTTPS connections.
VERIFY_SSL_CERT = True

# File containing one or more PEM-encoded CA certificates
# concatenated together.
# NOTE(review): the connection layer presumably probes these paths in
# order and uses the first readable bundle — confirm in the HTTPS
# connection code.
CA_CERTS_PATH = [
    # centos/fedora: openssl
    '/etc/pki/tls/certs/ca-bundle.crt',

    # debian/ubuntu/arch/gentoo: ca-certificates
    '/etc/ssl/certs/ca-certificates.crt',

    # freebsd: ca_root_nss
    '/usr/local/share/certs/ca-root-nss.crt',

    # macports: curl-ca-bundle
    '/opt/local/share/curl/curl-ca-bundle.crt',

    # homebrew: openssl
    '/usr/local/etc/openssl/cert.pem',

    # homebrew: curl-ca-bundle (backward compatibility)
    '/usr/local/opt/curl-ca-bundle/share/ca-bundle.crt',
]

# Allow user to explicitly specify which CA bundle to use, using an environment
# variable
environment_cert_file = os.getenv('SSL_CERT_FILE', None)
if environment_cert_file is not None:
    # Make sure the file exists
    if not os.path.exists(environment_cert_file):
        raise ValueError('Certificate file %s doesn\'t exist' %
                         (environment_cert_file))

    if not os.path.isfile(environment_cert_file):
        raise ValueError('Certificate file can\'t be a directory')

    # If a provided file exists we ignore other common paths because we
    # don't want to fall-back to a potentially less restrictive bundle
    CA_CERTS_PATH = [environment_cert_file]

# Error message shown when none of the CA_CERTS_PATH candidates exists.
CA_CERTS_UNAVAILABLE_ERROR_MSG = (
    'No CA Certificates were found in CA_CERTS_PATH. For information on '
    'how to get required certificate files, please visit '
    'https://libcloud.readthedocs.org/en/latest/other/'
    'ssl-certificate-validation.html'
)

# Warning emitted when VERIFY_SSL_CERT has been turned off by the user.
VERIFY_SSL_DISABLED_MSG = (
    'SSL certificate verification is disabled, this can pose a '
    'security risk. For more information how to enable the SSL '
    'certificate verification, please visit the libcloud '
    'documentation.'
)
diff --git a/awx/lib/site-packages/libcloud/storage/__init__.py b/awx/lib/site-packages/libcloud/storage/__init__.py
new file mode 100644
index 0000000000..f73ddf07a7
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/__init__.py
@@ -0,0 +1,3 @@
+"""
+Module for working with Storage
+"""
diff --git a/awx/lib/site-packages/libcloud/storage/base.py b/awx/lib/site-packages/libcloud/storage/base.py
new file mode 100644
index 0000000000..f12b906409
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/base.py
@@ -0,0 +1,825 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Provides base classes for working with storage
+"""
+
+# Backward compatibility for Python 2.5
+from __future__ import with_statement
+
+import os.path # pylint: disable-msg=W0404
+import hashlib
+from os.path import join as pjoin
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import next
+from libcloud.utils.py3 import b
+
+import libcloud.utils.files
+from libcloud.common.types import LibcloudError
+from libcloud.common.base import ConnectionUserAndKey, BaseDriver
+from libcloud.storage.types import ObjectDoesNotExistError
+
+__all__ = [
+ 'Object',
+ 'Container',
+ 'StorageDriver',
+
+ 'CHUNK_SIZE',
+ 'DEFAULT_CONTENT_TYPE'
+]
+
+CHUNK_SIZE = 8096
+
+# Default Content-Type which is sent when uploading an object if one is not
+# supplied and can't be detected when using non-strict mode.
+DEFAULT_CONTENT_TYPE = 'application/octet-stream'
+
+
class Object(object):
    """
    Represents an object (BLOB).
    """

    def __init__(self, name, size, hash, extra, meta_data, container,
                 driver):
        """
        :param name: Object name (must be unique per container).
        :type name: ``str``

        :param size: Object size in bytes.
        :type size: ``int``

        :param hash: Object hash.
        :type hash: ``str``

        :param container: Object container.
        :type container: :class:`Container`

        :param extra: Extra attributes.
        :type extra: ``dict``

        :param meta_data: Optional object meta data.
        :type meta_data: ``dict``

        :param driver: StorageDriver instance.
        :type driver: :class:`StorageDriver`
        """

        self.name = name
        self.size = size
        self.hash = hash
        self.container = container
        self.extra = extra or {}
        self.meta_data = meta_data or {}
        self.driver = driver

    def get_cdn_url(self):
        # Delegates to the driver; raises NotImplementedError if the
        # provider has no CDN support.
        return self.driver.get_object_cdn_url(obj=self)

    def enable_cdn(self, **kwargs):
        return self.driver.enable_object_cdn(obj=self, **kwargs)

    def download(self, destination_path, overwrite_existing=False,
                 delete_on_failure=True):
        return self.driver.download_object(self, destination_path,
                                           overwrite_existing,
                                           delete_on_failure)

    def as_stream(self, chunk_size=None):
        return self.driver.download_object_as_stream(self, chunk_size)

    def delete(self):
        return self.driver.delete_object(self)

    def __repr__(self):
        # Bug fix: the format string was empty which made repr() raise
        # "not all arguments converted during string formatting".
        return ('<Object: name=%s, size=%s, hash=%s, provider=%s ...>' %
                (self.name, self.size, self.hash, self.driver.name))
+
+
class Container(object):
    """
    Represents a container (bucket) which can hold multiple objects.
    """

    def __init__(self, name, extra, driver):
        """
        :param name: Container name (must be unique).
        :type name: ``str``

        :param extra: Extra attributes.
        :type extra: ``dict``

        :param driver: StorageDriver instance.
        :type driver: :class:`StorageDriver`
        """

        self.name = name
        self.extra = extra or {}
        self.driver = driver

    def iterate_objects(self):
        return self.driver.iterate_container_objects(container=self)

    def list_objects(self):
        return self.driver.list_container_objects(container=self)

    def get_cdn_url(self):
        return self.driver.get_container_cdn_url(container=self)

    def enable_cdn(self, **kwargs):
        return self.driver.enable_container_cdn(container=self, **kwargs)

    def get_object(self, object_name):
        return self.driver.get_object(container_name=self.name,
                                      object_name=object_name)

    def upload_object(self, file_path, object_name, extra=None, **kwargs):
        return self.driver.upload_object(
            file_path, self, object_name, extra=extra, **kwargs)

    def upload_object_via_stream(self, iterator, object_name, extra=None,
                                 **kwargs):
        return self.driver.upload_object_via_stream(
            iterator, self, object_name, extra=extra, **kwargs)

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        return self.driver.download_object(
            obj, destination_path, overwrite_existing=overwrite_existing,
            delete_on_failure=delete_on_failure)

    def download_object_as_stream(self, obj, chunk_size=None):
        return self.driver.download_object_as_stream(obj, chunk_size)

    def delete_object(self, obj):
        return self.driver.delete_object(obj)

    def delete(self):
        return self.driver.delete_container(self)

    def __repr__(self):
        # Bug fix: the format string was empty which made repr() raise
        # "not all arguments converted during string formatting".
        return ('<Container: name=%s, provider=%s>'
                % (self.name, self.driver.name))
+
+
class StorageDriver(BaseDriver):
    """
    A base StorageDriver to derive from.
    """

    connectionCls = ConnectionUserAndKey
    name = None
    hash_type = 'md5'
    supports_chunked_encoding = False

    # When strict mode is used, exception will be thrown if no content type is
    # provided and none can be detected when uploading an object
    strict_mode = False

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        super(StorageDriver, self).__init__(key=key, secret=secret,
                                            secure=secure, host=host,
                                            port=port, **kwargs)

    def iterate_containers(self):
        """
        Return a generator of containers for the given account

        :return: A generator of Container instances.
        :rtype: ``generator`` of :class:`Container`
        """
        raise NotImplementedError(
            'iterate_containers not implemented for this driver')

    def list_containers(self):
        """
        Return a list of containers.

        :return: A list of Container instances.
        :rtype: ``list`` of :class:`Container`
        """
        return list(self.iterate_containers())

    def iterate_container_objects(self, container):
        """
        Return a generator of objects for the given container.

        :param container: Container instance
        :type container: :class:`Container`

        :return: A generator of Object instances.
        :rtype: ``generator`` of :class:`Object`
        """
        raise NotImplementedError(
            'iterate_container_objects not implemented for this driver')

    def list_container_objects(self, container):
        """
        Return a list of objects for the given container.

        :param container: Container instance.
        :type container: :class:`Container`

        :return: A list of Object instances.
        :rtype: ``list`` of :class:`Object`
        """
        return list(self.iterate_container_objects(container))

    def get_container(self, container_name):
        """
        Return a container instance.

        :param container_name: Container name.
        :type container_name: ``str``

        :return: :class:`Container` instance.
        :rtype: :class:`Container`
        """
        # Bug fix: the error message previously said 'get_object'
        # (copy-paste mistake).
        raise NotImplementedError(
            'get_container not implemented for this driver')

    def get_container_cdn_url(self, container):
        """
        Return a container CDN URL.

        :param container: Container instance
        :type container: :class:`Container`

        :return: A CDN URL for this container.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'get_container_cdn_url not implemented for this driver')

    def get_object(self, container_name, object_name):
        """
        Return an object instance.

        :param container_name: Container name.
        :type container_name: ``str``

        :param object_name: Object name.
        :type object_name: ``str``

        :return: :class:`Object` instance.
        :rtype: :class:`Object`
        """
        raise NotImplementedError(
            'get_object not implemented for this driver')

    def get_object_cdn_url(self, obj):
        """
        Return a object CDN URL.

        :param obj: Object instance
        :type obj: :class:`Object`

        :return: A CDN URL for this object.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'get_object_cdn_url not implemented for this driver')

    def enable_container_cdn(self, container):
        """
        Enable container CDN.

        :param container: Container instance
        :type container: :class:`Container`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'enable_container_cdn not implemented for this driver')

    def enable_object_cdn(self, obj):
        """
        Enable object CDN.

        :param obj: Object instance
        :type obj: :class:`Object`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'enable_object_cdn not implemented for this driver')

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        """
        Download an object to the specified destination path.

        :param obj: Object instance.
        :type obj: :class:`Object`

        :param destination_path: Full path to a file or a directory where the
                                 incoming file will be saved.
        :type destination_path: ``str``

        :param overwrite_existing: True to overwrite an existing file,
                                   defaults to False.
        :type overwrite_existing: ``bool``

        :param delete_on_failure: True to delete a partially downloaded file if
                                  the download was not successful (hash
                                  mismatch / file size).
        :type delete_on_failure: ``bool``

        :return: True if an object has been successfully downloaded, False
                 otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'download_object not implemented for this driver')

    def download_object_as_stream(self, obj, chunk_size=None):
        """
        Return a generator which yields object data.

        :param obj: Object instance
        :type obj: :class:`Object`

        :param chunk_size: Optional chunk size (in bytes).
        :type chunk_size: ``int``
        """
        raise NotImplementedError(
            'download_object_as_stream not implemented for this driver')

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True):
        """
        Upload an object currently located on a disk.

        :param file_path: Path to the object on disk.
        :type file_path: ``str``

        :param container: Destination container.
        :type container: :class:`Container`

        :param object_name: Object name.
        :type object_name: ``str``

        :param verify_hash: Verify hash
        :type verify_hash: ``bool``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Object`
        """
        raise NotImplementedError(
            'upload_object not implemented for this driver')

    def upload_object_via_stream(self, iterator, container,
                                 object_name,
                                 extra=None):
        """
        Upload an object using an iterator.

        If a provider supports it, chunked transfer encoding is used and you
        don't need to know in advance the amount of data to be uploaded.

        Otherwise if a provider doesn't support it, iterator will be exhausted
        so a total size for data to be uploaded can be determined.

        Note: Exhausting the iterator means that the whole data must be
        buffered in memory which might result in memory exhausting when
        uploading a very large object.

        If a file is located on a disk you are advised to use upload_object
        function which uses fs.stat function to determine the file size and it
        doesn't need to buffer whole object in the memory.

        :type iterator: :class:`object`
        :param iterator: An object which implements the iterator interface.

        :type container: :class:`Container`
        :param container: Destination container.

        :type object_name: ``str``
        :param object_name: Object name.

        :type extra: ``dict``
        :param extra: (optional) Extra attributes (driver specific). Note:
            This dictionary must contain a 'content_type' key which represents
            a content type of the stored object.

        :rtype: ``object``
        """
        raise NotImplementedError(
            'upload_object_via_stream not implemented for this driver')

    def delete_object(self, obj):
        """
        Delete an object.

        :type obj: :class:`Object`
        :param obj: Object instance.

        :return: ``bool`` True on success.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_object not implemented for this driver')

    def create_container(self, container_name):
        """
        Create a new container.

        :type container_name: ``str``
        :param container_name: Container name.

        :return: Container instance on success.
        :rtype: :class:`Container`
        """
        raise NotImplementedError(
            'create_container not implemented for this driver')

    def delete_container(self, container):
        """
        Delete a container.

        :type container: :class:`Container`
        :param container: Container instance

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_container not implemented for this driver')

    def _get_object(self, obj, callback, callback_kwargs, response,
                    success_status_code=None):
        """
        Call passed callback and start transfer of the object'

        :type obj: :class:`Object`
        :param obj: Object instance.

        :type callback: :class:`function`
        :param callback: Function which is called with the passed
            callback_kwargs

        :type callback_kwargs: ``dict``
        :param callback_kwargs: Keyword arguments which are passed to the
             callback.

        :type response: :class:`Response`
        :param response: Response instance.

        :type success_status_code: ``int``
        :param success_status_code: Status code which represents a successful
                                    transfer (defaults to httplib.OK)

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        success_status_code = success_status_code or httplib.OK

        if response.status == success_status_code:
            return callback(**callback_kwargs)
        elif response.status == httplib.NOT_FOUND:
            raise ObjectDoesNotExistError(object_name=obj.name,
                                          value='', driver=self)

        raise LibcloudError(value='Unexpected status code: %s' %
                                  (response.status),
                            driver=self)

    def _save_object(self, response, obj, destination_path,
                     overwrite_existing=False, delete_on_failure=True,
                     chunk_size=None):
        """
        Save object to the provided path.

        :type response: :class:`RawResponse`
        :param response: RawResponse instance.

        :type obj: :class:`Object`
        :param obj: Object instance.

        :type destination_path: ``str``
        :param destination_path: Destination directory.

        :type delete_on_failure: ``bool``
        :param delete_on_failure: True to delete partially downloaded object if
                                  the download fails.

        :type overwrite_existing: ``bool``
        :param overwrite_existing: True to overwrite a local path if it already
                                   exists.

        :type chunk_size: ``int``
        :param chunk_size: Optional chunk size
            (defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb)

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """

        chunk_size = chunk_size or CHUNK_SIZE

        base_name = os.path.basename(destination_path)

        if not base_name and not os.path.exists(destination_path):
            raise LibcloudError(
                value='Path %s does not exist' % (destination_path),
                driver=self)

        if not base_name:
            # destination_path is a directory - save under the object's name
            file_path = pjoin(destination_path, obj.name)
        else:
            file_path = destination_path

        if os.path.exists(file_path) and not overwrite_existing:
            raise LibcloudError(
                value='File %s already exists, but ' % (file_path) +
                'overwrite_existing=False',
                driver=self)

        stream = libcloud.utils.files.read_in_chunks(response, chunk_size)

        try:
            data_read = next(stream)
        except StopIteration:
            # Empty response?
            return False

        bytes_transferred = 0

        with open(file_path, 'wb') as file_handle:
            while len(data_read) > 0:
                file_handle.write(b(data_read))
                bytes_transferred += len(data_read)

                try:
                    data_read = next(stream)
                except StopIteration:
                    data_read = ''

        if int(obj.size) != int(bytes_transferred):
            # Transfer failed, support retry?
            if delete_on_failure:
                try:
                    os.unlink(file_path)
                except Exception:
                    pass

            return False

        return True

    def _upload_object(self, object_name, content_type, upload_func,
                       upload_func_kwargs, request_path, request_method='PUT',
                       headers=None, file_path=None, iterator=None):
        """
        Helper function for setting common request headers and calling the
        passed in callback which uploads an object.
        """
        headers = headers or {}

        if file_path and not os.path.exists(file_path):
            raise OSError('File %s does not exist' % (file_path))

        if iterator is not None and not hasattr(iterator, 'next') and not \
                hasattr(iterator, '__next__'):
            raise AttributeError('iterator object must implement next() ' +
                                 'method.')

        if not content_type:
            if file_path:
                name = file_path
            else:
                name = object_name
            content_type, _ = libcloud.utils.files.guess_file_mime_type(name)

            if not content_type:
                if self.strict_mode:
                    raise AttributeError('File content-type could not be '
                                         'guessed and no content_type value '
                                         'is provided')
                else:
                    # Fallback to a content-type
                    content_type = DEFAULT_CONTENT_TYPE

        file_size = None

        if iterator:
            if self.supports_chunked_encoding:
                headers['Transfer-Encoding'] = 'chunked'
                upload_func_kwargs['chunked'] = True
            else:
                # Chunked transfer encoding is not supported. Need to buffer
                # all the data in memory so we can determine file size.
                iterator = libcloud.utils.files.read_in_chunks(
                    iterator=iterator)
                data = libcloud.utils.files.exhaust_iterator(iterator=iterator)

                file_size = len(data)
                upload_func_kwargs['data'] = data
        else:
            file_size = os.path.getsize(file_path)
            upload_func_kwargs['chunked'] = False

        if file_size is not None and 'Content-Length' not in headers:
            headers['Content-Length'] = file_size

        headers['Content-Type'] = content_type
        response = self.connection.request(request_path,
                                           method=request_method, data=None,
                                           headers=headers, raw=True)

        upload_func_kwargs['response'] = response
        success, data_hash, bytes_transferred = upload_func(
            **upload_func_kwargs)

        if not success:
            raise LibcloudError(
                value='Object upload failed, Perhaps a timeout?', driver=self)

        result_dict = {'response': response, 'data_hash': data_hash,
                       'bytes_transferred': bytes_transferred}
        return result_dict

    def _upload_data(self, response, data, calculate_hash=True):
        """
        Upload data stored in a string.

        :type response: :class:`RawResponse`
        :param response: RawResponse object.

        :type data: ``str``
        :param data: Data to upload.

        :type calculate_hash: ``bool``
        :param calculate_hash: True to calculate hash of the transferred data.
                               (defaults to True).

        :rtype: ``tuple``
        :return: First item is a boolean indicator of success, second
                 one is the uploaded data MD5 hash and the third one
                 is the number of transferred bytes.
        """
        bytes_transferred = 0
        data_hash = None

        if calculate_hash:
            data_hash = self._get_hash_function()
            data_hash.update(b(data))

        try:
            response.connection.connection.send(b(data))
        except Exception:
            # TODO: let this exception propagate
            # Timeout, etc.
            return False, None, bytes_transferred

        bytes_transferred = len(data)

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        return True, data_hash, bytes_transferred

    def _stream_data(self, response, iterator, chunked=False,
                     calculate_hash=True, chunk_size=None, data=None):
        """
        Stream a data over an http connection.

        :type response: :class:`RawResponse`
        :param response: RawResponse object.

        :type iterator: :class:`object`
        :param iterator: An object which implements an iterator interface
                         or a File like object with read method.

        :type chunked: ``bool``
        :param chunked: True if the chunked transfer encoding should be used
                        (defaults to False).

        :type calculate_hash: ``bool``
        :param calculate_hash: True to calculate hash of the transferred data.
                               (defaults to True).

        :type chunk_size: ``int``
        :param chunk_size: Optional chunk size (defaults to ``CHUNK_SIZE``)

        :rtype: ``tuple``
        :return: First item is a boolean indicator of success, second
                 one is the uploaded data MD5 hash and the third one
                 is the number of transferred bytes.
        """

        chunk_size = chunk_size or CHUNK_SIZE

        data_hash = None
        if calculate_hash:
            data_hash = self._get_hash_function()

        generator = libcloud.utils.files.read_in_chunks(iterator, chunk_size)

        bytes_transferred = 0
        try:
            chunk = next(generator)
        except StopIteration:
            # Special case when StopIteration is thrown on the first iteration
            # create a 0-byte long object
            chunk = ''
            if chunked:
                response.connection.connection.send(b('%X\r\n' %
                                                      (len(chunk))))
                # Bug fix: wrap the (empty) chunk with b() like every other
                # send call so a text string is never written to the socket.
                response.connection.connection.send(b(chunk))
                response.connection.connection.send(b('\r\n'))
                response.connection.connection.send(b('0\r\n\r\n'))
            else:
                response.connection.connection.send(b(chunk))
            # Bug fix: only call hexdigest() when a hash was actually
            # created; previously this raised AttributeError on None when
            # calculate_hash=False.
            if calculate_hash:
                data_hash = data_hash.hexdigest()
            return True, data_hash, bytes_transferred

        while len(chunk) > 0:
            try:
                if chunked:
                    response.connection.connection.send(b('%X\r\n' %
                                                          (len(chunk))))
                    response.connection.connection.send(b(chunk))
                    response.connection.connection.send(b('\r\n'))
                else:
                    response.connection.connection.send(b(chunk))
            except Exception:
                # TODO: let this exception propagate
                # Timeout, etc.
                return False, None, bytes_transferred

            bytes_transferred += len(chunk)
            if calculate_hash:
                data_hash.update(b(chunk))

            try:
                chunk = next(generator)
            except StopIteration:
                chunk = ''

        if chunked:
            # Terminating zero-length chunk for chunked transfer encoding
            response.connection.connection.send(b('0\r\n\r\n'))

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        return True, data_hash, bytes_transferred

    def _upload_file(self, response, file_path, chunked=False,
                     calculate_hash=True):
        """
        Upload a file to the server.

        :type response: :class:`RawResponse`
        :param response: RawResponse object.

        :type file_path: ``str``
        :param file_path: Path to a local file.

        :type chunked: ``bool``
        :param chunked: True to use chunked transfer encoding.

        :type calculate_hash: ``bool``
        :param calculate_hash: True to calculate hash of the transferred data.

        :rtype: ``tuple``
        :return: First item is a boolean indicator of success, second
                 one is the uploaded data MD5 hash and the third one
                 is the number of transferred bytes.
        """
        with open(file_path, 'rb') as file_handle:
            success, data_hash, bytes_transferred = (
                self._stream_data(
                    response=response,
                    iterator=iter(file_handle),
                    chunked=chunked,
                    calculate_hash=calculate_hash))

        return success, data_hash, bytes_transferred

    def _get_hash_function(self):
        """
        Return instantiated hash function for the hash type supported by
        the provider.
        """
        try:
            func = getattr(hashlib, self.hash_type)()
        except AttributeError:
            raise RuntimeError('Invalid or unsupported hash type: %s' %
                               (self.hash_type))

        return func
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/__init__.py b/awx/lib/site-packages/libcloud/storage/drivers/__init__.py
new file mode 100644
index 0000000000..fe8b04f388
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/__init__.py
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
"""
Drivers for working with different providers
"""

# Public driver submodules exposed by this package.
__all__ = [
    'dummy',
    'cloudfiles'
]
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/atmos.py b/awx/lib/site-packages/libcloud/storage/drivers/atmos.py
new file mode 100644
index 0000000000..a1e12cc8e5
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/atmos.py
@@ -0,0 +1,472 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import base64
+import hashlib
+import hmac
+import time
+
+from libcloud.utils.py3 import PY3
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import next
+from libcloud.utils.py3 import urlparse
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import urlquote
+from libcloud.utils.py3 import urlunquote
+
+if PY3:
+ from io import FileIO as file
+
+from libcloud.utils.files import read_in_chunks, guess_file_mime_type
+from libcloud.common.base import ConnectionUserAndKey, XmlResponse
+from libcloud.common.types import LibcloudError
+
+from libcloud.storage.base import Object, Container, StorageDriver, CHUNK_SIZE
+from libcloud.storage.types import ContainerAlreadyExistsError, \
+ ContainerDoesNotExistError, ContainerIsNotEmptyError, \
+ ObjectDoesNotExistError
+
+
def collapse(s):
    """Collapse runs of spaces in *s* to single spaces, trimming ends."""
    return ' '.join(filter(None, s.split(' ')))
+
+
class AtmosError(LibcloudError):
    """
    Error raised for Atmos API failures; carries the numeric Atmos
    error ``code`` alongside the LibcloudError message.
    """

    def __init__(self, code, message, driver=None):
        self.code = code
        super(AtmosError, self).__init__(value=message, driver=driver)
+
+
class AtmosResponse(XmlResponse):
    """
    XML response wrapper that recognizes the Atmos success statuses and
    converts error payloads into :class:`AtmosError` exceptions.
    """

    def success(self):
        ok_statuses = (httplib.OK, httplib.CREATED, httplib.NO_CONTENT,
                       httplib.PARTIAL_CONTENT)
        return self.status in ok_statuses

    def parse_error(self):
        tree = self.parse_body()

        if tree is None:
            return None

        # Atmos error bodies contain a numeric <Code> and a <Message>.
        raise AtmosError(code=int(tree.find('Code').text),
                         message=tree.find('Message').text,
                         driver=self.connection.driver)
+
+
class AtmosConnection(ConnectionUserAndKey):
    """
    Connection that signs every request using the EMC Atmos shared-secret
    scheme: HMAC-SHA1 over a canonical string built from the request.
    """
    responseCls = AtmosResponse

    def add_default_headers(self, headers):
        # Every request carries the account UID, the request timestamp
        # (in both the standard 'Date' header and the EMC-specific
        # 'x-emc-date' duplicate) and default content-negotiation headers.
        headers['x-emc-uid'] = self.user_id
        headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                        time.gmtime())
        headers['x-emc-date'] = headers['Date']

        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/octet-stream'
        if 'Accept' not in headers:
            headers['Accept'] = '*/*'

        return headers

    def pre_connect_hook(self, params, headers):
        # Signing happens last so the signature covers the final set of
        # params and headers.
        headers['x-emc-signature'] = self._calculate_signature(params, headers)

        return params, headers

    def _calculate_signature(self, params, headers):
        """
        Build the value for the 'x-emc-signature' header.

        The canonical string consists of: HTTP method, Content-Type,
        Range, Date, the lower-cased request path (including query
        string, relative to the driver's base path), followed by every
        'x-emc-*' header sorted by name with its whitespace collapsed.
        It is HMAC-SHA1 signed with the base64-decoded shared secret and
        the digest is returned base64-encoded.
        """
        pathstring = urlunquote(self.action)
        # Sign the path relative to the driver's base path.
        if pathstring.startswith(self.driver.path):
            pathstring = pathstring[len(self.driver.path):]
        if params:
            if type(params) is dict:
                params = list(params.items())
            pathstring += '?' + urlencode(params)
        pathstring = pathstring.lower()

        # All x-emc-* headers participate in the signature, sorted by
        # header name.
        xhdrs = [(k, v) for k, v in list(headers.items()) if
                 k.startswith('x-emc-')]
        xhdrs.sort(key=lambda x: x[0])

        signature = [
            self.method,
            headers.get('Content-Type', ''),
            headers.get('Range', ''),
            headers.get('Date', ''),
            pathstring,
        ]
        signature.extend([k + ':' + collapse(v) for k, v in xhdrs])
        signature = '\n'.join(signature)
        key = base64.b64decode(self.key)
        signature = hmac.new(b(key), b(signature), hashlib.sha1).digest()
        return base64.b64encode(b(signature)).decode('utf-8')
+
+
class AtmosDriver(StorageDriver):
    """
    EMC Atmos storage driver.

    Containers are modelled as top-level directories in the Atmos
    namespace and objects as entries beneath them.  Requests are signed
    by :class:`AtmosConnection`.
    """
    connectionCls = AtmosConnection

    host = None
    path = None
    api_name = 'atmos'
    supports_chunked_encoding = True
    website = 'http://atmosonline.com/'
    name = 'atmos'

    # Lifetime (seconds) of the signed URLs built by get_object_cdn_url()
    # when the caller does not pass an explicit expiry.
    DEFAULT_CDN_TTL = 60 * 60 * 24 * 7  # 1 week

    def __init__(self, key, secret=None, secure=True, host=None, port=None):
        # Fall back to the class-level host (set by subclasses) when no
        # explicit host is supplied.
        host = host or self.host
        super(AtmosDriver, self).__init__(key, secret, secure, host, port)

    def iterate_containers(self):
        # Containers are the directories at the root of the namespace.
        result = self.connection.request(self._namespace_path(''))
        entries = self._list_objects(result.object, object_type='directory')
        for entry in entries:
            extra = {
                'object_id': entry['id']
            }
            yield Container(entry['name'], extra, self)

    def get_container(self, container_name):
        # Requesting the system metadata both validates existence and
        # yields the directory's object id.
        path = self._namespace_path(container_name) + '/?metadata/system'
        try:
            result = self.connection.request(path)
        except AtmosError:
            e = sys.exc_info()[1]
            # Atmos error code 1003: the requested entry does not exist.
            if e.code != 1003:
                raise
            raise ContainerDoesNotExistError(e, self, container_name)
        meta = self._emc_meta(result)
        extra = {
            'object_id': meta['objectid']
        }
        return Container(container_name, extra, self)

    def create_container(self, container_name):
        path = self._namespace_path(container_name) + '/'
        try:
            self.connection.request(path, method='POST')
        except AtmosError:
            e = sys.exc_info()[1]
            # Atmos error code 1016: the entry already exists.
            if e.code != 1016:
                raise
            raise ContainerAlreadyExistsError(e, self, container_name)
        # Re-fetch so the returned Container carries its object id.
        return self.get_container(container_name)

    def delete_container(self, container):
        try:
            self.connection.request(self._namespace_path(container.name) + '/',
                                    method='DELETE')
        except AtmosError:
            e = sys.exc_info()[1]
            # 1003: directory missing; 1023: directory not empty.
            if e.code == 1003:
                raise ContainerDoesNotExistError(e, self, container.name)
            elif e.code == 1023:
                raise ContainerIsNotEmptyError(e, self, container.name)
            # NOTE(review): any other AtmosError code is silently
            # swallowed here and True is returned -- confirm upstream.
        return True

    def get_object(self, container_name, object_name):
        # Validates the container first; raises ContainerDoesNotExistError
        # if it is missing.
        container = self.get_container(container_name)
        object_name_cleaned = self._clean_object_name(object_name)
        path = self._namespace_path(container_name) + '/' + object_name_cleaned

        try:
            result = self.connection.request(path + '?metadata/system')
            system_meta = self._emc_meta(result)

            result = self.connection.request(path + '?metadata/user')
            user_meta = self._emc_meta(result)
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            raise ObjectDoesNotExistError(e, self, object_name)

        # Convert the ISO-8601 mtime reported by Atmos into the RFC 1123
        # style date string used elsewhere in libcloud.
        last_modified = time.strptime(system_meta['mtime'],
                                      '%Y-%m-%dT%H:%M:%SZ')
        last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                      last_modified)
        extra = {
            'object_id': system_meta['objectid'],
            'last_modified': last_modified
        }
        # 'md5' is driver bookkeeping, not user metadata; surface it as
        # the object hash instead.
        data_hash = user_meta.pop('md5', '')
        return Object(object_name, int(system_meta['size']), data_hash, extra,
                      user_meta, container, self)

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True):
        upload_func = self._upload_file
        upload_func_kwargs = {'file_path': file_path}
        method = 'PUT'

        extra = extra or {}
        object_name_cleaned = self._clean_object_name(object_name)
        request_path = self._namespace_path(container.name) + '/' +\
            object_name_cleaned
        content_type = extra.get('content_type', None)

        # If the object does not exist yet (error 1003) it must be
        # created with POST; existing objects are overwritten with PUT.
        try:
            self.connection.request(request_path + '?metadata/system')
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            method = 'POST'

        result_dict = self._upload_object(
            object_name=object_name,
            content_type=content_type,
            upload_func=upload_func,
            upload_func_kwargs=upload_func_kwargs,
            request_path=request_path,
            request_method=method,
            headers={}, file_path=file_path)

        bytes_transferred = result_dict['bytes_transferred']

        # NOTE(review): 'extra' was normalized to a dict above, so this
        # None branch can never be taken -- dead code upstream.
        if extra is None:
            meta_data = {}
        else:
            meta_data = extra.get('meta_data', {})
        # Record the computed md5 as server-side user metadata, then
        # strip it again from the meta_data returned to the caller.
        meta_data['md5'] = result_dict['data_hash']
        user_meta = ', '.join([k + '=' + str(v) for k, v in
                               list(meta_data.items())])
        self.connection.request(request_path + '?metadata/user', method='POST',
                                headers={'x-emc-meta': user_meta})
        result = self.connection.request(request_path + '?metadata/system')
        meta = self._emc_meta(result)
        del meta_data['md5']
        extra = {
            'object_id': meta['objectid'],
            'meta_data': meta_data,
        }

        return Object(object_name, bytes_transferred, result_dict['data_hash'],
                      extra, meta_data, container, self)

    def upload_object_via_stream(self, iterator, container, object_name,
                                 extra=None):
        if isinstance(iterator, file):
            iterator = iter(iterator)

        data_hash = hashlib.md5()
        generator = read_in_chunks(iterator, CHUNK_SIZE, True)
        bytes_transferred = 0
        try:
            chunk = next(generator)
        except StopIteration:
            chunk = ''

        path = self._namespace_path(container.name + '/' + object_name)
        method = 'PUT'

        if extra is not None:
            content_type = extra.get('content_type', None)
        else:
            content_type = None
        if not content_type:
            content_type, _ = guess_file_mime_type(object_name)

        if not content_type:
            raise AttributeError(
                'File content-type could not be guessed and' +
                ' no content_type value provided')

        # POST creates a new object; PUT overwrites an existing one.
        try:
            self.connection.request(path + '?metadata/system')
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            method = 'POST'

        while True:
            end = bytes_transferred + len(chunk) - 1
            data_hash.update(b(chunk))
            headers = {
                'x-emc-meta': 'md5=' + data_hash.hexdigest(),
                'Content-Type': content_type,
            }

            # After the first chunk, append to the object via a byte
            # Range header; appends must use PUT.
            if len(chunk) > 0 and bytes_transferred > 0:
                headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end)
                method = 'PUT'

            result = self.connection.request(path, method=method, data=chunk,
                                             headers=headers)
            bytes_transferred += len(chunk)

            try:
                chunk = next(generator)
            except StopIteration:
                break
            if len(chunk) == 0:
                break

        data_hash = data_hash.hexdigest()

        # Record the computed md5 as server-side user metadata.
        # NOTE(review): unlike upload_object(), the 'md5' key is left in
        # the meta_data returned to the caller -- confirm upstream.
        if extra is None:
            meta_data = {}
        else:
            meta_data = extra.get('meta_data', {})
        meta_data['md5'] = data_hash
        user_meta = ', '.join([k + '=' + str(v) for k, v in
                               list(meta_data.items())])
        self.connection.request(path + '?metadata/user', method='POST',
                                headers={'x-emc-meta': user_meta})

        result = self.connection.request(path + '?metadata/system')

        meta = self._emc_meta(result)
        extra = {
            'object_id': meta['objectid'],
            'meta_data': meta_data,
        }

        return Object(object_name, bytes_transferred, data_hash, extra,
                      meta_data, container, self)

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        path = self._namespace_path(obj.container.name + '/' + obj.name)
        response = self.connection.request(path, method='GET', raw=True)

        # _save_object streams the raw response body to destination_path.
        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={
                                    'obj': obj,
                                    'response': response.response,
                                    'destination_path': destination_path,
                                    'overwrite_existing': overwrite_existing,
                                    'delete_on_failure': delete_on_failure
                                },
                                success_status_code=httplib.OK)

    def download_object_as_stream(self, obj, chunk_size=None):
        path = self._namespace_path(obj.container.name + '/' + obj.name)
        response = self.connection.request(path, method='GET', raw=True)

        # Returns a generator yielding the body in chunk_size pieces.
        return self._get_object(obj=obj, callback=read_in_chunks,
                                response=response,
                                callback_kwargs={
                                    'iterator': response.response,
                                    'chunk_size': chunk_size
                                },
                                success_status_code=httplib.OK)

    def delete_object(self, obj):
        path = self._namespace_path(obj.container.name) + '/' +\
            self._clean_object_name(obj.name)
        try:
            self.connection.request(path, method='DELETE')
        except AtmosError:
            e = sys.exc_info()[1]
            # 1003: the object does not exist.
            if e.code != 1003:
                raise
            raise ObjectDoesNotExistError(e, self, obj.name)
        return True

    def enable_object_cdn(self, obj):
        # No-op: Atmos objects are reachable via the signed URLs produced
        # by get_object_cdn_url() without any prior enablement.
        return True

    def get_object_cdn_url(self, obj, expiry=None, use_object=False):
        """
        Return a object CDN URL.

        :param obj: Object instance
        :type obj: :class:`Object`

        :param expiry: Expiry
        :type expiry: ``str``

        :param use_object: Use object
        :type use_object: ``bool``

        :rtype: ``str``
        """
        if use_object:
            # NOTE(review): the other methods in this driver store
            # 'object_id' in Object.extra, not Object.meta_data -- this
            # lookup likely raises KeyError; confirm upstream.
            path = '/rest/objects' + obj.meta_data['object_id']
        else:
            path = '/rest/namespace/' + obj.container.name + '/' + obj.name

        if self.secure:
            protocol = 'https'
        else:
            protocol = 'http'

        expiry = str(expiry or int(time.time()) + self.DEFAULT_CDN_TTL)
        params = [
            ('uid', self.key),
            ('expires', expiry),
        ]
        params.append(('signature', self._cdn_signature(path, params, expiry)))

        params = urlencode(params)
        path = self.path + path
        return urlparse.urlunparse((protocol, self.host, path, '', params, ''))

    def _cdn_signature(self, path, params, expiry):
        # NOTE(review): unlike AtmosConnection._calculate_signature, the
        # hmac message here is not wrapped with b(); hmac.new() requires
        # bytes on Python 3, so this likely breaks there -- confirm
        # upstream.
        key = base64.b64decode(self.secret)
        signature = '\n'.join(['GET', path.lower(), self.key, expiry])
        signature = hmac.new(key, signature, hashlib.sha1).digest()

        return base64.b64encode(signature)

    def _list_objects(self, tree, object_type=None):
        """
        Parse a DirectoryList XML response into a list of dicts with
        'id', 'type' and 'name' keys, optionally filtered by FileType.
        """
        listing = tree.find(self._emc_tag('DirectoryList'))
        entries = []
        for entry in listing.findall(self._emc_tag('DirectoryEntry')):
            file_type = entry.find(self._emc_tag('FileType')).text
            if object_type is not None and object_type != file_type:
                continue
            entries.append({
                'id': entry.find(self._emc_tag('ObjectID')).text,
                'type': file_type,
                'name': entry.find(self._emc_tag('Filename')).text
            })
        return entries

    def _clean_object_name(self, name):
        # Percent-encode the name so it is safe inside a request path.
        return urlquote(name.encode('ascii'))

    def _namespace_path(self, path):
        # Namespace interface: entries are addressed by directory path.
        return self.path + '/rest/namespace/' + urlquote(path.encode('ascii'))

    def _object_path(self, object_id):
        # Object interface: entries are addressed by their object id.
        # NOTE(review): str + bytes concatenation -- this raises
        # TypeError on Python 3; confirm upstream.
        return self.path + '/rest/objects/' + object_id.encode('ascii')

    @staticmethod
    def _emc_tag(tag):
        # Qualify a tag name with the EMC XML namespace.
        return '{http://www.emc.com/cos/}' + tag

    def _emc_meta(self, response):
        """
        Parse the comma-separated 'key=value' pairs in the 'x-emc-meta'
        response header into a dict.
        """
        meta = response.headers.get('x-emc-meta', '')
        if len(meta) == 0:
            return {}
        meta = meta.split(', ')
        return dict([x.split('=', 1) for x in meta])

    def iterate_container_objects(self, container):
        headers = {'x-emc-include-meta': '1'}
        path = self._namespace_path(container.name) + '/'
        result = self.connection.request(path, headers=headers)
        entries = self._list_objects(result.object, object_type='regular')
        for entry in entries:
            metadata = {'object_id': entry['id']}
            # NOTE(review): size/hash are not populated and 'object_id'
            # lands in meta_data here, while get_object() places it in
            # extra -- confirm the inconsistency upstream.
            yield Object(entry['name'], 0, '', {}, metadata, container, self)
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/azure_blobs.py b/awx/lib/site-packages/libcloud/storage/drivers/azure_blobs.py
new file mode 100644
index 0000000000..8d00a8dcf8
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/azure_blobs.py
@@ -0,0 +1,986 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import with_statement
+
+import base64
+import os
+import binascii
+
+from xml.etree.ElementTree import Element, SubElement
+
+from libcloud.utils.py3 import PY3
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlquote
+from libcloud.utils.py3 import tostring
+from libcloud.utils.py3 import b
+
+from libcloud.utils.xml import fixxpath
+from libcloud.utils.files import read_in_chunks
+from libcloud.common.types import LibcloudError
+from libcloud.common.azure import AzureConnection
+
+from libcloud.storage.base import Object, Container, StorageDriver
+from libcloud.storage.types import ContainerIsNotEmptyError
+from libcloud.storage.types import ContainerAlreadyExistsError
+from libcloud.storage.types import InvalidContainerNameError
+from libcloud.storage.types import ContainerDoesNotExistError
+from libcloud.storage.types import ObjectDoesNotExistError
+from libcloud.storage.types import ObjectHashMismatchError
+
+if PY3:
+ from io import FileIO as file
+
+# Desired number of items in each response inside a paginated request
+RESPONSES_PER_REQUEST = 100
+
+# As per the Azure documentation, if the upload file size is less than
+# 64MB, we can upload it in a single request. However, in real life azure
+# servers seem to disconnect randomly after around 5 MB or 200s of upload.
+# So, it is better that for file sizes greater than 4MB, we upload it in
+# chunks.
+# Also, with large sizes, if we use a lease, the lease will timeout after
+# 60 seconds, but the upload might still be in progress. This can be
+# handled in code, but if we use chunked uploads, the lease renewal will
+# happen automatically.
+AZURE_BLOCK_MAX_SIZE = 4 * 1024 * 1024
+
+# Azure block blocks must be maximum 4MB
+# Azure page blobs must be aligned in 512 byte boundaries (4MB fits that)
+AZURE_CHUNK_SIZE = 4 * 1024 * 1024
+
+# Azure page blob must be aligned in 512 byte boundaries
+AZURE_PAGE_CHUNK_SIZE = 512
+
+# The time period (in seconds) for which a lease must be obtained.
+# If set as -1, we get an infinite lease, but that is a bad idea. If
+# after getting an infinite lease, there was an issue in releasing the
+# lease, the object will remain 'locked' forever, unless the lease is
+# released using the lease_id (which is not exposed to the user)
+AZURE_LEASE_PERIOD = 60
+
+AZURE_STORAGE_HOST_SUFFIX = 'blob.core.windows.net'
+
+
class AzureBlobLease(object):
    """
    A context manager to help in leasing an azure blob and renewing the
    lease while a long upload is in progress.
    """
    def __init__(self, driver, object_path, use_lease):
        """
        :param driver: The Azure storage driver that is being used
        :type driver: :class:`AzureStorageDriver`

        :param object_path: The path of the object we need to lease
        :type object_path: ``str``

        :param use_lease: Indicates if we must take a lease or not
        :type use_lease: ``bool``
        """
        self.object_path = object_path
        self.driver = driver
        self.use_lease = use_lease
        # Set by __enter__() once a lease is acquired; None means no
        # lease is currently held.
        self.lease_id = None
        self.params = {'comp': 'lease'}

    def renew(self):
        """
        Renew the held lease for another 60 seconds.

        No-op when no lease is held (use_lease was False, or the blob
        did not exist when the lease was requested).
        """
        if self.lease_id is None:
            return

        headers = {'x-ms-lease-action': 'renew',
                   'x-ms-lease-id': self.lease_id,
                   'x-ms-lease-duration': '60'}

        response = self.driver.connection.request(self.object_path,
                                                  headers=headers,
                                                  params=self.params,
                                                  method='PUT')

        if response.status != httplib.OK:
            raise LibcloudError('Unable to obtain lease', driver=self)

    def update_headers(self, headers):
        """
        Add the lease id to *headers* (in place) if a lease is held.
        """
        if self.lease_id:
            headers['x-ms-lease-id'] = self.lease_id

    def __enter__(self):
        if not self.use_lease:
            return self

        headers = {'x-ms-lease-action': 'acquire',
                   'x-ms-lease-duration': '60'}

        response = self.driver.connection.request(self.object_path,
                                                  headers=headers,
                                                  params=self.params,
                                                  method='PUT')

        # NOT_FOUND means the blob does not exist yet; continue without
        # a lease in that case.
        if response.status == httplib.NOT_FOUND:
            return self
        elif response.status != httplib.CREATED:
            raise LibcloudError('Unable to obtain lease', driver=self)

        self.lease_id = response.headers['x-ms-lease-id']
        return self

    def __exit__(self, type, value, traceback):
        # Release the lease (if any) when leaving the 'with' block.
        if self.lease_id is None:
            return

        headers = {'x-ms-lease-action': 'release',
                   'x-ms-lease-id': self.lease_id}
        response = self.driver.connection.request(self.object_path,
                                                  headers=headers,
                                                  params=self.params,
                                                  method='PUT')

        if response.status != httplib.OK:
            raise LibcloudError('Unable to release lease', driver=self)
+
+
class AzureBlobsConnection(AzureConnection):
    """
    Represents a single connection to the Azure Blobs endpoint.

    All behaviour (authentication, default headers) is inherited from
    AzureConnection; this subclass only gives the storage driver its own
    connection type.
    """
+
+
+class AzureBlobsStorageDriver(StorageDriver):
+ name = 'Microsoft Azure (blobs)'
+ website = 'http://windows.azure.com/'
+ connectionCls = AzureBlobsConnection
+ hash_type = 'md5'
+ supports_chunked_encoding = False
+ ex_blob_type = 'BlockBlob'
+
    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        # Remember whether the caller supplied an explicit host so that
        # _ex_connection_class_kwargs() only synthesizes the default
        # '<account>.blob.core.windows.net' host when it was not.
        self._host_argument_set = bool(host)

        # B64decode() this key and keep it, so that we don't have to do
        # so for every request. Minor performance improvement
        secret = base64.b64decode(b(secret))

        super(AzureBlobsStorageDriver, self).__init__(key=key, secret=secret,
                                                      secure=secure, host=host,
                                                      port=port, **kwargs)
+
+ def _ex_connection_class_kwargs(self):
+ result = {}
+
+ # host argument has precedence
+ if not self._host_argument_set:
+ result['host'] = '%s.%s' % (self.key, AZURE_STORAGE_HOST_SUFFIX)
+
+ return result
+
+ def _xml_to_container(self, node):
+ """
+ Converts a container XML node to a container instance
+
+ :param node: XML info of the container
+ :type node: :class:`xml.etree.ElementTree.Element`
+
+ :return: A container instance
+ :rtype: :class:`Container`
+ """
+
+ name = node.findtext(fixxpath(xpath='Name'))
+ props = node.find(fixxpath(xpath='Properties'))
+ metadata = node.find(fixxpath(xpath='Metadata'))
+
+ extra = {
+ 'url': node.findtext(fixxpath(xpath='Url')),
+ 'last_modified': node.findtext(fixxpath(xpath='Last-Modified')),
+ 'etag': props.findtext(fixxpath(xpath='Etag')),
+ 'lease': {
+ 'status': props.findtext(fixxpath(xpath='LeaseStatus')),
+ 'state': props.findtext(fixxpath(xpath='LeaseState')),
+ 'duration': props.findtext(fixxpath(xpath='LeaseDuration')),
+ },
+ 'meta_data': {}
+ }
+
+ for meta in metadata.getchildren():
+ extra['meta_data'][meta.tag] = meta.text
+
+ return Container(name=name, extra=extra, driver=self)
+
    def _response_to_container(self, container_name, response):
        """
        Converts a HTTP response to a container instance

        :param container_name: Name of the container
        :type container_name: ``str``

        :param response: HTTP Response from a HEAD/PUT container request
        :type response: libcloud response object

        :return: A container instance
        :rtype: :class:`Container`
        """

        headers = response.headers
        extra = {
            'url': 'http://%s%s' % (response.connection.host,
                                    response.connection.action),
            'etag': headers['etag'],
            'last_modified': headers['last-modified'],
            'lease': {
                'status': headers.get('x-ms-lease-status', None),
                'state': headers.get('x-ms-lease-state', None),
                'duration': headers.get('x-ms-lease-duration', None),
            },
            'meta_data': {}
        }

        # Azure returns user metadata as 'x-ms-meta-<key>' headers.
        for key, value in response.headers.items():
            if key.startswith('x-ms-meta-'):
                key = key.split('x-ms-meta-')[1]
                extra['meta_data'][key] = value

        return Container(name=container_name, extra=extra, driver=self)
+
+ def _xml_to_object(self, container, blob):
+ """
+ Converts a BLOB XML node to an object instance
+
+ :param container: Instance of the container holding the blob
+ :type: :class:`Container`
+
+ :param blob: XML info of the blob
+ :type blob: L{}
+
+ :return: An object instance
+ :rtype: :class:`Object`
+ """
+
+ name = blob.findtext(fixxpath(xpath='Name'))
+ props = blob.find(fixxpath(xpath='Properties'))
+ metadata = blob.find(fixxpath(xpath='Metadata'))
+ etag = props.findtext(fixxpath(xpath='Etag'))
+ size = int(props.findtext(fixxpath(xpath='Content-Length')))
+
+ extra = {
+ 'content_type': props.findtext(fixxpath(xpath='Content-Type')),
+ 'etag': etag,
+ 'md5_hash': props.findtext(fixxpath(xpath='Content-MD5')),
+ 'last_modified': props.findtext(fixxpath(xpath='Last-Modified')),
+ 'url': blob.findtext(fixxpath(xpath='Url')),
+ 'hash': props.findtext(fixxpath(xpath='Etag')),
+ 'lease': {
+ 'status': props.findtext(fixxpath(xpath='LeaseStatus')),
+ 'state': props.findtext(fixxpath(xpath='LeaseState')),
+ 'duration': props.findtext(fixxpath(xpath='LeaseDuration')),
+ },
+ 'content_encoding': props.findtext(fixxpath(
+ xpath='Content-Encoding')),
+ 'content_language': props.findtext(fixxpath(
+ xpath='Content-Language')),
+ 'blob_type': props.findtext(fixxpath(xpath='BlobType'))
+ }
+
+ if extra['md5_hash']:
+ value = binascii.hexlify(base64.b64decode(b(extra['md5_hash'])))
+ value = value.decode('ascii')
+ extra['md5_hash'] = value
+
+ meta_data = {}
+ for meta in metadata.getchildren():
+ meta_data[meta.tag] = meta.text
+
+ return Object(name=name, size=size, hash=etag, meta_data=meta_data,
+ extra=extra, container=container, driver=self)
+
    def _response_to_object(self, object_name, container, response):
        """
        Converts a HTTP response to an object (from headers)

        :param object_name: Name of the object
        :type object_name: ``str``

        :param container: Instance of the container holding the blob
        :type: :class:`Container`

        :param response: HTTP Response from a HEAD object request
        :type response: libcloud response object

        :return: An object instance
        :rtype: :class:`Object`
        """

        headers = response.headers
        size = int(headers['content-length'])
        etag = headers['etag']

        extra = {
            'url': 'http://%s%s' % (response.connection.host,
                                    response.connection.action),
            'etag': etag,
            'md5_hash': headers.get('content-md5', None),
            'content_type': headers.get('content-type', None),
            'content_language': headers.get('content-language', None),
            'content_encoding': headers.get('content-encoding', None),
            'last_modified': headers['last-modified'],
            'lease': {
                'status': headers.get('x-ms-lease-status', None),
                'state': headers.get('x-ms-lease-state', None),
                'duration': headers.get('x-ms-lease-duration', None),
            },
            'blob_type': headers['x-ms-blob-type']
        }

        # Azure reports Content-MD5 base64-encoded; convert it to the
        # hex digest form used by the other storage drivers.
        if extra['md5_hash']:
            value = binascii.hexlify(base64.b64decode(b(extra['md5_hash'])))
            value = value.decode('ascii')
            extra['md5_hash'] = value

        meta_data = {}
        # Azure returns user metadata as 'x-ms-meta-<key>' headers.
        for key, value in response.headers.items():
            if key.startswith('x-ms-meta-'):
                key = key.split('x-ms-meta-')[1]
                meta_data[key] = value

        return Object(name=object_name, size=size, hash=etag, extra=extra,
                      meta_data=meta_data, container=container, driver=self)
+
    def iterate_containers(self):
        """
        @inherits: :class:`StorageDriver.iterate_containers`
        """
        # Listings are paginated; 'marker' carries the continuation
        # token between requests.
        params = {'comp': 'list',
                  'maxresults': RESPONSES_PER_REQUEST,
                  'include': 'metadata'}

        while True:
            response = self.connection.request('/', params)
            if response.status != httplib.OK:
                raise LibcloudError('Unexpected status code: %s' %
                                    (response.status), driver=self)

            body = response.parse_body()
            containers = body.find(fixxpath(xpath='Containers'))
            containers = containers.findall(fixxpath(xpath='Container'))

            for container in containers:
                yield self._xml_to_container(container)

            # NOTE(review): 'NextMarker' is looked up without the
            # fixxpath() namespace wrapper used for every other element;
            # if the listing XML is namespaced this is always None and
            # pagination stops after the first page -- confirm upstream.
            params['marker'] = body.findtext('NextMarker')
            if not params['marker']:
                break
+
    def iterate_container_objects(self, container):
        """
        @inherits: :class:`StorageDriver.iterate_container_objects`
        """
        # Listings are paginated; 'marker' carries the continuation
        # token between requests.
        params = {'restype': 'container',
                  'comp': 'list',
                  'maxresults': RESPONSES_PER_REQUEST,
                  'include': 'metadata'}

        container_path = self._get_container_path(container)

        while True:
            response = self.connection.request(container_path,
                                               params=params)

            if response.status == httplib.NOT_FOUND:
                raise ContainerDoesNotExistError(value=None,
                                                 driver=self,
                                                 container_name=container.name)

            elif response.status != httplib.OK:
                raise LibcloudError('Unexpected status code: %s' %
                                    (response.status), driver=self)

            body = response.parse_body()
            blobs = body.find(fixxpath(xpath='Blobs'))
            blobs = blobs.findall(fixxpath(xpath='Blob'))

            for blob in blobs:
                yield self._xml_to_object(container, blob)

            # NOTE(review): 'NextMarker' is looked up without the
            # fixxpath() namespace wrapper used for every other element;
            # if the listing XML is namespaced this is always None and
            # pagination stops after the first page -- confirm upstream.
            params['marker'] = body.findtext('NextMarker')
            if not params['marker']:
                break
+
+ def get_container(self, container_name):
+ """
+ @inherits: :class:`StorageDriver.get_container`
+ """
+ params = {'restype': 'container'}
+
+ container_path = '/%s' % (container_name)
+
+ response = self.connection.request(container_path, params=params,
+ method='HEAD')
+
+ if response.status == httplib.NOT_FOUND:
+ raise ContainerDoesNotExistError('Container %s does not exist' %
+ (container_name), driver=self,
+ container_name=container_name)
+ elif response.status != httplib.OK:
+ raise LibcloudError('Unexpected status code: %s' %
+ (response.status), driver=self)
+
+ return self._response_to_container(container_name, response)
+
+ def get_object(self, container_name, object_name):
+ """
+ @inherits: :class:`StorageDriver.get_object`
+ """
+
+ container = self.get_container(container_name=container_name)
+ object_path = self._get_object_path(container, object_name)
+
+ response = self.connection.request(object_path, method='HEAD')
+
+ if response.status == httplib.OK:
+ obj = self._response_to_object(object_name, container, response)
+ return obj
+
+ raise ObjectDoesNotExistError(value=None, driver=self,
+ object_name=object_name)
+
+ def _get_container_path(self, container):
+ """
+ Return a container path
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :return: A path for this container.
+ :rtype: ``str``
+ """
+ return '/%s' % (container.name)
+
+ def _get_object_path(self, container, object_name):
+ """
+ Return an object's CDN path.
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :param object_name: Object name
+ :type object_name: :class:`str`
+
+ :return: A path for this object.
+ :rtype: ``str``
+ """
+ container_url = self._get_container_path(container)
+ object_name_cleaned = urlquote(object_name)
+ object_path = '%s/%s' % (container_url, object_name_cleaned)
+ return object_path
+
    def create_container(self, container_name):
        """
        @inherits: :class:`StorageDriver.create_container`
        """
        params = {'restype': 'container'}

        container_path = '/%s' % (container_name)
        response = self.connection.request(container_path, params=params,
                                           method='PUT')

        # 201 -> created, 409 -> name taken, 400 -> invalid name.
        if response.status == httplib.CREATED:
            return self._response_to_container(container_name, response)
        elif response.status == httplib.CONFLICT:
            raise ContainerAlreadyExistsError(
                value='Container with this name already exists. The name must '
                'be unique among all the containers in the system',
                container_name=container_name, driver=self)
        elif response.status == httplib.BAD_REQUEST:
            raise InvalidContainerNameError(value='Container name contains ' +
                                            'invalid characters.',
                                            container_name=container_name,
                                            driver=self)

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)
+
    def delete_container(self, container):
        """
        @inherits: :class:`StorageDriver.delete_container`
        """
        # Azure does not check if the container is empty. So, we will do
        # a check to ensure that the behaviour is similar to other drivers.
        # The loop raises on the first object yielded, so it runs at most
        # one iteration.
        for obj in container.iterate_objects():
            raise ContainerIsNotEmptyError(
                value='Container must be empty before it can be deleted.',
                container_name=container.name, driver=self)

        params = {'restype': 'container'}
        container_path = self._get_container_path(container)

        # Note: All the objects in the container must be deleted first
        response = self.connection.request(container_path, params=params,
                                           method='DELETE')

        if response.status == httplib.ACCEPTED:
            return True
        elif response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value=None,
                                             driver=self,
                                             container_name=container.name)

        return False
+
    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        """
        @inherits: :class:`StorageDriver.download_object`
        """
        obj_path = self._get_object_path(obj.container, obj.name)
        # raw=True keeps the body unconsumed so it can be streamed to
        # disk by _save_object.
        response = self.connection.request(obj_path, raw=True, data=None)

        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={
                                    'obj': obj,
                                    'response': response.response,
                                    'destination_path': destination_path,
                                    'overwrite_existing': overwrite_existing,
                                    'delete_on_failure': delete_on_failure},
                                success_status_code=httplib.OK)
+
    def download_object_as_stream(self, obj, chunk_size=None):
        """
        @inherits: :class:`StorageDriver.download_object_as_stream`
        """
        obj_path = self._get_object_path(obj.container, obj.name)
        # raw=True keeps the body unconsumed so it can be yielded lazily
        # in chunk_size pieces by read_in_chunks.
        response = self.connection.request(obj_path, raw=True, data=None)

        return self._get_object(obj=obj, callback=read_in_chunks,
                                response=response,
                                callback_kwargs={'iterator': response.response,
                                                 'chunk_size': chunk_size},
                                success_status_code=httplib.OK)
+
+    def _upload_in_chunks(self, response, data, iterator, object_path,
+                          blob_type, lease, calculate_hash=True):
+        """
+        Uploads data from an iterator in fixed sized chunks to Azure Storage.
+
+        Block blobs are uploaded with "Put Block" requests followed by a
+        final block list commit; page blobs with "Put Page" range updates.
+
+        :param response: Response object from the initial POST request
+        :type response: :class:`RawResponse`
+
+        :param data: Any data from the initial POST request
+        :type data: ``str``
+
+        :param iterator: The generator for fetching the upload data
+        :type iterator: ``generator``
+
+        :param object_path: The path of the object to which we are uploading
+        :type object_path: ``str``
+
+        :param blob_type: The blob type being uploaded
+                          ('BlockBlob' or 'PageBlob')
+        :type blob_type: ``str``
+
+        :param lease: The lease object to be used for renewal
+        :type lease: :class:`AzureBlobLease`
+
+        :keyword calculate_hash: Indicates if we must calculate the data hash
+        :type calculate_hash: ``bool``
+
+        :return: A tuple of (status, checksum, bytes transferred)
+        :rtype: ``tuple``
+        """
+
+        # The initial request must have succeeded before chunks are sent
+        if response.status != httplib.CREATED:
+            raise LibcloudError('Error initializing upload. Code: %d' %
+                                (response.status), driver=self)
+
+        data_hash = None
+        if calculate_hash:
+            # Running hash over the whole payload (for the final checksum)
+            data_hash = self._get_hash_function()
+
+        bytes_transferred = 0
+        count = 1
+        chunks = []
+        headers = {}
+
+        lease.update_headers(headers)
+
+        if blob_type == 'BlockBlob':
+            params = {'comp': 'block'}
+        else:
+            params = {'comp': 'page'}
+
+        # Read the input data in chunk sizes suitable for Azure
+        for data in read_in_chunks(iterator, AZURE_CHUNK_SIZE):
+            data = b(data)
+            content_length = len(data)
+            offset = bytes_transferred
+            bytes_transferred += content_length
+
+            if calculate_hash:
+                data_hash.update(data)
+
+            # Per-chunk MD5 so the service can verify each request body
+            chunk_hash = self._get_hash_function()
+            chunk_hash.update(data)
+            chunk_hash = base64.b64encode(b(chunk_hash.digest()))
+
+            headers['Content-MD5'] = chunk_hash.decode('utf-8')
+            headers['Content-Length'] = content_length
+
+            if blob_type == 'BlockBlob':
+                # Block id can be any unique string that is base64 encoded
+                # A 10 digit number can hold the max value of 50000 blocks
+                # that are allowed for azure
+                block_id = base64.b64encode(b('%10d' % (count)))
+                block_id = block_id.decode('utf-8')
+                params['blockid'] = block_id
+
+                # Keep this data for a later commit
+                chunks.append(block_id)
+            else:
+                # Page blobs are written in place at an explicit byte range
+                headers['x-ms-page-write'] = 'update'
+                headers['x-ms-range'] = 'bytes=%d-%d' % \
+                    (offset, bytes_transferred-1)
+
+            # Renew lease before updating
+            lease.renew()
+
+            resp = self.connection.request(object_path, method='PUT',
+                                           data=data, headers=headers,
+                                           params=params)
+
+            if resp.status != httplib.CREATED:
+                resp.parse_error()
+                raise LibcloudError('Error uploading chunk %d. Code: %d' %
+                                    (count, resp.status), driver=self)
+
+            count += 1
+
+        if calculate_hash:
+            data_hash = data_hash.hexdigest()
+
+        if blob_type == 'BlockBlob':
+            # Commit the accumulated block list to assemble the blob
+            self._commit_blocks(object_path, chunks, lease)
+
+        # The Azure service does not return a hash immediately for
+        # chunked uploads. It takes some time for the data to get synced
+        response.headers['content-md5'] = None
+
+        return (True, data_hash, bytes_transferred)
+
+    def _commit_blocks(self, object_path, chunks, lease):
+        """
+        Makes a final commit of the uploaded blocks ("Put Block List").
+
+        :param object_path: Server side object path.
+        :type object_path: ``str``
+
+        :param chunks: A list of base64 encoded block ids, in upload order.
+        :type chunks: ``list``
+
+        :param lease: The lease object to be used for renewal.
+        :type lease: :class:`AzureBlobLease`
+        """
+
+        # Build the XML block list; 'Uncommitted' selects blocks staged by
+        # the preceding Put Block requests
+        root = Element('BlockList')
+
+        for block_id in chunks:
+            part = SubElement(root, 'Uncommitted')
+            part.text = str(block_id)
+
+        data = tostring(root)
+        params = {'comp': 'blocklist'}
+        headers = {}
+
+        lease.update_headers(headers)
+        lease.renew()
+
+        response = self.connection.request(object_path, data=data,
+                                           params=params, headers=headers,
+                                           method='PUT')
+
+        if response.status != httplib.CREATED:
+            raise LibcloudError('Error in blocklist commit', driver=self)
+
+    def _check_values(self, blob_type, object_size):
+        """
+        Checks if extension arguments are valid.
+
+        :param blob_type: The blob type that is being uploaded
+        :type blob_type: ``str``
+
+        :param object_size: The (max) size of the object being uploaded
+        :type object_size: ``int``
+
+        :raises LibcloudError: On an unknown blob type, a missing page blob
+            size, or a page blob size not aligned to the page boundary.
+        """
+
+        if blob_type not in ['BlockBlob', 'PageBlob']:
+            raise LibcloudError('Invalid blob type', driver=self)
+
+        if blob_type == 'PageBlob':
+            if not object_size:
+                raise LibcloudError('Max blob size is mandatory for page blob',
+                                    driver=self)
+
+            # Page blobs must be sized in whole pages
+            if object_size % AZURE_PAGE_CHUNK_SIZE:
+                raise LibcloudError('Max blob size is not aligned to '
+                                    'page boundary', driver=self)
+
+    def upload_object(self, file_path, container, object_name, extra=None,
+                      verify_hash=True, ex_blob_type=None, ex_use_lease=False):
+        """
+        Upload an object currently located on a disk.
+
+        @inherits: :class:`StorageDriver.upload_object`
+
+        :param ex_blob_type: Storage class ('BlockBlob' or 'PageBlob')
+        :type ex_blob_type: ``str``
+
+        :param ex_use_lease: Indicates if we must take a lease before upload
+        :type ex_use_lease: ``bool``
+        """
+
+        if ex_blob_type is None:
+            ex_blob_type = self.ex_blob_type
+
+        # Get the size of the file
+        file_size = os.stat(file_path).st_size
+
+        # The presumed size of the object
+        object_size = file_size
+
+        self._check_values(ex_blob_type, file_size)
+
+        with file(file_path, 'rb') as file_handle:
+            iterator = iter(file_handle)
+
+            # If size is greater than 64MB or type is Page, upload in chunks
+            if ex_blob_type == 'PageBlob' or file_size > AZURE_BLOCK_MAX_SIZE:
+                # For chunked upload of block blobs, the initial size must
+                # be 0.
+                if ex_blob_type == 'BlockBlob':
+                    object_size = None
+
+                object_path = self._get_object_path(container, object_name)
+
+                upload_func = self._upload_in_chunks
+                upload_func_kwargs = {'iterator': iterator,
+                                      'object_path': object_path,
+                                      'blob_type': ex_blob_type,
+                                      'lease': None}
+            else:
+                # Small block blobs go up in a single streamed request
+                upload_func = self._stream_data
+                upload_func_kwargs = {'iterator': iterator,
+                                      'chunked': False,
+                                      'calculate_hash': verify_hash}
+
+            return self._put_object(container=container,
+                                    object_name=object_name,
+                                    object_size=object_size,
+                                    upload_func=upload_func,
+                                    upload_func_kwargs=upload_func_kwargs,
+                                    file_path=file_path, extra=extra,
+                                    verify_hash=verify_hash,
+                                    blob_type=ex_blob_type,
+                                    use_lease=ex_use_lease)
+
+    def upload_object_via_stream(self, iterator, container, object_name,
+                                 verify_hash=False, extra=None,
+                                 ex_use_lease=False, ex_blob_type=None,
+                                 ex_page_blob_size=None):
+        """
+        Upload an object from a data stream (always chunked).
+
+        @inherits: :class:`StorageDriver.upload_object_via_stream`
+
+        :param ex_blob_type: Storage class ('BlockBlob' or 'PageBlob')
+        :type ex_blob_type: ``str``
+
+        :param ex_page_blob_size: The maximum size to which the
+            page blob can grow to
+        :type ex_page_blob_size: ``int``
+
+        :param ex_use_lease: Indicates if we must take a lease before upload
+        :type ex_use_lease: ``bool``
+        """
+
+        if ex_blob_type is None:
+            ex_blob_type = self.ex_blob_type
+
+        self._check_values(ex_blob_type, ex_page_blob_size)
+
+        object_path = self._get_object_path(container, object_name)
+
+        upload_func = self._upload_in_chunks
+        upload_func_kwargs = {'iterator': iterator,
+                              'object_path': object_path,
+                              'blob_type': ex_blob_type,
+                              'lease': None}
+
+        return self._put_object(container=container,
+                                object_name=object_name,
+                                object_size=ex_page_blob_size,
+                                upload_func=upload_func,
+                                upload_func_kwargs=upload_func_kwargs,
+                                extra=extra, verify_hash=verify_hash,
+                                blob_type=ex_blob_type,
+                                use_lease=ex_use_lease)
+
+    def delete_object(self, obj):
+        """
+        Delete an object from its container.
+
+        @inherits: :class:`StorageDriver.delete_object`
+        """
+        object_path = self._get_object_path(obj.container, obj.name)
+        response = self.connection.request(object_path, method='DELETE')
+
+        # Azure answers 202 Accepted on successful delete
+        if response.status == httplib.ACCEPTED:
+            return True
+        elif response.status == httplib.NOT_FOUND:
+            raise ObjectDoesNotExistError(value=None, driver=self,
+                                          object_name=obj.name)
+
+        return False
+
+    def _update_metadata(self, headers, meta_data):
+        """
+        Update the given headers dictionary in place with the user metadata,
+        prefixing every key with 'x-ms-meta-' as required by Azure.
+
+        :param headers: The headers dictionary to be updated
+        :type headers: ``dict``
+
+        :param meta_data: Metadata key value pairs
+        :type meta_data: ``dict``
+        """
+        for key, value in list(meta_data.items()):
+            key = 'x-ms-meta-%s' % (key)
+            headers[key] = value
+
+    def _prepare_upload_headers(self, object_name, object_size,
+                                extra, meta_data, blob_type):
+        """
+        Prepare headers for uploading an object.
+
+        :param object_name: The full name of the object being updated
+        :type object_name: ``str``
+
+        :param object_size: The size of the object. In case of PageBlobs,
+            this indicates the maximum size the blob can grow to
+        :type object_size: ``int``
+
+        :param extra: Extra control data for the upload
+        :type extra: ``dict``
+
+        :param meta_data: Metadata key value pairs
+        :type meta_data: ``dict``
+
+        :param blob_type: Page or Block blob type
+        :type blob_type: ``str``
+
+        :return: The prepared request headers.
+        :rtype: ``dict``
+        """
+        headers = {}
+
+        if blob_type is None:
+            blob_type = self.ex_blob_type
+
+        headers['x-ms-blob-type'] = blob_type
+
+        self._update_metadata(headers, meta_data)
+
+        if object_size is not None:
+            headers['Content-Length'] = object_size
+
+        if blob_type == 'PageBlob':
+            # Page blob creation carries no body; the maximum size is
+            # declared via the x-ms-blob-content-length header instead
+            headers['Content-Length'] = 0
+            headers['x-ms-blob-content-length'] = object_size
+
+        return headers
+
+    def _put_object(self, container, object_name, object_size, upload_func,
+                    upload_func_kwargs, file_path=None, extra=None,
+                    verify_hash=True, blob_type=None, use_lease=False):
+        """
+        Control function that does the real job of uploading data to a blob.
+
+        Optionally acquires a lease for the duration of the upload, delegates
+        the data transfer to ``upload_func`` and verifies the returned MD5
+        hash against the server's, when one is available.
+        """
+        extra = extra or {}
+        meta_data = extra.get('meta_data', {})
+        content_type = extra.get('content_type', None)
+
+        headers = self._prepare_upload_headers(object_name, object_size,
+                                               extra, meta_data, blob_type)
+
+        object_path = self._get_object_path(container, object_name)
+
+        # Get a lease if required and do the operations
+        with AzureBlobLease(self, object_path, use_lease) as lease:
+            if 'lease' in upload_func_kwargs:
+                # Chunked uploads renew the lease between chunks
+                upload_func_kwargs['lease'] = lease
+
+            lease.update_headers(headers)
+
+            iterator = iter('')
+            result_dict = self._upload_object(object_name, content_type,
+                                              upload_func, upload_func_kwargs,
+                                              object_path, headers=headers,
+                                              file_path=file_path,
+                                              iterator=iterator)
+
+            response = result_dict['response']
+            bytes_transferred = result_dict['bytes_transferred']
+            data_hash = result_dict['data_hash']
+            headers = response.headers
+            response = response.response
+
+        if response.status != httplib.CREATED:
+            raise LibcloudError(
+                'Unexpected status code, status_code=%s' % (response.status),
+                driver=self)
+
+        server_hash = headers['content-md5']
+
+        if server_hash:
+            # Azure returns the MD5 base64 encoded; convert to hex to match
+            # the locally computed digest
+            server_hash = binascii.hexlify(base64.b64decode(b(server_hash)))
+            server_hash = server_hash.decode('utf-8')
+        else:
+            # TODO: HACK - We could poll the object for a while and get
+            # the hash
+            pass
+
+        if (verify_hash and server_hash and data_hash != server_hash):
+            raise ObjectHashMismatchError(
+                value='MD5 hash checksum does not match',
+                object_name=object_name, driver=self)
+
+        return Object(name=object_name, size=bytes_transferred,
+                      hash=headers['etag'], extra=None,
+                      meta_data=meta_data, container=container,
+                      driver=self)
+
+    def ex_set_object_metadata(self, obj, meta_data):
+        """
+        Set metadata for an object.
+
+        Note: this replaces the blob's existing user metadata.
+
+        :param obj: The blob object
+        :type obj: :class:`Object`
+
+        :param meta_data: Metadata key value pairs
+        :type meta_data: ``dict``
+        """
+        object_path = self._get_object_path(obj.container, obj.name)
+        params = {'comp': 'metadata'}
+        headers = {}
+
+        self._update_metadata(headers, meta_data)
+
+        response = self.connection.request(object_path, method='PUT',
+                                           params=params,
+                                           headers=headers)
+
+        if response.status != httplib.OK:
+            response.parse_error('Setting metadata')
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/cloudfiles.py b/awx/lib/site-packages/libcloud/storage/drivers/cloudfiles.py
new file mode 100644
index 0000000000..bce89a865c
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/cloudfiles.py
@@ -0,0 +1,994 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from hashlib import sha1
+import hmac
+import os
+from time import time
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlencode
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.py3 import PY3
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import urlquote
+
+if PY3:
+ from io import FileIO as file
+
+from libcloud.utils.files import read_in_chunks
+from libcloud.common.types import MalformedResponseError, LibcloudError
+from libcloud.common.base import Response, RawResponse
+
+from libcloud.storage.providers import Provider
+from libcloud.storage.base import Object, Container, StorageDriver
+from libcloud.storage.types import ContainerAlreadyExistsError
+from libcloud.storage.types import ContainerDoesNotExistError
+from libcloud.storage.types import ContainerIsNotEmptyError
+from libcloud.storage.types import ObjectDoesNotExistError
+from libcloud.storage.types import ObjectHashMismatchError
+from libcloud.storage.types import InvalidContainerNameError
+from libcloud.common.openstack import OpenStackBaseConnection
+from libcloud.common.openstack import OpenStackDriverMixin
+
+from libcloud.common.rackspace import AUTH_URL
+
+CDN_HOST = 'cdn.clouddrive.com'
+API_VERSION = 'v1.0'
+
+# Keys which are used to select a correct endpoint from the service catalog.
+INTERNAL_ENDPOINT_KEY = 'internalURL'
+PUBLIC_ENDPOINT_KEY = 'publicURL'
+
+
+class CloudFilesResponse(Response):
+ valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT]
+
+ def success(self):
+ i = int(self.status)
+ return i >= 200 and i <= 299 or i in self.valid_response_codes
+
+ def parse_body(self):
+ if not self.body:
+ return None
+
+ if 'content-type' in self.headers:
+ key = 'content-type'
+ elif 'Content-Type' in self.headers:
+ key = 'Content-Type'
+ else:
+ raise LibcloudError('Missing content-type header')
+
+ content_type = self.headers[key]
+ if content_type.find(';') != -1:
+ content_type = content_type.split(';')[0]
+
+ if content_type == 'application/json':
+ try:
+ data = json.loads(self.body)
+ except:
+ raise MalformedResponseError('Failed to parse JSON',
+ body=self.body,
+ driver=CloudFilesStorageDriver)
+ elif content_type == 'text/plain':
+ data = self.body
+ else:
+ data = self.body
+
+ return data
+
+
+class CloudFilesRawResponse(CloudFilesResponse, RawResponse):
+    # Raw (unparsed, streaming) variant of CloudFilesResponse used for
+    # object download/upload requests
+    pass
+
+
+class OpenStackSwiftConnection(OpenStackBaseConnection):
+    """
+    Connection class for the OpenStack Swift endpoint.
+    """
+
+    responseCls = CloudFilesResponse
+    rawResponseCls = CloudFilesRawResponse
+
+    auth_url = AUTH_URL
+    _auth_version = '1.0'
+
+    # TODO: Reverse the relationship - Swift -> CloudFiles
+    def __init__(self, user_id, key, secure=True, **kwargs):
+        # Ignore this for now
+        kwargs.pop('use_internal_url', None)
+        super(OpenStackSwiftConnection, self).__init__(user_id, key,
+                                                       secure=secure,
+                                                       **kwargs)
+        self.api_version = API_VERSION
+        self.accept_format = 'application/json'
+
+        # Service catalog lookup keys; 'ex_force_*' kwargs take precedence
+        self._service_type = self._ex_force_service_type or 'object-store'
+        self._service_name = self._ex_force_service_name or 'swift'
+
+        if self._ex_force_service_region:
+            self._service_region = self._ex_force_service_region
+        else:
+            self._service_region = None
+
+    def get_endpoint(self, *args, **kwargs):
+        """
+        Look up the Swift endpoint URL in the auth service catalog.
+
+        NOTE(review): if _auth_version matches neither branch below,
+        'endpoint' is never bound and this raises NameError — confirm
+        only '1.0', '1.1' and '2.0' are possible here.
+        """
+        if '2.0' in self._auth_version:
+            endpoint = self.service_catalog.get_endpoint(
+                service_type=self._service_type,
+                name=self._service_name,
+                region=self._service_region)
+        elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
+            endpoint = self.service_catalog.get_endpoint(
+                name=self._service_name, region=self._service_region)
+
+        if PUBLIC_ENDPOINT_KEY in endpoint:
+            return endpoint[PUBLIC_ENDPOINT_KEY]
+        else:
+            raise LibcloudError('Could not find specified endpoint')
+
+    def request(self, action, params=None, data='', headers=None, method='GET',
+                raw=False, cdn_request=False):
+        # Perform an API request; cdn_request selects the CDN endpoint on
+        # subclasses that support it
+        if not headers:
+            headers = {}
+        if not params:
+            params = {}
+
+        self.cdn_request = cdn_request
+        # Always ask Swift for JSON responses
+        params['format'] = 'json'
+
+        if method in ['POST', 'PUT'] and 'Content-Type' not in headers:
+            headers.update({'Content-Type': 'application/json; charset=UTF-8'})
+
+        return super(OpenStackSwiftConnection, self).request(
+            action=action,
+            params=params, data=data,
+            method=method, headers=headers,
+            raw=raw)
+
+
+class CloudFilesConnection(OpenStackSwiftConnection):
+    """
+    Base connection class for the Cloudfiles driver.
+    """
+
+    responseCls = CloudFilesResponse
+    rawResponseCls = CloudFilesRawResponse
+
+    auth_url = AUTH_URL
+    _auth_version = '2.0'
+
+    def __init__(self, user_id, key, secure=True,
+                 use_internal_url=False, **kwargs):
+        super(CloudFilesConnection, self).__init__(user_id, key, secure=secure,
+                                                   **kwargs)
+        self.api_version = API_VERSION
+        self.accept_format = 'application/json'
+        # Toggled per-request by request(); selects the CDN service catalog
+        # entry in get_endpoint()
+        self.cdn_request = False
+        # Prefer the ServiceNet (internal) URL when running inside Rackspace
+        self.use_internal_url = use_internal_url
+
+    def _get_endpoint_key(self):
+        # Pick internalURL vs publicURL for the catalog entry
+        if self.use_internal_url:
+            endpoint_key = INTERNAL_ENDPOINT_KEY
+        else:
+            endpoint_key = PUBLIC_ENDPOINT_KEY
+
+        if self.cdn_request:
+            # cdn endpoints don't have internal urls
+            endpoint_key = PUBLIC_ENDPOINT_KEY
+
+        return endpoint_key
+
+    def get_endpoint(self):
+        """
+        Look up the Cloud Files (or Cloud Files CDN) endpoint URL.
+
+        NOTE(review): assumes _ex_force_service_region is always set
+        (the driver passes region through) — .upper() on None would raise.
+        """
+        region = self._ex_force_service_region.upper()
+
+        if '2.0' in self._auth_version:
+            ep = self.service_catalog.get_endpoint(
+                service_type='object-store',
+                name='cloudFiles',
+                region=region)
+            cdn_ep = self.service_catalog.get_endpoint(
+                service_type='rax:object-cdn',
+                name='cloudFilesCDN',
+                region=region)
+        else:
+            raise LibcloudError(
+                'Auth version "%s" not supported' % (self._auth_version))
+
+        # if this is a CDN request, return the cdn url instead
+        if self.cdn_request:
+            ep = cdn_ep
+
+        endpoint_key = self._get_endpoint_key()
+
+        if not ep:
+            raise LibcloudError('Could not find specified endpoint')
+
+        if endpoint_key in ep:
+            return ep[endpoint_key]
+        else:
+            raise LibcloudError('Could not find specified endpoint')
+
+    def request(self, action, params=None, data='', headers=None, method='GET',
+                raw=False, cdn_request=False):
+        # Same as the parent request(), but cdn_request routes the call to
+        # the CDN management endpoint via get_endpoint()
+        if not headers:
+            headers = {}
+        if not params:
+            params = {}
+
+        self.cdn_request = cdn_request
+        params['format'] = 'json'
+
+        if method in ['POST', 'PUT'] and 'Content-Type' not in headers:
+            headers.update({'Content-Type': 'application/json; charset=UTF-8'})
+
+        return super(CloudFilesConnection, self).request(
+            action=action,
+            params=params, data=data,
+            method=method, headers=headers,
+            raw=raw)
+
+
+class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin):
+ """
+ CloudFiles driver.
+ """
+ name = 'CloudFiles'
+ website = 'http://www.rackspace.com/'
+
+ connectionCls = CloudFilesConnection
+ hash_type = 'md5'
+ supports_chunked_encoding = True
+
+    def __init__(self, key, secret=None, secure=True, host=None, port=None,
+                 region='ord', use_internal_url=False, **kwargs):
+        """
+        @inherits: :class:`StorageDriver.__init__`
+
+        :param region: ID of the region which should be used.
+        :type region: ``str``
+        """
+        # This is here for backward compatibility
+        if 'ex_force_service_region' in kwargs:
+            region = kwargs['ex_force_service_region']
+
+        self.use_internal_url = use_internal_url
+        OpenStackDriverMixin.__init__(self, (), **kwargs)
+        super(CloudFilesStorageDriver, self).__init__(key=key, secret=secret,
+                                                      secure=secure, host=host,
+                                                      port=port, region=region,
+                                                      **kwargs)
+
+    def iterate_containers(self):
+        """
+        Return an iterable of all containers in the account.
+
+        @inherits: :class:`StorageDriver.iterate_containers`
+        """
+        response = self.connection.request('')
+
+        if response.status == httplib.NO_CONTENT:
+            # Empty account
+            return []
+        elif response.status == httplib.OK:
+            return self._to_container_list(json.loads(response.body))
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status))
+
+    def get_container(self, container_name):
+        """
+        Return a Container instance for the given name.
+
+        @inherits: :class:`StorageDriver.get_container`
+        """
+        container_name_encoded = self._encode_container_name(container_name)
+        # HEAD returns the container stats in headers without a body
+        response = self.connection.request('/%s' % (container_name_encoded),
+                                           method='HEAD')
+
+        if response.status == httplib.NO_CONTENT:
+            container = self._headers_to_container(
+                container_name, response.headers)
+            return container
+        elif response.status == httplib.NOT_FOUND:
+            raise ContainerDoesNotExistError(None, self, container_name)
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status))
+
+    def get_object(self, container_name, object_name):
+        """
+        Return an Object instance for the given container/object name.
+
+        @inherits: :class:`StorageDriver.get_object`
+        """
+        container = self.get_container(container_name)
+        container_name_encoded = self._encode_container_name(container_name)
+        object_name_encoded = self._encode_object_name(object_name)
+
+        # HEAD returns object metadata in headers without downloading data
+        response = self.connection.request('/%s/%s' % (container_name_encoded,
+                                                       object_name_encoded),
+                                           method='HEAD')
+        if response.status in [httplib.OK, httplib.NO_CONTENT]:
+            obj = self._headers_to_object(
+                object_name, container, response.headers)
+            return obj
+        elif response.status == httplib.NOT_FOUND:
+            raise ObjectDoesNotExistError(None, self, object_name)
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status))
+
+    def get_container_cdn_url(self, container):
+        """
+        Return the CDN URL for a CDN-enabled container.
+
+        @inherits: :class:`StorageDriver.get_container_cdn_url`
+        """
+        container_name_encoded = self._encode_container_name(container.name)
+        # cdn_request=True routes the call to the CDN management endpoint
+        response = self.connection.request('/%s' % (container_name_encoded),
+                                           method='HEAD',
+                                           cdn_request=True)
+
+        if response.status == httplib.NO_CONTENT:
+            cdn_url = response.headers['x-cdn-uri']
+            return cdn_url
+        elif response.status == httplib.NOT_FOUND:
+            raise ContainerDoesNotExistError(value='',
+                                             container_name=container.name,
+                                             driver=self)
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status))
+
+    def get_object_cdn_url(self, obj):
+        """
+        Return the CDN URL for an object (container CDN URL + object name).
+
+        @inherits: :class:`StorageDriver.get_object_cdn_url`
+        """
+        container_cdn_url = self.get_container_cdn_url(container=obj.container)
+        return '%s/%s' % (container_cdn_url, obj.name)
+
+    def enable_container_cdn(self, container, ex_ttl=None):
+        """
+        @inherits: :class:`StorageDriver.enable_container_cdn`
+
+        :param ex_ttl: cache time to live
+        :type ex_ttl: ``int``
+
+        :rtype: ``bool``
+        """
+        container_name = container.name
+        headers = {'X-CDN-Enabled': 'True'}
+
+        if ex_ttl:
+            headers['X-TTL'] = ex_ttl
+
+        response = self.connection.request('/%s' % (container_name),
+                                           method='PUT',
+                                           headers=headers,
+                                           cdn_request=True)
+
+        # 201 = newly enabled, 202 = already enabled
+        return response.status in [httplib.CREATED, httplib.ACCEPTED]
+
+    def create_container(self, container_name):
+        """
+        Create a new container.
+
+        @inherits: :class:`StorageDriver.create_container`
+        """
+        container_name_encoded = self._encode_container_name(container_name)
+        response = self.connection.request(
+            '/%s' % (container_name_encoded), method='PUT')
+
+        if response.status == httplib.CREATED:
+            extra = {'object_count': 0}
+            container = Container(name=container_name,
+                                  extra=extra, driver=self)
+
+            return container
+        elif response.status == httplib.ACCEPTED:
+            # Swift answers 202 Accepted when the container already exists
+            error = ContainerAlreadyExistsError(None, self, container_name)
+            raise error
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status))
+
+    def delete_container(self, container):
+        """
+        Delete a container (only an empty container can be deleted).
+
+        @inherits: :class:`StorageDriver.delete_container`
+        """
+        name = self._encode_container_name(container.name)
+
+        # Only empty container can be deleted
+        response = self.connection.request('/%s' % (name), method='DELETE')
+
+        if response.status == httplib.NO_CONTENT:
+            return True
+        elif response.status == httplib.NOT_FOUND:
+            raise ContainerDoesNotExistError(value='',
+                                             container_name=name, driver=self)
+        elif response.status == httplib.CONFLICT:
+            # @TODO: Add "delete_all_objects" parameter?
+            raise ContainerIsNotEmptyError(value='',
+                                           container_name=name, driver=self)
+        # NOTE(review): unlike the sibling methods, no final raise here —
+        # any other status falls through and returns None implicitly
+
+    def download_object(self, obj, destination_path, overwrite_existing=False,
+                        delete_on_failure=True):
+        """
+        Download an object to the specified destination path.
+
+        @inherits: :class:`StorageDriver.download_object`
+        """
+        container_name = obj.container.name
+        object_name = obj.name
+        # raw=True so the body can be streamed to disk by _save_object
+        response = self.connection.request('/%s/%s' % (container_name,
+                                                       object_name),
+                                           method='GET', raw=True)
+
+        return self._get_object(
+            obj=obj, callback=self._save_object, response=response,
+            callback_kwargs={'obj': obj,
+                             'response': response.response,
+                             'destination_path': destination_path,
+                             'overwrite_existing': overwrite_existing,
+                             'delete_on_failure': delete_on_failure},
+            success_status_code=httplib.OK)
+
+    def download_object_as_stream(self, obj, chunk_size=None):
+        """
+        Return a generator which yields the object data in chunks.
+
+        @inherits: :class:`StorageDriver.download_object_as_stream`
+        """
+        container_name = obj.container.name
+        object_name = obj.name
+        response = self.connection.request('/%s/%s' % (container_name,
+                                                       object_name),
+                                           method='GET', raw=True)
+
+        return self._get_object(obj=obj, callback=read_in_chunks,
+                                response=response,
+                                callback_kwargs={'iterator': response.response,
+                                                 'chunk_size': chunk_size},
+                                success_status_code=httplib.OK)
+
+    def upload_object(self, file_path, container, object_name, extra=None,
+                      verify_hash=True):
+        """
+        Upload an object.
+
+        Note: This will override file with a same name if it already exists.
+
+        @inherits: :class:`StorageDriver.upload_object`
+        """
+        upload_func = self._upload_file
+        upload_func_kwargs = {'file_path': file_path}
+
+        return self._put_object(container=container, object_name=object_name,
+                                upload_func=upload_func,
+                                upload_func_kwargs=upload_func_kwargs,
+                                extra=extra, file_path=file_path,
+                                verify_hash=verify_hash)
+
+    def upload_object_via_stream(self, iterator,
+                                 container, object_name, extra=None):
+        """
+        Upload an object from a data stream using chunked transfer encoding.
+
+        @inherits: :class:`StorageDriver.upload_object_via_stream`
+        """
+        if isinstance(iterator, file):
+            # File objects are wrapped so _stream_data sees a plain iterator
+            iterator = iter(iterator)
+
+        upload_func = self._stream_data
+        upload_func_kwargs = {'iterator': iterator}
+
+        return self._put_object(container=container, object_name=object_name,
+                                upload_func=upload_func,
+                                upload_func_kwargs=upload_func_kwargs,
+                                extra=extra, iterator=iterator)
+
+    def delete_object(self, obj):
+        """
+        Delete an object from its container.
+
+        @inherits: :class:`StorageDriver.delete_object`
+        """
+        container_name = self._encode_container_name(obj.container.name)
+        object_name = self._encode_object_name(obj.name)
+
+        response = self.connection.request(
+            '/%s/%s' % (container_name, object_name), method='DELETE')
+
+        if response.status == httplib.NO_CONTENT:
+            return True
+        elif response.status == httplib.NOT_FOUND:
+            raise ObjectDoesNotExistError(value='', object_name=object_name,
+                                          driver=self)
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status))
+
+    def ex_purge_object_from_cdn(self, obj, email=None):
+        """
+        Purge edge cache for the specified object.
+
+        :param email: Email where a notification will be sent when the job
+        completes. (optional)
+        :type email: ``str``
+
+        :rtype: ``bool``
+        """
+        container_name = self._encode_container_name(obj.container.name)
+        object_name = self._encode_object_name(obj.name)
+        headers = {'X-Purge-Email': email} if email else {}
+
+        # DELETE against the CDN endpoint purges the edge caches; the
+        # object itself is not deleted
+        response = self.connection.request('/%s/%s' % (container_name,
+                                                       object_name),
+                                           method='DELETE',
+                                           headers=headers,
+                                           cdn_request=True)
+
+        return response.status == httplib.NO_CONTENT
+
+    def ex_get_meta_data(self):
+        """
+        Get meta data for the whole account (container count, object count,
+        bytes used and the temp URL key, if set).
+
+        :rtype: ``dict``
+        """
+        response = self.connection.request('', method='HEAD')
+
+        if response.status == httplib.NO_CONTENT:
+            container_count = response.headers.get(
+                'x-account-container-count', 'unknown')
+            object_count = response.headers.get(
+                'x-account-object-count', 'unknown')
+            bytes_used = response.headers.get(
+                'x-account-bytes-used', 'unknown')
+            temp_url_key = response.headers.get(
+                'x-account-meta-temp-url-key', None)
+
+            # NOTE(review): if any of these headers is absent the 'unknown'
+            # fallback makes int() raise ValueError — presumably the
+            # account headers are always present; confirm
+            return {'container_count': int(container_count),
+                    'object_count': int(object_count),
+                    'bytes_used': int(bytes_used),
+                    'temp_url_key': temp_url_key}
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status))
+
+    def ex_multipart_upload_object(self, file_path, container, object_name,
+                                   chunk_size=33554432, extra=None,
+                                   verify_hash=True):
+        """
+        Upload a large file as separate part objects plus a manifest object
+        (Swift "Dynamic Large Object" pattern). Files smaller than
+        chunk_size (default 32 MB) fall back to a regular upload.
+        """
+        object_size = os.path.getsize(file_path)
+        if object_size < chunk_size:
+            return self.upload_object(file_path, container, object_name,
+                                      extra=extra, verify_hash=verify_hash)
+
+        # FileChunkReader is defined elsewhere in this module; it yields
+        # one iterator per chunk_size slice of the file
+        iter_chunk_reader = FileChunkReader(file_path, chunk_size)
+
+        for index, iterator in enumerate(iter_chunk_reader):
+            self._upload_object_part(container=container,
+                                     object_name=object_name,
+                                     part_number=index,
+                                     iterator=iterator,
+                                     verify_hash=verify_hash)
+
+        # The manifest object ties all the uploaded parts together
+        return self._upload_object_manifest(container=container,
+                                            object_name=object_name,
+                                            extra=extra,
+                                            verify_hash=verify_hash)
+
+    def ex_enable_static_website(self, container, index_file='index.html'):
+        """
+        Enable serving a static website.
+
+        :param container: Container instance
+        :type container: :class:`Container`
+
+        :param index_file: Name of the object which becomes an index page for
+        every sub-directory in this container.
+        :type index_file: ``str``
+
+        :rtype: ``bool``
+        """
+        container_name = container.name
+        headers = {'X-Container-Meta-Web-Index': index_file}
+
+        # Container metadata update; goes to the storage (not CDN) endpoint
+        response = self.connection.request('/%s' % (container_name),
+                                           method='POST',
+                                           headers=headers,
+                                           cdn_request=False)
+
+        return response.status in [httplib.CREATED, httplib.ACCEPTED]
+
+    def ex_set_error_page(self, container, file_name='error.html'):
+        """
+        Set a custom error page which is displayed if file is not found and
+        serving of a static website is enabled.
+
+        :param container: Container instance
+        :type container: :class:`Container`
+
+        :param file_name: Name of the object which becomes the error page.
+        :type file_name: ``str``
+
+        :rtype: ``bool``
+        """
+        container_name = container.name
+        headers = {'X-Container-Meta-Web-Error': file_name}
+
+        response = self.connection.request('/%s' % (container_name),
+                                           method='POST',
+                                           headers=headers,
+                                           cdn_request=False)
+
+        return response.status in [httplib.CREATED, httplib.ACCEPTED]
+
+    def ex_set_account_metadata_temp_url_key(self, key):
+        """
+        Set the metadata header X-Account-Meta-Temp-URL-Key on your Cloud
+        Files account. This key is required before ex_get_object_temp_url
+        can be used.
+
+        :param key: X-Account-Meta-Temp-URL-Key
+        :type key: ``str``
+
+        :rtype: ``bool``
+        """
+        headers = {'X-Account-Meta-Temp-URL-Key': key}
+
+        response = self.connection.request('',
+                                           method='POST',
+                                           headers=headers,
+                                           cdn_request=False)
+
+        return response.status in [httplib.OK, httplib.NO_CONTENT,
+                                   httplib.CREATED, httplib.ACCEPTED]
+
+    def ex_get_object_temp_url(self, obj, method='GET', timeout=60):
+        """
+        Create a temporary URL to allow others to retrieve or put objects
+        in your Cloud Files account for as long or as short a time as you
+        wish. This method is specifically for allowing users to retrieve
+        or update an object.
+
+        :param obj: The object that you wish to make temporarily public
+        :type obj: :class:`Object`
+
+        :param method: Which method you would like to allow, 'PUT' or 'GET'
+        :type method: ``str``
+
+        :param timeout: Time (in seconds) after which you want the TempURL
+        to expire.
+        :type timeout: ``int``
+
+        :return: The signed temporary URL.
+        :rtype: ``str``
+
+        :raises KeyError: If the account has no temp URL key set.
+        """
+        # Ensure request_path/host are resolved before building the URL
+        self.connection._populate_hosts_and_request_paths()
+        expires = int(time() + timeout)
+        path = '%s/%s/%s' % (self.connection.request_path,
+                             obj.container.name, obj.name)
+        try:
+            key = self.ex_get_meta_data()['temp_url_key']
+            assert key is not None
+        except Exception:
+            raise KeyError('You must first set the ' +
+                           'X-Account-Meta-Temp-URL-Key header on your ' +
+                           'Cloud Files account using ' +
+                           'ex_set_account_metadata_temp_url_key before ' +
+                           'you can use this method.')
+        # Swift TempURL signature: HMAC-SHA1 over "METHOD\nexpires\npath"
+        hmac_body = '%s\n%s\n%s' % (method, expires, path)
+        sig = hmac.new(b(key), b(hmac_body), sha1).hexdigest()
+        params = urlencode({'temp_url_sig': sig,
+                            'temp_url_expires': expires})
+
+        temp_url = 'https://%s/%s/%s?%s' %\
+            (self.connection.host + self.connection.request_path,
+             obj.container.name, obj.name, params)
+
+        return temp_url
+
+    def _upload_object_part(self, container, object_name, part_number,
+                            iterator, verify_hash=True):
+        """
+        Upload a single part of a multipart upload as its own object,
+        named '<object_name>/<zero-padded part number>' so the manifest's
+        X-Object-Manifest prefix matches all parts in order.
+        """
+        upload_func = self._stream_data
+        upload_func_kwargs = {'iterator': iterator}
+        part_name = object_name + '/%08d' % part_number
+        extra = {'content_type': 'application/octet-stream'}
+
+        self._put_object(container=container,
+                         object_name=part_name,
+                         upload_func=upload_func,
+                         upload_func_kwargs=upload_func_kwargs,
+                         extra=extra, iterator=iterator,
+                         verify_hash=verify_hash)
+
+ def _upload_object_manifest(self, container, object_name, extra=None,
+ verify_hash=True):
+ extra = extra or {}
+ meta_data = extra.get('meta_data')
+
+ container_name_encoded = self._encode_container_name(container.name)
+ object_name_encoded = self._encode_object_name(object_name)
+ request_path = '/%s/%s' % (container_name_encoded, object_name_encoded)
+
+ headers = {'X-Auth-Token': self.connection.auth_token,
+ 'X-Object-Manifest': '%s/%s/' %
+ (container_name_encoded,
+ object_name_encoded)}
+
+ data = ''
+ response = self.connection.request(request_path,
+ method='PUT', data=data,
+ headers=headers, raw=True)
+
+ object_hash = None
+
+ if verify_hash:
+ hash_function = self._get_hash_function()
+ hash_function.update(b(data))
+ data_hash = hash_function.hexdigest()
+ object_hash = response.headers.get('etag')
+
+ if object_hash != data_hash:
+ raise ObjectHashMismatchError(
+ value=('MD5 hash checksum does not match (expected=%s, ' +
+ 'actual=%s)') %
+ (data_hash, object_hash),
+ object_name=object_name, driver=self)
+
+ obj = Object(name=object_name, size=0, hash=object_hash, extra=None,
+ meta_data=meta_data, container=container, driver=self)
+
+ return obj
+
+ def list_container_objects(self, container, ex_prefix=None):
+ """
+ Return a list of objects for the given container.
+
+ :param container: Container instance.
+ :type container: :class:`Container`
+
+ :param ex_prefix: Only get objects with names starting with ex_prefix
+ :type ex_prefix: ``str``
+
+ :return: A list of Object instances.
+ :rtype: ``list`` of :class:`Object`
+ """
+ return list(self.iterate_container_objects(container,
+ ex_prefix=ex_prefix))
+
+ def iterate_container_objects(self, container, ex_prefix=None):
+ """
+ Return a generator of objects for the given container.
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :param ex_prefix: Only get objects with names starting with ex_prefix
+ :type ex_prefix: ``str``
+
+ :return: A generator of Object instances.
+ :rtype: ``generator`` of :class:`Object`
+ """
+ params = {}
+ if ex_prefix:
+ params['prefix'] = ex_prefix
+
+ while True:
+ container_name_encoded = \
+ self._encode_container_name(container.name)
+ response = self.connection.request('/%s' %
+ (container_name_encoded),
+ params=params)
+
+ if response.status == httplib.NO_CONTENT:
+ # Empty or non-existent container
+ break
+ elif response.status == httplib.OK:
+ objects = self._to_object_list(json.loads(response.body),
+ container)
+
+ if len(objects) == 0:
+ break
+
+ for obj in objects:
+ yield obj
+ params['marker'] = obj.name
+
+ else:
+ raise LibcloudError('Unexpected status code: %s' %
+ (response.status))
+
    def _put_object(self, container, object_name, upload_func,
                    upload_func_kwargs, extra=None, file_path=None,
                    iterator=None, verify_hash=True):
        """
        Upload a single object via an HTTP PUT and validate the server's
        response.

        :param container: Destination container.
        :param object_name: Un-encoded name for the new object.
        :param upload_func: Callable that performs the actual data transfer.
        :param upload_func_kwargs: Keyword arguments for ``upload_func``.
        :param extra: Optional dict; ``content_type``, ``meta_data`` and
                      ``content_disposition`` keys are honoured.
        :param file_path: Path of a local file to upload (alternative to
                          ``iterator``).
        :param iterator: Iterator yielding the object data.
        :param verify_hash: When True, compare our hash of the uploaded data
                            against the ``etag`` header the server returns.
        :return: The newly created :class:`Object`.
        :raises LibcloudError: On a 417 response, a missing etag, or any
                               status other than 201.
        :raises ObjectHashMismatchError: When the server etag does not match
                                         the locally computed hash.
        """
        extra = extra or {}
        container_name_encoded = self._encode_container_name(container.name)
        object_name_encoded = self._encode_object_name(object_name)
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)
        content_disposition = extra.get('content_disposition', None)

        headers = {}
        if meta_data:
            # User metadata travels as X-Object-Meta-* request headers.
            for key, value in list(meta_data.items()):
                key = 'X-Object-Meta-%s' % (key)
                headers[key] = value

        if content_disposition is not None:
            headers['Content-Disposition'] = content_disposition

        request_path = '/%s/%s' % (container_name_encoded, object_name_encoded)
        result_dict = self._upload_object(
            object_name=object_name, content_type=content_type,
            upload_func=upload_func, upload_func_kwargs=upload_func_kwargs,
            request_path=request_path, request_method='PUT',
            headers=headers, file_path=file_path, iterator=iterator)

        response = result_dict['response'].response
        bytes_transferred = result_dict['bytes_transferred']
        server_hash = result_dict['response'].headers.get('etag', None)

        # NOTE: the hash checks deliberately run before the CREATED branch so
        # that a hash mismatch is reported even on a successful 201 response.
        if response.status == httplib.EXPECTATION_FAILED:
            raise LibcloudError(value='Missing content-type header',
                                driver=self)
        elif verify_hash and not server_hash:
            raise LibcloudError(value='Server didn\'t return etag',
                                driver=self)
        elif (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value=('MD5 hash checksum does not match (expected=%s, ' +
                       'actual=%s)') % (result_dict['data_hash'], server_hash),
                object_name=object_name, driver=self)
        elif response.status == httplib.CREATED:
            obj = Object(
                name=object_name, size=bytes_transferred, hash=server_hash,
                extra=None, meta_data=meta_data, container=container,
                driver=self)

            return obj
        else:
            # @TODO: Add test case for this condition (probably 411)
            raise LibcloudError('status_code=%s' % (response.status),
                                driver=self)
+
+ def _encode_container_name(self, name):
+ """
+ Encode container name so it can be used as part of the HTTP request.
+ """
+ if name.startswith('/'):
+ name = name[1:]
+ name = urlquote(name)
+
+ if name.find('/') != -1:
+ raise InvalidContainerNameError(value='Container name cannot'
+ ' contain slashes',
+ container_name=name, driver=self)
+
+ if len(name) > 256:
+ raise InvalidContainerNameError(
+ value='Container name cannot be longer than 256 bytes',
+ container_name=name, driver=self)
+
+ return name
+
+ def _encode_object_name(self, name):
+ name = urlquote(name)
+ return name
+
+ def _to_container_list(self, response):
+ # @TODO: Handle more than 10k containers - use "lazy list"?
+ for container in response:
+ extra = {'object_count': int(container['count']),
+ 'size': int(container['bytes'])}
+ yield Container(name=container['name'], extra=extra, driver=self)
+
+ def _to_object_list(self, response, container):
+ objects = []
+
+ for obj in response:
+ name = obj['name']
+ size = int(obj['bytes'])
+ hash = obj['hash']
+ extra = {'content_type': obj['content_type'],
+ 'last_modified': obj['last_modified']}
+ objects.append(Object(
+ name=name, size=size, hash=hash, extra=extra,
+ meta_data=None, container=container, driver=self))
+
+ return objects
+
+ def _headers_to_container(self, name, headers):
+ size = int(headers.get('x-container-bytes-used', 0))
+ object_count = int(headers.get('x-container-object-count', 0))
+
+ extra = {'object_count': object_count,
+ 'size': size}
+ container = Container(name=name, extra=extra, driver=self)
+ return container
+
+ def _headers_to_object(self, name, container, headers):
+ size = int(headers.pop('content-length', 0))
+ last_modified = headers.pop('last-modified', None)
+ etag = headers.pop('etag', None)
+ content_type = headers.pop('content-type', None)
+
+ meta_data = {}
+ for key, value in list(headers.items()):
+ if key.find('x-object-meta-') != -1:
+ key = key.replace('x-object-meta-', '')
+ meta_data[key] = value
+
+ extra = {'content_type': content_type, 'last_modified': last_modified}
+
+ obj = Object(name=name, size=size, hash=etag, extra=extra,
+ meta_data=meta_data, container=container, driver=self)
+ return obj
+
+ def _ex_connection_class_kwargs(self):
+ kwargs = self.openstack_connection_kwargs()
+ kwargs['ex_force_service_region'] = self.region
+ kwargs['use_internal_url'] = self.use_internal_url
+ return kwargs
+
+
class CloudFilesUSStorageDriver(CloudFilesStorageDriver):
    """
    CloudFiles storage driver preconfigured for the US (ORD) endpoint.
    """

    type = Provider.CLOUDFILES_US
    name = 'CloudFiles (US)'

    def __init__(self, *args, **kwargs):
        # Pin the region to Chicago (ord); everything else is inherited.
        kwargs['region'] = 'ord'
        super(CloudFilesUSStorageDriver, self).__init__(*args, **kwargs)
+
+
class OpenStackSwiftStorageDriver(CloudFilesStorageDriver):
    """
    Storage driver for generic OpenStack Swift deployments.
    """
    type = Provider.CLOUDFILES_SWIFT
    name = 'OpenStack Swift'
    connectionCls = OpenStackSwiftConnection

    # TODO: Reverse the relationship - Swift -> CloudFiles

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=None, **kwargs):
        # Nothing Swift-specific here (yet); forward everything to the
        # CloudFiles driver, which handles auth and endpoint selection.
        super(OpenStackSwiftStorageDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            region=region, **kwargs)
+
+
class CloudFilesUKStorageDriver(CloudFilesStorageDriver):
    """
    CloudFiles storage driver preconfigured for the UK (LON) endpoint.
    """

    type = Provider.CLOUDFILES_UK
    name = 'CloudFiles (UK)'

    def __init__(self, *args, **kwargs):
        # Pin the region to London (lon); everything else is inherited.
        kwargs['region'] = 'lon'
        super(CloudFilesUKStorageDriver, self).__init__(*args, **kwargs)
+
+
class FileChunkReader(object):
    """
    Iterator which splits a file into ``chunk_size`` byte ranges and yields
    one :class:`ChunkStreamReader` per range.
    """

    def __init__(self, file_path, chunk_size):
        self.file_path = file_path
        self.total = os.path.getsize(file_path)
        self.chunk_size = chunk_size
        self.bytes_read = 0
        self.stop_iteration = False

    def __iter__(self):
        return self

    def next(self):
        if self.stop_iteration:
            raise StopIteration

        start_block = self.bytes_read
        end_block = min(start_block + self.chunk_size, self.total)
        if end_block == self.total:
            # This range reaches the end of the file - stop on the next call.
            self.stop_iteration = True
        self.bytes_read += end_block - start_block
        return ChunkStreamReader(file_path=self.file_path,
                                 start_block=start_block,
                                 end_block=end_block,
                                 chunk_size=8192)

    def __next__(self):
        # Python 3 iterator protocol delegates to next().
        return self.next()
+
+
class ChunkStreamReader(object):
    """
    Iterator which yields ``chunk_size`` byte blocks from the
    ``[start_block, end_block)`` byte range of a file.

    The underlying file descriptor is closed once the range is exhausted
    and the iterator raises ``StopIteration``.
    """

    def __init__(self, file_path, start_block, end_block, chunk_size):
        self.fd = open(file_path, 'rb')
        self.fd.seek(start_block)
        self.start_block = start_block
        self.end_block = end_block
        self.chunk_size = chunk_size
        self.bytes_read = 0
        self.stop_iteration = False

    def __iter__(self):
        return self

    def next(self):
        if self.stop_iteration:
            self.fd.close()
            raise StopIteration

        block_size = self.chunk_size
        remaining = (self.end_block - self.start_block) - self.bytes_read
        # Use ">=" so that a range which is an exact multiple of chunk_size
        # does not yield a trailing empty block (the old ">" comparison did).
        if block_size >= remaining:
            block_size = remaining
            self.stop_iteration = True

        block = self.fd.read(block_size)
        self.bytes_read += block_size
        return block

    def __next__(self):
        return self.next()
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/dummy.py b/awx/lib/site-packages/libcloud/storage/drivers/dummy.py
new file mode 100644
index 0000000000..affd265538
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/dummy.py
@@ -0,0 +1,490 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import random
+import hashlib
+
+from libcloud.utils.py3 import PY3
+from libcloud.utils.py3 import b
+
+if PY3:
+ from io import FileIO as file
+
+from libcloud.common.types import LibcloudError
+
+from libcloud.storage.base import Object, Container, StorageDriver
+from libcloud.storage.types import ContainerAlreadyExistsError
+from libcloud.storage.types import ContainerDoesNotExistError
+from libcloud.storage.types import ContainerIsNotEmptyError
+from libcloud.storage.types import ObjectDoesNotExistError
+
+
class DummyFileObject(file):
    """
    Fake file-like object which produces a fixed number of random-content
    chunks. Used by the dummy driver to simulate download streams.
    """

    def __init__(self, yield_count=5, chunk_len=10):
        # Deliberately does not call file/FileIO __init__ - no real file is
        # ever opened.
        self._yield_count = yield_count
        self._chunk_len = chunk_len

    def read(self, size):
        """
        Generator yielding ``yield_count`` chunks of ``chunk_len`` characters
        each. ``size`` is accepted for API compatibility and ignored.
        """
        for _ in range(self._yield_count):
            yield self._get_chunk(self._chunk_len)
        # A plain return ends the generator. The previous
        # "raise StopIteration" is a RuntimeError under PEP 479 (3.7+).

    def _get_chunk(self, chunk_len):
        # The old code iterated over random.randint(97, 120) - an int - which
        # raised TypeError. Build a string of chunk_len random characters so
        # chunk lengths agree with what __len__ advertises.
        return ''.join(chr(random.randint(97, 120))
                       for _ in range(chunk_len))

    def __len__(self):
        return self._yield_count * self._chunk_len
+
+
+class DummyIterator(object):
+ def __init__(self, data=None):
+ self.hash = hashlib.md5()
+ self._data = data or []
+ self._current_item = 0
+
+ def get_md5_hash(self):
+ return self.hash.hexdigest()
+
+ def next(self):
+ if self._current_item == len(self._data):
+ raise StopIteration
+
+ value = self._data[self._current_item]
+ self.hash.update(b(value))
+ self._current_item += 1
+ return value
+
+ def __next__(self):
+ return self.next()
+
+
class DummyStorageDriver(StorageDriver):
    """
    Dummy Storage driver.

    >>> from libcloud.storage.drivers.dummy import DummyStorageDriver
    >>> driver = DummyStorageDriver('key', 'secret')
    >>> container = driver.create_container(container_name='test container')
    >>> container

    >>> container.name
    'test container'
    >>> container.extra['object_count']
    0
    """

    name = 'Dummy Storage Provider'
    website = 'http://example.com'

    def __init__(self, api_key, api_secret):
        """
        :param api_key: API key or username to used (required)
        :type api_key: ``str``
        :param api_secret: Secret password to be used (required)
        :type api_secret: ``str``
        :rtype: ``None``
        """
        # Credentials are accepted for interface compatibility but ignored.
        # All state lives in this in-memory mapping of
        # container name -> {'container': ..., 'objects': ..., 'cdn_url': ...}
        self._containers = {}

    def get_meta_data(self):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_meta_data()['object_count']
        0
        >>> driver.get_meta_data()['container_count']
        0
        >>> driver.get_meta_data()['bytes_used']
        0
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container_name = 'test container 2'
        >>> container = driver.create_container(container_name=container_name)
        >>> obj = container.upload_object_via_stream(
        ...     object_name='test object', iterator=DummyFileObject(5, 10),
        ...     extra={})
        >>> driver.get_meta_data()['object_count']
        1
        >>> driver.get_meta_data()['container_count']
        2
        >>> driver.get_meta_data()['bytes_used']
        50

        :rtype: ``dict``
        """

        container_count = len(self._containers)
        object_count = sum([len(self._containers[container]['objects']) for
                            container in self._containers])

        # Total size is the sum of every stored object's size attribute.
        bytes_used = 0
        for container in self._containers:
            objects = self._containers[container]['objects']
            for _, obj in objects.items():
                bytes_used += obj.size

        return {'container_count': int(container_count),
                'object_count': int(object_count),
                'bytes_used': int(bytes_used)}

    def iterate_containers(self):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> list(driver.iterate_containers())
        []
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container

        >>> container.name
        'test container 1'
        >>> container_name = 'test container 2'
        >>> container = driver.create_container(container_name=container_name)
        >>> container

        >>> container = driver.create_container(
        ...     container_name='test container 2')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerAlreadyExistsError:
        >>> container_list=list(driver.iterate_containers())
        >>> sorted([c.name for c in container_list])
        ['test container 1', 'test container 2']

        @inherits: :class:`StorageDriver.iterate_containers`
        """

        for container in list(self._containers.values()):
            yield container['container']

    def list_container_objects(self, container):
        """
        Return the objects stored in the given container.

        NOTE(review): this returns ``container.objects``, an attribute the
        base Container type is not known to define - looks broken; confirm
        against libcloud.storage.base.Container.
        """
        container = self.get_container(container.name)

        return container.objects

    def get_container(self, container_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container

        >>> container.name
        'test container 1'
        >>> driver.get_container('test container 1')


        @inherits: :class:`StorageDriver.get_container`
        """

        if container_name not in self._containers:
            raise ContainerDoesNotExistError(driver=self, value=None,
                                             container_name=container_name)

        return self._containers[container_name]['container']

    def get_container_cdn_url(self, container):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container

        >>> container.name
        'test container 1'
        >>> container.get_cdn_url()
        'http://www.test.com/container/test_container_1'

        @inherits: :class:`StorageDriver.get_container_cdn_url`
        """

        if container.name not in self._containers:
            raise ContainerDoesNotExistError(driver=self, value=None,
                                             container_name=container.name)

        return self._containers[container.name]['cdn_url']

    def get_object(self, container_name, object_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_object('unknown', 'unknown')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container

        >>> driver.get_object(
        ...     'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...     iterator=DummyFileObject(5, 10), extra={})
        >>> obj.name
        'test object'
        >>> obj.size
        50

        @inherits: :class:`StorageDriver.get_object`
        """

        # Raises ContainerDoesNotExistError if the container is unknown.
        self.get_container(container_name)
        container_objects = self._containers[container_name]['objects']
        if object_name not in container_objects:
            raise ObjectDoesNotExistError(object_name=object_name, value=None,
                                          driver=self)

        return container_objects[object_name]

    def get_object_cdn_url(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container

        >>> obj = container.upload_object_via_stream(
        ...     object_name='test object 5',
        ...     iterator=DummyFileObject(5, 10), extra={})
        >>> obj.name
        'test object 5'
        >>> obj.get_cdn_url()
        'http://www.test.com/object/test_object_5'

        @inherits: :class:`StorageDriver.get_object_cdn_url`
        """

        container_name = obj.container.name
        container_objects = self._containers[container_name]['objects']
        if obj.name not in container_objects:
            raise ObjectDoesNotExistError(object_name=obj.name, value=None,
                                          driver=self)

        # The fake CDN URL is stored in the object's metadata by _add_object.
        return container_objects[obj.name].meta_data['cdn_url']

    def create_container(self, container_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container

        >>> container = driver.create_container(
        ...     container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerAlreadyExistsError:

        @inherits: :class:`StorageDriver.create_container`
        """

        if container_name in self._containers:
            raise ContainerAlreadyExistsError(container_name=container_name,
                                              value=None, driver=self)

        extra = {'object_count': 0}
        container = Container(name=container_name, extra=extra, driver=self)

        # Spaces are replaced with underscores in the fake CDN URL.
        self._containers[container_name] = {'container': container,
                                            'objects': {},
                                            'cdn_url':
                                            'http://www.test.com/container/%s'
                                            %
                                            (container_name.replace(' ', '_'))
                                            }
        return container

    def delete_container(self, container):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = Container(name = 'test container',
        ...     extra={'object_count': 0}, driver=driver)
        >>> driver.delete_container(container=container)
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(
        ...     container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> len(driver._containers)
        1
        >>> driver.delete_container(container=container)
        True
        >>> len(driver._containers)
        0
        >>> container = driver.create_container(
        ...     container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(
        ...     object_name='test object', iterator=DummyFileObject(5, 10),
        ...     extra={})
        >>> driver.delete_container(container=container)
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerIsNotEmptyError:

        @inherits: :class:`StorageDriver.delete_container`
        """

        container_name = container.name
        if container_name not in self._containers:
            raise ContainerDoesNotExistError(container_name=container_name,
                                             value=None, driver=self)

        # Only empty containers may be deleted.
        container = self._containers[container_name]
        if len(container['objects']) > 0:
            raise ContainerIsNotEmptyError(container_name=container_name,
                                           value=None, driver=self)

        del self._containers[container_name]
        return True

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        # Delegate to the base-class _save_object with a fake response body.
        kwargs_dict = {'obj': obj,
                       'response': DummyFileObject(),
                       'destination_path': destination_path,
                       'overwrite_existing': overwrite_existing,
                       'delete_on_failure': delete_on_failure}

        return self._save_object(**kwargs_dict)

    def download_object_as_stream(self, obj, chunk_size=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...     container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...     iterator=DummyFileObject(5, 10), extra={})
        >>> stream = container.download_object_as_stream(obj)
        >>> stream #doctest: +ELLIPSIS
        <...closed...>

        @inherits: :class:`StorageDriver.download_object_as_stream`
        """

        return DummyFileObject()

    def upload_object(self, file_path, container, object_name, extra=None,
                      file_hash=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container_name = 'test container 1'
        >>> container = driver.create_container(container_name=container_name)
        >>> container.upload_object(file_path='/tmp/inexistent.file',
        ...     object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        LibcloudError:
        >>> file_path = path = os.path.abspath(__file__)
        >>> file_size = os.path.getsize(file_path)
        >>> obj = container.upload_object(file_path=file_path,
        ...     object_name='test')
        >>> obj #doctest: +ELLIPSIS

        >>> obj.size == file_size
        True

        @inherits: :class:`StorageDriver.upload_object`
        :param file_hash: File hash
        :type file_hash: ``str``
        """

        if not os.path.exists(file_path):
            raise LibcloudError(value='File %s does not exist' % (file_path),
                                driver=self)

        # The file content is never read; only its size is recorded.
        size = os.path.getsize(file_path)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)

    def upload_object_via_stream(self, iterator, container,
                                 object_name, extra=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...     container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(
        ...     object_name='test object', iterator=DummyFileObject(5, 10),
        ...     extra={})
        >>> obj #doctest: +ELLIPSIS


        @inherits: :class:`StorageDriver.upload_object_via_stream`
        """

        # The iterator is never consumed; it must only support len().
        size = len(iterator)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)

    def delete_object(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...     container_name='test container 1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...     iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS

        >>> container.delete_object(obj=obj)
        True
        >>> obj = Object(name='test object 2',
        ...     size=1000, hash=None, extra=None,
        ...     meta_data=None, container=container,driver=None)
        >>> container.delete_object(obj=obj) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:

        @inherits: :class:`StorageDriver.delete_object`
        """

        container_name = obj.container.name
        object_name = obj.name
        # get_object validates existence and raises ObjectDoesNotExistError
        # (or ContainerDoesNotExistError) for unknown names.
        obj = self.get_object(container_name=container_name,
                              object_name=object_name)

        del self._containers[container_name]['objects'][object_name]
        return True

    def _add_object(self, container, object_name, size, extra=None):
        # Validate the container exists before registering the object.
        container = self.get_container(container.name)

        extra = extra or {}
        meta_data = extra.get('meta_data', {})
        # Every object gets a fake CDN URL stored in its metadata.
        meta_data.update({'cdn_url': 'http://www.test.com/object/%s' %
                          (object_name.replace(' ', '_'))})
        obj = Object(name=object_name, size=size, extra=extra, hash=None,
                     meta_data=meta_data, container=container, driver=self)

        self._containers[container.name]['objects'][object_name] = obj
        return obj
+
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/google_storage.py b/awx/lib/site-packages/libcloud/storage/drivers/google_storage.py
new file mode 100644
index 0000000000..9caca9b9ee
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/google_storage.py
@@ -0,0 +1,136 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import copy
+import hmac
+
+from email.utils import formatdate
+from hashlib import sha1
+
+from libcloud.utils.py3 import b
+
+from libcloud.common.base import ConnectionUserAndKey
+
+from libcloud.storage.drivers.s3 import BaseS3StorageDriver, S3Response
+from libcloud.storage.drivers.s3 import S3RawResponse
+
+SIGNATURE_IDENTIFIER = 'GOOG1'
+
# Docs are a lie. The actual namespace returned is different from the one
# listed in the docs.
+AUTH_HOST = 'commondatastorage.googleapis.com'
+API_VERSION = '2006-03-01'
+NAMESPACE = 'http://doc.s3.amazonaws.com/%s' % (API_VERSION)
+
+
class GoogleStorageConnection(ConnectionUserAndKey):
    """
    Represents a single connection to the Google storage API endpoint.
    """

    host = AUTH_HOST
    responseCls = S3Response
    rawResponseCls = S3RawResponse

    def add_default_headers(self, headers):
        # Every request must carry an RFC 2822 GMT date; it also forms part
        # of the string that gets signed.
        headers['Date'] = formatdate(usegmt=True)
        return headers

    def pre_connect_hook(self, params, headers):
        # Compute the GOOG1 signature for the outgoing request and attach it
        # as the Authorization header.
        signature = self._get_aws_auth_param(method=self.method,
                                             headers=headers,
                                             params=params,
                                             expires=None,
                                             secret_key=self.key,
                                             path=self.action)
        headers['Authorization'] = '%s %s:%s' % (SIGNATURE_IDENTIFIER,
                                                 self.user_id, signature)
        return params, headers

    def _get_aws_auth_param(self, method, headers, params, expires,
                            secret_key, path='/'):
        # TODO: Refactor and re-use in S3 driver
        """
        Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
                                    UTF-8-Encoding-Of( StringToSign ) ) ) );

        StringToSign = HTTP-VERB + "\n" +
            Content-MD5 + "\n" +
            Content-Type + "\n" +
            Date + "\n" +
            CanonicalizedHeaders +
            CanonicalizedResource;
        """
        special_header_values = {}
        extension_header_values = {}

        # Partition the headers into the positional signature fields
        # (content-md5, content-type, date) and x-goog-* extension headers;
        # everything else is ignored for signing purposes.
        for key, value in list(copy.deepcopy(headers).items()):
            key_lower = key.lower()
            if key_lower in ('content-md5', 'content-type', 'date'):
                if key_lower == 'date':
                    special_header_values[key_lower] = value.strip()
                else:
                    special_header_values[key_lower] = value.lower().strip()
            elif key_lower.startswith('x-goog-'):
                extension_header_values[key_lower] = value.strip()

        # Absent md5/type still occupy their slot (as empty strings).
        special_header_values.setdefault('content-md5', '')
        special_header_values.setdefault('content-type', '')

        buf = [method]
        for key_lower in sorted(special_header_values):
            buf.append(special_header_values[key_lower])
        string_to_sign = '\n'.join(buf)

        extension_header_string = '\n'.join(
            '%s:%s' % (key_lower, extension_header_values[key_lower])
            for key_lower in sorted(extension_header_values))

        values_to_sign = [value for value in
                          [string_to_sign, extension_header_string, path]
                          if value]

        string_to_sign = '\n'.join(values_to_sign)
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
        )
        return b64_hmac.decode('utf-8')
+
+
class GoogleStorageDriver(BaseS3StorageDriver):
    """
    Storage driver for Google Cloud Storage, built on top of the (largely
    S3-compatible) XML interoperability API via GoogleStorageConnection.
    """
    name = 'Google Storage'
    website = 'http://cloud.google.com/'
    connectionCls = GoogleStorageConnection
    hash_type = 'md5'
    namespace = NAMESPACE
    # The interop API supports neither chunked transfer encoding nor the
    # S3 multipart upload extension.
    supports_chunked_encoding = False
    supports_s3_multipart_upload = False
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/ktucloud.py b/awx/lib/site-packages/libcloud/storage/drivers/ktucloud.py
new file mode 100644
index 0000000000..9708a9b3c4
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/ktucloud.py
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.common.types import LibcloudError
+from libcloud.storage.providers import Provider
+
+from libcloud.storage.drivers.cloudfiles import CloudFilesConnection
+from libcloud.storage.drivers.cloudfiles import CloudFilesStorageDriver
+
+KTUCLOUDSTORAGE_AUTH_URL = "https://ssproxy.ucloudbiz.olleh.com/auth/v1.0"
+KTUCLOUDSTORAGE_API_VERSION = "1.0"
+
+
class KTUCloudStorageConnection(CloudFilesConnection):
    """
    Connection class for the KT UCloud Storage endpoint.
    """

    auth_url = KTUCLOUDSTORAGE_AUTH_URL
    _auth_version = KTUCLOUDSTORAGE_API_VERSION

    def get_endpoint(self):
        # KT UCloud reuses the CloudFiles service catalog entry name; use the
        # public URL of the first matching endpoint.
        endpoints = self.service_catalog.get_endpoints(name='cloudFiles')
        if endpoints:
            endpoint = endpoints[0]
            if 'publicURL' in endpoint:
                return endpoint['publicURL']
        raise LibcloudError('Could not find specified endpoint')
+
+
class KTUCloudStorageDriver(CloudFilesStorageDriver):
    """
    CloudFiles-based storage driver for the KT UCloud Storage endpoint.
    """
    # (The previous docstring, "Cloudfiles storage driver for the UK
    # endpoint.", was a copy-paste leftover from CloudFilesUKStorageDriver.)

    type = Provider.KTUCLOUD
    name = 'KTUCloud Storage'
    connectionCls = KTUCloudStorageConnection
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/local.py b/awx/lib/site-packages/libcloud/storage/drivers/local.py
new file mode 100644
index 0000000000..5e326f902e
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/local.py
@@ -0,0 +1,600 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Provides storage driver for working with local filesystem
+"""
+
+from __future__ import with_statement
+
+import errno
+import os
+import shutil
+import sys
+
+try:
+ import lockfile
+ from lockfile import LockTimeout, mkdirlockfile
+except ImportError:
+ raise ImportError('Missing lockfile dependency, you can install it '
+ 'using pip: pip install lockfile')
+
+from libcloud.utils.files import read_in_chunks
+from libcloud.utils.py3 import relpath
+from libcloud.utils.py3 import u
+from libcloud.common.base import Connection
+from libcloud.storage.base import Object, Container, StorageDriver
+from libcloud.common.types import LibcloudError
+from libcloud.storage.types import ContainerAlreadyExistsError
+from libcloud.storage.types import ContainerDoesNotExistError
+from libcloud.storage.types import ContainerIsNotEmptyError
+from libcloud.storage.types import ObjectError
+from libcloud.storage.types import ObjectDoesNotExistError
+from libcloud.storage.types import InvalidContainerNameError
+
# Bookkeeping directory names used by the locking / hashing machinery;
# they are pruned while walking a container so they never appear as objects.
IGNORE_FOLDERS = ['.lock', '.hash']
+
+
class LockLocalStorage(object):
    """
    Context manager which holds an exclusive, directory-based lock on a
    local path while it is being modified.
    """

    def __init__(self, path):
        self.path = path
        # MkdirLockFile relies on mkdir(2), which is atomic on POSIX.
        self.lock = mkdirlockfile.MkdirLockFile(self.path, threaded=True)

    def __enter__(self):
        try:
            self.lock.acquire(timeout=0.1)
        except LockTimeout:
            raise LibcloudError('Lock timeout')

    def __exit__(self, exc_type, exc_value, exc_tb):
        if self.lock.is_locked():
            self.lock.release()

        # Bug fix: the original explicitly re-raised ``exc_value`` here,
        # which replaced the in-flight exception's original traceback.
        # Returning a falsy value lets the exception propagate unchanged.
        return False
+
+
class LocalStorageDriver(StorageDriver):
    """
    Implementation of local file-system based storage. This is helpful
    where the user would want to use the same code (using libcloud) and
    switch between cloud storage and local storage.
    """

    connectionCls = Connection
    name = 'Local Storage'
    website = 'http://example.com'
    hash_type = 'md5'

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        """
        :param key: Path of an existing directory which acts as the
            storage root; containers become its sub-directories.
        :type key: ``str``
        """
        # Use the key as the path to the storage
        self.base_path = key

        if not os.path.isdir(self.base_path):
            raise LibcloudError('The base path is not a directory')

        # Bug fix: the original called ``super(StorageDriver, self)``, which
        # skips StorageDriver.__init__ in the MRO and leaves the driver only
        # partially initialized.
        super(LocalStorageDriver, self).__init__(key=key, secret=secret,
                                                 secure=secure, host=host,
                                                 port=port, **kwargs)
+
+ def _make_path(self, path, ignore_existing=True):
+ """
+ Create a path by checking if it already exists
+ """
+
+ try:
+ os.makedirs(path)
+ except OSError:
+ exp = sys.exc_info()[1]
+ if exp.errno == errno.EEXIST and not ignore_existing:
+ raise exp
+
+ def _check_container_name(self, container_name):
+ """
+ Check if the container name is valid
+
+ :param container_name: Container name
+ :type container_name: ``str``
+ """
+
+ if '/' in container_name or '\\' in container_name:
+ raise InvalidContainerNameError(value=None, driver=self,
+ container_name=container_name)
+
+ def _make_container(self, container_name):
+ """
+ Create a container instance
+
+ :param container_name: Container name.
+ :type container_name: ``str``
+
+ :return: Container instance.
+ :rtype: :class:`Container`
+ """
+
+ self._check_container_name(container_name)
+
+ full_path = os.path.join(self.base_path, container_name)
+
+ try:
+ stat = os.stat(full_path)
+ if not os.path.isdir(full_path):
+ raise OSError('Target path is not a directory')
+ except OSError:
+ raise ContainerDoesNotExistError(value=None, driver=self,
+ container_name=container_name)
+
+ extra = {}
+ extra['creation_time'] = stat.st_ctime
+ extra['access_time'] = stat.st_atime
+ extra['modify_time'] = stat.st_mtime
+
+ return Container(name=container_name, extra=extra, driver=self)
+
+ def _make_object(self, container, object_name):
+ """
+ Create an object instance
+
+ :param container: Container.
+ :type container: :class:`Container`
+
+ :param object_name: Object name.
+ :type object_name: ``str``
+
+ :return: Object instance.
+ :rtype: :class:`Object`
+ """
+
+ full_path = os.path.join(self.base_path, container.name, object_name)
+
+ if os.path.isdir(full_path):
+ raise ObjectError(value=None, driver=self, object_name=object_name)
+
+ try:
+ stat = os.stat(full_path)
+ except Exception:
+ raise ObjectDoesNotExistError(value=None, driver=self,
+ object_name=object_name)
+
+ # Make a hash for the file based on the metadata. We can safely
+ # use only the mtime attribute here. If the file contents change,
+ # the underlying file-system will change mtime
+ data_hash = self._get_hash_function()
+ data_hash.update(u(stat.st_mtime).encode('ascii'))
+ data_hash = data_hash.hexdigest()
+
+ extra = {}
+ extra['creation_time'] = stat.st_ctime
+ extra['access_time'] = stat.st_atime
+ extra['modify_time'] = stat.st_mtime
+
+ return Object(name=object_name, size=stat.st_size, extra=extra,
+ driver=self, container=container, hash=data_hash,
+ meta_data=None)
+
+ def iterate_containers(self):
+ """
+ Return a generator of containers.
+
+ :return: A generator of Container instances.
+ :rtype: ``generator`` of :class:`Container`
+ """
+
+ for container_name in os.listdir(self.base_path):
+ full_path = os.path.join(self.base_path, container_name)
+ if not os.path.isdir(full_path):
+ continue
+ yield self._make_container(container_name)
+
+ def _get_objects(self, container):
+ """
+ Recursively iterate through the file-system and return the object names
+ """
+
+ cpath = self.get_container_cdn_url(container, check=True)
+
+ for folder, subfolders, files in os.walk(cpath, topdown=True):
+ # Remove unwanted subfolders
+ for subf in IGNORE_FOLDERS:
+ if subf in subfolders:
+ subfolders.remove(subf)
+
+ for name in files:
+ full_path = os.path.join(folder, name)
+ object_name = relpath(full_path, start=cpath)
+ yield self._make_object(container, object_name)
+
    def iterate_container_objects(self, container):
        """
        Returns a generator of objects for the given container.

        :param container: Container instance
        :type container: :class:`Container`

        :return: A generator of Object instances.
        :rtype: ``generator`` of :class:`Object`
        """

        # Thin public wrapper over the recursive directory walk.
        return self._get_objects(container)
+
    def get_container(self, container_name):
        """
        Return a container instance.

        :param container_name: Container name.
        :type container_name: ``str``

        :return: :class:`Container` instance.
        :rtype: :class:`Container`

        :raises: :class:`ContainerDoesNotExistError` when no directory with
            this name exists under the storage root.
        """
        return self._make_container(container_name)
+
+ def get_container_cdn_url(self, container, check=False):
+ """
+ Return a container CDN URL.
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :param check: Indicates if the path's existence must be checked
+ :type check: ``bool``
+
+ :return: A CDN URL for this container.
+ :rtype: ``str``
+ """
+ path = os.path.join(self.base_path, container.name)
+
+ if check and not os.path.isdir(path):
+ raise ContainerDoesNotExistError(value=None, driver=self,
+ container_name=container.name)
+
+ return path
+
    def get_object(self, container_name, object_name):
        """
        Return an object instance.

        :param container_name: Container name.
        :type container_name: ``str``

        :param object_name: Object name.
        :type object_name: ``str``

        :return: :class:`Object` instance.
        :rtype: :class:`Object`

        :raises: :class:`ContainerDoesNotExistError` /
            :class:`ObjectDoesNotExistError` when either part is missing.
        """
        container = self._make_container(container_name)
        return self._make_object(container, object_name)
+
    def get_object_cdn_url(self, obj):
        """
        Return a object CDN URL.

        For this driver the "CDN URL" is simply the object's absolute path
        on the local filesystem.

        :param obj: Object instance
        :type obj: :class:`Object`

        :return: A CDN URL for this object.
        :rtype: ``str``
        """
        return os.path.join(self.base_path, obj.container.name, obj.name)
+
+ def enable_container_cdn(self, container):
+ """
+ Enable container CDN.
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :rtype: ``bool``
+ """
+
+ path = self.get_container_cdn_url(container)
+ lockfile.MkdirFileLock(path, threaded=True)
+
+ with LockLocalStorage(path):
+ self._make_path(path)
+
+ return True
+
+ def enable_object_cdn(self, obj):
+ """
+ Enable object CDN.
+
+ :param obj: Object instance
+ :type obj: :class:`Object`
+
+ :rtype: ``bool``
+ """
+ path = self.get_object_cdn_url(obj)
+
+ with LockLocalStorage(path):
+ if os.path.exists(path):
+ return False
+ try:
+ obj_file = open(path, 'w')
+ obj_file.close()
+ except:
+ return False
+
+ return True
+
+ def download_object(self, obj, destination_path, overwrite_existing=False,
+ delete_on_failure=True):
+ """
+ Download an object to the specified destination path.
+
+ :param obj: Object instance.
+ :type obj: :class:`Object`
+
+ :param destination_path: Full path to a file or a directory where the
+ incoming file will be saved.
+ :type destination_path: ``str``
+
+ :param overwrite_existing: True to overwrite an existing file,
+ defaults to False.
+ :type overwrite_existing: ``bool``
+
+ :param delete_on_failure: True to delete a partially downloaded file if
+ the download was not successful (hash mismatch / file size).
+ :type delete_on_failure: ``bool``
+
+ :return: True if an object has been successfully downloaded, False
+ otherwise.
+ :rtype: ``bool``
+ """
+
+ obj_path = self.get_object_cdn_url(obj)
+ base_name = os.path.basename(destination_path)
+
+ if not base_name and not os.path.exists(destination_path):
+ raise LibcloudError(
+ value='Path %s does not exist' % (destination_path),
+ driver=self)
+
+ if not base_name:
+ file_path = os.path.join(destination_path, obj.name)
+ else:
+ file_path = destination_path
+
+ if os.path.exists(file_path) and not overwrite_existing:
+ raise LibcloudError(
+ value='File %s already exists, but ' % (file_path) +
+ 'overwrite_existing=False',
+ driver=self)
+
+ try:
+ shutil.copy(obj_path, file_path)
+ except IOError:
+ if delete_on_failure:
+ try:
+ os.unlink(file_path)
+ except Exception:
+ pass
+ return False
+
+ return True
+
    def download_object_as_stream(self, obj, chunk_size=None):
        """
        Return a generator which yields object data.

        :param obj: Object instance
        :type obj: :class:`Object`

        :param chunk_size: Optional chunk size (in bytes).
        :type chunk_size: ``int``

        :rtype: ``object``
        """

        path = self.get_object_cdn_url(obj)

        # NOTE(review): the file is opened in text mode; for binary objects
        # (especially on Python 3) 'rb' looks more appropriate — confirm
        # what callers expect before changing the chunk type.
        with open(path) as obj_file:
            for data in read_in_chunks(obj_file, chunk_size=chunk_size):
                yield data
+
    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True):
        """
        Upload an object currently located on a disk.

        :param file_path: Path to the object on disk.
        :type file_path: ``str``

        :param container: Destination container.
        :type container: :class:`Container`

        :param object_name: Object name.
        :type object_name: ``str``

        :param verify_hash: Verify hash (accepted for interface
            compatibility; this driver's implementation does not use it).
        :type verify_hash: ``bool``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: ``object``
        """

        path = self.get_container_cdn_url(container, check=True)
        obj_path = os.path.join(path, object_name)
        base_path = os.path.dirname(obj_path)

        # Object names may contain separators; create intermediate folders.
        self._make_path(base_path)

        with LockLocalStorage(obj_path):
            shutil.copy(file_path, obj_path)

        # 0664 (rw-rw-r--), spelled via int() so the literal parses the same
        # on both Python 2 and 3.
        os.chmod(obj_path, int('664', 8))

        return self._make_object(container, object_name)
+
+ def upload_object_via_stream(self, iterator, container,
+ object_name,
+ extra=None):
+ """
+ Upload an object using an iterator.
+
+ If a provider supports it, chunked transfer encoding is used and you
+ don't need to know in advance the amount of data to be uploaded.
+
+ Otherwise if a provider doesn't support it, iterator will be exhausted
+ so a total size for data to be uploaded can be determined.
+
+ Note: Exhausting the iterator means that the whole data must be
+ buffered in memory which might result in memory exhausting when
+ uploading a very large object.
+
+ If a file is located on a disk you are advised to use upload_object
+ function which uses fs.stat function to determine the file size and it
+ doesn't need to buffer whole object in the memory.
+
+ :type iterator: ``object``
+ :param iterator: An object which implements the iterator interface.
+
+ :type container: :class:`Container`
+ :param container: Destination container.
+
+ :type object_name: ``str``
+ :param object_name: Object name.
+
+ :type extra: ``dict``
+ :param extra: (optional) Extra attributes (driver specific). Note:
+ This dictionary must contain a 'content_type' key which represents
+ a content type of the stored object.
+
+ :rtype: ``object``
+ """
+
+ path = self.get_container_cdn_url(container, check=True)
+ obj_path = os.path.join(path, object_name)
+ base_path = os.path.dirname(obj_path)
+
+ self._make_path(base_path)
+
+ with LockLocalStorage(obj_path):
+ obj_file = open(obj_path, 'w')
+ for data in iterator:
+ obj_file.write(data)
+
+ obj_file.close()
+
+ os.chmod(obj_path, int('664', 8))
+
+ return self._make_object(container, object_name)
+
    def delete_object(self, obj):
        """
        Delete an object.

        :type obj: :class:`Object`
        :param obj: Object instance.

        :return: ``bool`` True on success.
        :rtype: ``bool``
        """

        path = self.get_object_cdn_url(obj)

        with LockLocalStorage(path):
            try:
                os.unlink(path)
            except Exception:
                # Best effort: any failure to remove the file is reported as
                # an unsuccessful delete rather than raised.
                return False

        # Check and delete all the empty parent folders
        path = os.path.dirname(path)
        container_url = obj.container.get_cdn_url()

        # Delete the empty parent folders till the container's level
        while path != container_url:
            try:
                os.rmdir(path)
            except OSError:
                exp = sys.exc_info()[1]
                # A non-empty directory ends the upward sweep; any other
                # error (e.g. permissions) is propagated to the caller.
                if exp.errno == errno.ENOTEMPTY:
                    break
                raise exp

            path = os.path.dirname(path)

        return True
+
    def create_container(self, container_name):
        """
        Create a new container.

        :type container_name: ``str``
        :param container_name: Container name.

        :return: :class:`Container` instance on success.
        :rtype: :class:`Container`

        :raises: :class:`ContainerAlreadyExistsError` when a directory with
            this name already exists; :class:`LibcloudError` on any other
            creation failure.
        """

        self._check_container_name(container_name)

        path = os.path.join(self.base_path, container_name)

        try:
            # ignore_existing=False makes an existing directory surface as
            # an OSError with errno EEXIST, mapped below.
            self._make_path(path, ignore_existing=False)
        except OSError:
            exp = sys.exc_info()[1]
            if exp.errno == errno.EEXIST:
                raise ContainerAlreadyExistsError(
                    value='Container with this name already exists. The name '
                          'must be unique among all the containers in the '
                          'system',
                    container_name=container_name, driver=self)
            else:
                raise LibcloudError(
                    'Error creating container %s' % container_name,
                    driver=self)
        except Exception:
            # Any non-OSError failure is wrapped the same way.
            raise LibcloudError(
                'Error creating container %s' % container_name, driver=self)

        return self._make_container(container_name)
+
+ def delete_container(self, container):
+ """
+ Delete a container.
+
+ :type container: :class:`Container`
+ :param container: Container instance
+
+ :return: True on success, False otherwise.
+ :rtype: ``bool``
+ """
+
+ # Check if there are any objects inside this
+ for obj in self._get_objects(container):
+ raise ContainerIsNotEmptyError(value='Container is not empty',
+ container_name=container.name,
+ driver=self)
+
+ path = self.get_container_cdn_url(container, check=True)
+
+ with LockLocalStorage(path):
+ try:
+ shutil.rmtree(path)
+ except Exception:
+ return False
+
+ return True
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/nimbus.py b/awx/lib/site-packages/libcloud/storage/drivers/nimbus.py
new file mode 100644
index 0000000000..b649e8b306
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/nimbus.py
@@ -0,0 +1,114 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import hashlib
+import hmac
+
+try:
+ import simplejson as json
+except ImportError:
+ import json # NOQA
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlencode
+
+from libcloud.common.base import ConnectionUserAndKey, JsonResponse
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.storage.base import Container, StorageDriver
+
+
class NimbusResponse(JsonResponse):
    """Response class for the Nimbus.io API."""

    valid_response_codes = [httplib.OK, httplib.NOT_FOUND, httplib.CONFLICT,
                            httplib.BAD_REQUEST]

    def success(self):
        # Any whitelisted status counts as a parseable response.
        return self.status in self.valid_response_codes

    def parse_error(self):
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.body)
        raise LibcloudError('Unknown error. Status code: %d' % (self.status),
                            driver=self.driver)
+
+
class NimbusConnection(ConnectionUserAndKey):
    # Connection for the Nimbus.io API; every request is signed with an
    # HMAC-SHA256 over (user id, method, timestamp, request path).
    host = 'nimbus.io'
    responseCls = NimbusResponse

    def __init__(self, *args, **kwargs):
        # ``id`` is the Nimbus.io account identifier used in the
        # Authorization header; popped so the base class never sees it.
        self.id = kwargs.pop('id')
        super(NimbusConnection, self).__init__(*args, **kwargs)

    def pre_connect_hook(self, params, headers):
        # The timestamp header must match the value covered by the signature.
        timestamp = str(int(time.time()))
        signature = self._calculate_signature(user_id=self.user_id,
                                              method=self.method,
                                              params=params,
                                              path=self.action,
                                              timestamp=timestamp,
                                              key=self.key)
        headers['X-NIMBUS-IO-Timestamp'] = timestamp
        headers['Authorization'] = 'NIMBUS.IO %s:%s' % (self.id, signature)
        return params, headers

    def _calculate_signature(self, user_id, method, params, path, timestamp,
                             key):
        # The signed URI includes the query string when params are present.
        if params:
            uri_path = path + '?' + urlencode(params)
        else:
            uri_path = path

        string_to_sign = [user_id, method, str(timestamp), uri_path]
        string_to_sign = '\n'.join(string_to_sign)

        # NOTE(review): on Python 3, hmac.new() requires ``bytes`` for both
        # key and message; ``str`` arguments raise TypeError there. Confirm
        # which Python versions this driver must support.
        hmac_value = hmac.new(key, string_to_sign, hashlib.sha256)
        return hmac_value.hexdigest()
+
+
class NimbusStorageDriver(StorageDriver):
    # Storage driver for the Nimbus.io service ("collections" map to
    # libcloud containers).
    name = 'Nimbus.io'
    website = 'https://nimbus.io/'
    connectionCls = NimbusConnection

    def __init__(self, *args, **kwargs):
        # ``user_id`` is required; it is forwarded to the connection as the
        # account id (see _ex_connection_class_kwargs below).
        self.user_id = kwargs['user_id']
        super(NimbusStorageDriver, self).__init__(*args, **kwargs)

    def iterate_containers(self):
        # Lists the account's collections.
        response = self.connection.request('/customers/%s/collections' %
                                           (self.connection.user_id))
        return self._to_containers(response.object)

    def create_container(self, container_name):
        params = {'action': 'create', 'name': container_name}
        response = self.connection.request('/customers/%s/collections' %
                                           (self.connection.user_id),
                                           params=params,
                                           method='POST')
        return self._to_container(response.object)

    def _to_containers(self, data):
        # Lazily convert each API item into a Container instance.
        for item in data:
            yield self._to_container(item)

    def _to_container(self, data):
        # NOTE(review): assumes each collection is serialized as a sequence
        # whose first element is the name and third the creation date —
        # confirm against the Nimbus.io API response format.
        name = data[0]
        extra = {'date_created': data[2]}
        return Container(name=name, extra=extra, driver=self)

    def _ex_connection_class_kwargs(self):
        # Extra kwargs handed to NimbusConnection.__init__ (the account id).
        result = {'id': self.user_id}
        return result
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/ninefold.py b/awx/lib/site-packages/libcloud/storage/drivers/ninefold.py
new file mode 100644
index 0000000000..fbdf567842
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/ninefold.py
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.storage.providers import Provider
+from libcloud.storage.drivers.atmos import AtmosDriver
+
+
class NinefoldStorageDriver(AtmosDriver):
    """
    Ninefold storage driver.

    Ninefold exposes an EMC Atmos-compatible API, so all behavior is
    inherited from AtmosDriver; only the endpoint coordinates and provider
    identity differ.
    """
    host = 'api.ninefold.com'
    path = '/storage/v1.0'

    type = Provider.NINEFOLD
    name = 'Ninefold'
    website = 'http://ninefold.com/'
diff --git a/awx/lib/site-packages/libcloud/storage/drivers/s3.py b/awx/lib/site-packages/libcloud/storage/drivers/s3.py
new file mode 100644
index 0000000000..9577f98369
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/drivers/s3.py
@@ -0,0 +1,975 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import copy
+import base64
+import hmac
+import sys
+
+from hashlib import sha1
+
+try:
+ from lxml.etree import Element, SubElement
+except ImportError:
+ from xml.etree.ElementTree import Element, SubElement
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlquote
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import tostring
+
+from libcloud.utils.xml import fixxpath, findtext
+from libcloud.utils.files import read_in_chunks
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.common.base import ConnectionUserAndKey, RawResponse
+from libcloud.common.aws import AWSBaseResponse, AWSDriver, AWSTokenConnection
+
+from libcloud.storage.base import Object, Container, StorageDriver
+from libcloud.storage.types import ContainerIsNotEmptyError
+from libcloud.storage.types import InvalidContainerNameError
+from libcloud.storage.types import ContainerDoesNotExistError
+from libcloud.storage.types import ObjectDoesNotExistError
+from libcloud.storage.types import ObjectHashMismatchError
+
+
# How long before the token expires
EXPIRATION_SECONDS = 15 * 60

# Region-specific S3 endpoint hostnames.
S3_US_STANDARD_HOST = 's3.amazonaws.com'
S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com'
S3_US_WEST_OREGON_HOST = 's3-us-west-2.amazonaws.com'
S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com'
S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com'
S3_AP_NORTHEAST_HOST = 's3-ap-northeast-1.amazonaws.com'

API_VERSION = '2006-03-01'
# XML namespace used when parsing S3 responses.
NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)

# AWS multi-part chunks must be minimum 5MB
CHUNK_SIZE = 5 * 1024 * 1024

# Desired number of items in each response inside a paginated request in
# ex_iterate_multipart_uploads.
RESPONSES_PER_REQUEST = 100
+
+
class S3Response(AWSBaseResponse):
    """Response class for the S3 REST API."""

    namespace = None
    valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
                            httplib.BAD_REQUEST]

    def success(self):
        # Any 2xx, plus a small whitelist of codes the drivers interpret
        # themselves, is treated as a parseable response.
        status = int(self.status)
        return 200 <= status <= 299 or status in self.valid_response_codes

    def parse_error(self):
        if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
            raise InvalidCredsError(self.body)
        if self.status == httplib.MOVED_PERMANENTLY:
            raise LibcloudError('This bucket is located in a different ' +
                                'region. Please use the correct driver.',
                                driver=S3StorageDriver)
        raise LibcloudError('Unknown error. Status code: %d' % (self.status),
                            driver=S3StorageDriver)
+
+
class S3RawResponse(S3Response, RawResponse):
    # Raw (streaming) variant of S3Response; all behavior comes from the
    # two bases.
    pass
+
+
class BaseS3Connection(ConnectionUserAndKey):
    """
    Represents a single connection to the S3 Endpoint
    """

    host = 's3.amazonaws.com'
    responseCls = S3Response
    rawResponseCls = S3RawResponse

    def add_default_params(self, params):
        # Query-string authentication: every request carries the access key
        # id and an absolute expiry time which the signature covers.
        expires = str(int(time.time()) + EXPIRATION_SECONDS)
        params['AWSAccessKeyId'] = self.user_id
        params['Expires'] = expires
        return params

    def pre_connect_hook(self, params, headers):
        # Computed last, so the signature covers the final set of params.
        params['Signature'] = self._get_aws_auth_param(
            method=self.method, headers=headers, params=params,
            expires=params['Expires'], secret_key=self.key, path=self.action)
        return params, headers
+
    def _get_aws_auth_param(self, method, headers, params, expires,
                            secret_key, path='/'):
        """
        Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
                                    UTF-8-Encoding-Of( StringToSign ) ) ) );

        StringToSign = HTTP-VERB + "\n" +
            Content-MD5 + "\n" +
            Content-Type + "\n" +
            Expires + "\n" +
            CanonicalizedAmzHeaders +
            CanonicalizedResource;
        """
        # The headers that take fixed positions in StringToSign, vs the
        # x-amz-* headers which are canonicalized separately.
        special_header_keys = ['content-md5', 'content-type', 'date']
        special_header_values = {'date': ''}
        amz_header_values = {}

        headers_copy = copy.deepcopy(headers)
        for key, value in list(headers_copy.items()):
            key_lower = key.lower()
            if key_lower in special_header_keys:
                special_header_values[key_lower] = value.strip()
            elif key_lower.startswith('x-amz-'):
                amz_header_values[key.lower()] = value.strip()

        # Absent Content-MD5 / Content-Type contribute empty lines, per the
        # AWS signing specification.
        if 'content-md5' not in special_header_values:
            special_header_values['content-md5'] = ''

        if 'content-type' not in special_header_values:
            special_header_values['content-type'] = ''

        # With query-string auth, the Expires value stands in for Date.
        if expires:
            special_header_values['date'] = str(expires)

        keys_sorted = list(special_header_values.keys())
        keys_sorted.sort()

        buf = [method]
        for key in keys_sorted:
            value = special_header_values[key]
            buf.append(value)
        string_to_sign = '\n'.join(buf)

        # x-amz-* headers are signed in lexicographic order as "name:value".
        keys_sorted = list(amz_header_values.keys())
        keys_sorted.sort()

        amz_header_string = []
        for key in keys_sorted:
            value = amz_header_values[key]
            amz_header_string.append('%s:%s' % (key, value))
        amz_header_string = '\n'.join(amz_header_string)

        values_to_sign = []
        for value in [string_to_sign, amz_header_string, path]:
            if value:
                values_to_sign.append(value)

        string_to_sign = '\n'.join(values_to_sign)
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
        )
        return b64_hmac.decode('utf-8')
+
+
class S3Connection(AWSTokenConnection, BaseS3Connection):
    """
    Represents a single connection to the S3 endpoint, with AWS-specific
    features.
    """
    # AWSTokenConnection contributes temporary-credential (security token)
    # support; all signing logic lives in BaseS3Connection.
    pass
+
+
class S3MultipartUpload(object):
    """
    Class representing an amazon s3 multipart upload
    """

    def __init__(self, key, id, created_at, initiator, owner):
        """
        Class representing an amazon s3 multipart upload

        :param key: The object/key that was being uploaded
        :type key: ``str``

        :param id: The upload id assigned by amazon
        :type id: ``str``

        :param created_at: The date/time at which the upload was started
        :type created_at: ``str``

        :param initiator: The AWS owner/IAM user who initiated this
        :type initiator: ``str``

        :param owner: The AWS owner/IAM who will own this object
        :type owner: ``str``
        """
        self.key = key
        self.id = id
        self.created_at = created_at
        self.initiator = initiator
        self.owner = owner

    def __repr__(self):
        # Bug fix: the format string had lost its text — ``'' % (self.key)``
        # raises "TypeError: not all arguments converted during string
        # formatting" on every repr() call.
        return ('<S3MultipartUpload: key=%s>' % (self.key))
+
+
class BaseS3StorageDriver(StorageDriver):
    # Common implementation shared by the regional S3 drivers and
    # S3-compatible services.
    name = 'Amazon S3 (standard)'
    website = 'http://aws.amazon.com/s3/'
    connectionCls = BaseS3Connection
    hash_type = 'md5'
    supports_chunked_encoding = False
    supports_s3_multipart_upload = True
    # Location constraint sent on bucket creation; empty means US standard.
    ex_location_name = ''
    namespace = NAMESPACE

    def iterate_containers(self):
        """
        Return a generator of :class:`Container` built from a GET on the
        service root (one Bucket element per container).
        """
        response = self.connection.request('/')
        if response.status == httplib.OK:
            containers = self._to_containers(obj=response.object,
                                             xpath='Buckets/Bucket')
            return containers

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)
+
    def list_container_objects(self, container, ex_prefix=None):
        """
        Return a list of objects for the given container.

        :param container: Container instance.
        :type container: :class:`Container`

        :param ex_prefix: Only return objects starting with ex_prefix
        :type ex_prefix: ``str``

        :return: A list of Object instances.
        :rtype: ``list`` of :class:`Object`
        """
        # Materializes the paginated generator in full.
        return list(self.iterate_container_objects(container,
                                                   ex_prefix=ex_prefix))
+
    def iterate_container_objects(self, container, ex_prefix=None):
        """
        Return a generator of objects for the given container.

        :param container: Container instance
        :type container: :class:`Container`

        :param ex_prefix: Only return objects starting with ex_prefix
        :type ex_prefix: ``str``

        :return: A generator of Object instances.
        :rtype: ``generator`` of :class:`Object`
        """
        params = {}
        if ex_prefix:
            params['prefix'] = ex_prefix

        # Pagination: each request resumes after the last key seen, until
        # S3 reports the listing is no longer truncated.
        last_key = None
        exhausted = False
        container_path = self._get_container_path(container)

        while not exhausted:
            if last_key:
                params['marker'] = last_key

            response = self.connection.request(container_path,
                                               params=params)

            if response.status != httplib.OK:
                raise LibcloudError('Unexpected status code: %s' %
                                    (response.status), driver=self)

            objects = self._to_objs(obj=response.object,
                                    xpath='Contents', container=container)
            is_truncated = response.object.findtext(fixxpath(
                xpath='IsTruncated', namespace=self.namespace)).lower()
            exhausted = (is_truncated == 'false')

            # NOTE(review): if a truncated page ever came back with no
            # Contents, last_key would stay None and the same page would be
            # requested again — confirm S3 cannot return such a page.
            last_key = None
            for obj in objects:
                last_key = obj.name
                yield obj
+
    def get_container(self, container_name):
        """
        Return a :class:`Container` after verifying the bucket exists via a
        HEAD request (tolerating missing IAM permission for HEAD).

        :param container_name: Container (bucket) name.
        :type container_name: ``str``

        :rtype: :class:`Container`
        """
        try:
            response = self.connection.request('/%s' % container_name,
                                               method='HEAD')
            if response.status == httplib.NOT_FOUND:
                raise ContainerDoesNotExistError(value=None, driver=self,
                                                 container_name=container_name)
        except InvalidCredsError:
            # This just means the user doesn't have IAM permissions to do a
            # HEAD request but other requests might work.
            pass
        return Container(name=container_name, extra=None, driver=self)
+
+ def get_object(self, container_name, object_name):
+ container = self.get_container(container_name=container_name)
+ object_path = self._get_object_path(container, object_name)
+ response = self.connection.request(object_path, method='HEAD')
+
+ if response.status == httplib.OK:
+ obj = self._headers_to_object(object_name=object_name,
+ container=container,
+ headers=response.headers)
+ return obj
+
+ raise ObjectDoesNotExistError(value=None, driver=self,
+ object_name=object_name)
+
+ def _get_container_path(self, container):
+ """
+ Return a container path
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :return: A path for this container.
+ :rtype: ``str``
+ """
+ return '/%s' % (container.name)
+
+    def _get_object_path(self, container, object_name):
+        """
+        Return an object's CDN path.
+
+        :param container: Container instance
+        :type container: :class:`Container`
+
+        :param object_name: Object name
+        :type object_name: :class:`str`
+
+        :return: A path for this object.
+        :rtype: ``str``
+        """
+        container_url = self._get_container_path(container)
+        # URL-quote the key so names with special characters produce a
+        # valid request path.
+        object_name_cleaned = self._clean_object_name(object_name)
+        object_path = '%s/%s' % (container_url, object_name_cleaned)
+        return object_path
+
+    def create_container(self, container_name):
+        """
+        Create a new bucket.
+
+        When the driver defines ``ex_location_name``, a
+        ``CreateBucketConfiguration`` XML payload with a
+        ``LocationConstraint`` element is sent so the bucket is created in
+        that region; otherwise an empty body is sent.
+
+        :param container_name: Name for the new bucket.
+        :type container_name: ``str``
+
+        :raises InvalidContainerNameError: If the name is already taken
+            (409) or contains invalid characters (400).
+        :raises LibcloudError: On any other unexpected status code.
+        :rtype: :class:`Container`
+        """
+        if self.ex_location_name:
+            root = Element('CreateBucketConfiguration')
+            child = SubElement(root, 'LocationConstraint')
+            child.text = self.ex_location_name
+
+            data = tostring(root)
+        else:
+            data = ''
+
+        response = self.connection.request('/%s' % (container_name),
+                                           data=data,
+                                           method='PUT')
+
+        if response.status == httplib.OK:
+            container = Container(name=container_name, extra=None, driver=self)
+            return container
+        elif response.status == httplib.CONFLICT:
+            raise InvalidContainerNameError(
+                value='Container with this name already exists. The name must '
+                      'be unique among all the containers in the system',
+                container_name=container_name, driver=self)
+        elif response.status == httplib.BAD_REQUEST:
+            raise InvalidContainerNameError(value='Container name contains ' +
+                                            'invalid characters.',
+                                            container_name=container_name,
+                                            driver=self)
+
+        raise LibcloudError('Unexpected status code: %s' % (response.status),
+                            driver=self)
+
+    def delete_container(self, container):
+        """
+        Delete a bucket.  The bucket must be empty.
+
+        :param container: Container to delete.
+        :type container: :class:`Container`
+
+        :raises ContainerIsNotEmptyError: If objects still exist (409).
+        :raises ContainerDoesNotExistError: If the bucket is missing (404).
+        :return: ``True`` on success (204), ``False`` on any other
+            unhandled status code.
+        :rtype: ``bool``
+        """
+        # Note: All the objects in the container must be deleted first
+        response = self.connection.request('/%s' % (container.name),
+                                           method='DELETE')
+        if response.status == httplib.NO_CONTENT:
+            return True
+        elif response.status == httplib.CONFLICT:
+            raise ContainerIsNotEmptyError(
+                value='Container must be empty before it can be deleted.',
+                container_name=container.name, driver=self)
+        elif response.status == httplib.NOT_FOUND:
+            raise ContainerDoesNotExistError(value=None,
+                                             driver=self,
+                                             container_name=container.name)
+
+        return False
+
+    def download_object(self, obj, destination_path, overwrite_existing=False,
+                        delete_on_failure=True):
+        """
+        Download an object to a local file.
+
+        Issues a raw GET for the object and delegates saving the body to
+        disk (and the status-code check) to ``_get_object`` with the
+        ``_save_object`` callback.
+
+        :param obj: Object to download.
+        :type obj: :class:`Object`
+
+        :param destination_path: Local path the object is written to.
+        :type destination_path: ``str``
+
+        :param overwrite_existing: Overwrite an existing destination file.
+        :type overwrite_existing: ``bool``
+
+        :param delete_on_failure: Remove a partially written file on error.
+        :type delete_on_failure: ``bool``
+        """
+        obj_path = self._get_object_path(obj.container, obj.name)
+
+        response = self.connection.request(obj_path, method='GET', raw=True)
+
+        return self._get_object(obj=obj, callback=self._save_object,
+                                response=response,
+                                callback_kwargs={
+                                    'obj': obj,
+                                    'response': response.response,
+                                    'destination_path': destination_path,
+                                    'overwrite_existing': overwrite_existing,
+                                    'delete_on_failure': delete_on_failure},
+                                success_status_code=httplib.OK)
+
+    def download_object_as_stream(self, obj, chunk_size=None):
+        """
+        Download an object as a stream of data chunks.
+
+        Issues a raw GET and returns the body lazily through the
+        ``read_in_chunks`` helper rather than buffering it in memory.
+
+        :param obj: Object to download.
+        :type obj: :class:`Object`
+
+        :param chunk_size: Optional chunk size in bytes.
+        :type chunk_size: ``int``
+        """
+        obj_path = self._get_object_path(obj.container, obj.name)
+        response = self.connection.request(obj_path, method='GET', raw=True)
+
+        return self._get_object(obj=obj, callback=read_in_chunks,
+                                response=response,
+                                callback_kwargs={'iterator': response.response,
+                                                 'chunk_size': chunk_size},
+                                success_status_code=httplib.OK)
+
+    def upload_object(self, file_path, container, object_name, extra=None,
+                      verify_hash=True, ex_storage_class=None):
+        """
+        @inherits: :class:`StorageDriver.upload_object`
+
+        :param ex_storage_class: Storage class; one of ``standard`` or
+            ``reduced_redundancy`` (validated in ``_put_object``, defaults
+            to ``standard``).
+        :type ex_storage_class: ``str``
+        """
+        upload_func = self._upload_file
+        upload_func_kwargs = {'file_path': file_path}
+
+        return self._put_object(container=container, object_name=object_name,
+                                upload_func=upload_func,
+                                upload_func_kwargs=upload_func_kwargs,
+                                extra=extra, file_path=file_path,
+                                verify_hash=verify_hash,
+                                storage_class=ex_storage_class)
+
+    def _upload_multipart(self, response, data, iterator, container,
+                          object_name, calculate_hash=True):
+        """
+        Callback invoked for uploading data to S3 using Amazon's
+        multipart upload mechanism
+
+        :param response: Response object from the initial POST request
+        :type response: :class:`S3RawResponse`
+
+        :param data: Any data from the initial POST request
+        :type data: ``str``
+
+        :param iterator: The generator for fetching the upload data
+        :type iterator: ``generator``
+
+        :param container: The container owning the object to which data is
+            being uploaded
+        :type container: :class:`Container`
+
+        :param object_name: The name of the object to which we are uploading
+        :type object_name: ``str``
+
+        :keyword calculate_hash: Indicates if we must calculate the data hash
+        :type calculate_hash: ``bool``
+
+        :return: A tuple of (status, checksum, bytes transferred)
+        :rtype: ``tuple``
+        """
+
+        object_path = self._get_object_path(container, object_name)
+
+        # Get the upload id from the response xml
+        response.body = response.response.read()
+        body = response.parse_body()
+        upload_id = body.find(fixxpath(xpath='UploadId',
+                                       namespace=self.namespace)).text
+
+        try:
+            # Upload the data through the iterator
+            result = self._upload_from_iterator(iterator, object_path,
+                                                upload_id, calculate_hash)
+            (chunks, data_hash, bytes_transferred) = result
+
+            # Commit the chunk info and complete the upload
+            etag = self._commit_multipart(object_path, upload_id, chunks)
+        except Exception:
+            exc = sys.exc_info()[1]
+            # Amazon provides a mechanism for aborting an upload.
+            self._abort_multipart(object_path, upload_id)
+            # NOTE(review): py2/py3-compatible re-raise; the original
+            # traceback is discarded in the process.
+            raise exc
+
+        # Modify the response header of the first request. This is used
+        # by other functions once the callback is done
+        response.headers['etag'] = etag
+
+        return (True, data_hash, bytes_transferred)
+
+    def _upload_from_iterator(self, iterator, object_path, upload_id,
+                              calculate_hash=True):
+        """
+        Uploads data from an iterator in fixed sized chunks to S3
+
+        :param iterator: The generator for fetching the upload data
+        :type iterator: ``generator``
+
+        :param object_path: The path of the object to which we are uploading
+        :type object_path: ``str``
+
+        :param upload_id: The upload id allocated for this multipart upload
+        :type upload_id: ``str``
+
+        :keyword calculate_hash: Indicates if we must calculate the data hash
+        :type calculate_hash: ``bool``
+
+        :return: A tuple of (chunk info, checksum, bytes transferred)
+        :rtype: ``tuple``
+        """
+
+        data_hash = None
+        if calculate_hash:
+            data_hash = self._get_hash_function()
+
+        bytes_transferred = 0
+        count = 1
+        chunks = []
+        params = {'uploadId': upload_id}
+
+        # Read the input data in chunk sizes suitable for AWS.
+        # yield_empty=True guarantees at least one (possibly empty) part is
+        # sent even when the iterator produces no data.
+        for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE,
+                                   fill_size=True, yield_empty=True):
+            bytes_transferred += len(data)
+
+            if calculate_hash:
+                data_hash.update(data)
+
+            chunk_hash = self._get_hash_function()
+            chunk_hash.update(data)
+            chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8')
+
+            # This provides an extra level of data check and is recommended
+            # by amazon
+            headers = {'Content-MD5': chunk_hash}
+            params['partNumber'] = count
+
+            request_path = '?'.join((object_path, urlencode(params)))
+
+            resp = self.connection.request(request_path, method='PUT',
+                                           data=data, headers=headers)
+
+            if resp.status != httplib.OK:
+                raise LibcloudError('Error uploading chunk', driver=self)
+
+            server_hash = resp.headers['etag']
+
+            # Keep this data for a later commit
+            chunks.append((count, server_hash))
+            count += 1
+
+        if calculate_hash:
+            data_hash = data_hash.hexdigest()
+
+        return (chunks, data_hash, bytes_transferred)
+
+    def _commit_multipart(self, object_path, upload_id, chunks):
+        """
+        Makes a final commit of the data.
+
+        :param object_path: Server side object path.
+        :type object_path: ``str``
+
+        :param upload_id: ID of the multipart upload.
+        :type upload_id: ``str``
+
+        :param chunks: A list of (chunk_number, chunk_hash) tuples.
+        :type chunks: ``list``
+
+        :return: The ETag of the completed object as reported by the
+            server.
+        :rtype: ``str``
+        """
+
+        # Build the CompleteMultipartUpload XML listing every uploaded part
+        # and the ETag the server returned for it.
+        root = Element('CompleteMultipartUpload')
+
+        for (count, etag) in chunks:
+            part = SubElement(root, 'Part')
+            part_no = SubElement(part, 'PartNumber')
+            part_no.text = str(count)
+
+            etag_id = SubElement(part, 'ETag')
+            etag_id.text = str(etag)
+
+        data = tostring(root)
+
+        params = {'uploadId': upload_id}
+        request_path = '?'.join((object_path, urlencode(params)))
+        response = self.connection.request(request_path, data=data,
+                                           method='POST')
+
+        if response.status != httplib.OK:
+            element = response.object
+            code, message = response._parse_error_details(element=element)
+            msg = 'Error in multipart commit: %s (%s)' % (message, code)
+            raise LibcloudError(msg, driver=self)
+
+        # Get the server's etag to be passed back to the caller
+        body = response.parse_body()
+        server_hash = body.find(fixxpath(xpath='ETag',
+                                         namespace=self.namespace)).text
+        return server_hash
+
+    def _abort_multipart(self, object_path, upload_id):
+        """
+        Aborts an already initiated multipart upload
+
+        :param object_path: Server side object path.
+        :type object_path: ``str``
+
+        :param upload_id: ID of the multipart upload.
+        :type upload_id: ``str``
+
+        :raises LibcloudError: If the DELETE request does not return 204.
+        """
+
+        params = {'uploadId': upload_id}
+        request_path = '?'.join((object_path, urlencode(params)))
+        resp = self.connection.request(request_path, method='DELETE')
+
+        if resp.status != httplib.NO_CONTENT:
+            raise LibcloudError('Error in multipart abort. status_code=%d' %
+                                (resp.status), driver=self)
+
+    def upload_object_via_stream(self, iterator, container, object_name,
+                                 extra=None, ex_storage_class=None):
+        """
+        @inherits: :class:`StorageDriver.upload_object_via_stream`
+
+        :param ex_storage_class: Storage class
+        :type ex_storage_class: ``str``
+        """
+
+        method = 'PUT'
+        params = None
+
+        # This driver is used by other S3 API compatible drivers also.
+        # Amazon provides a different (complex?) mechanism to do multipart
+        # uploads
+        if self.supports_s3_multipart_upload:
+            # Initiate the multipart request and get an upload id.
+            # The initial POST carries no body ('?uploads' query arg only);
+            # the real data is uploaded by the _upload_multipart callback.
+            upload_func = self._upload_multipart
+            upload_func_kwargs = {'iterator': iterator,
+                                  'container': container,
+                                  'object_name': object_name}
+            method = 'POST'
+            iterator = iter('')
+            params = 'uploads'
+
+        elif self.supports_chunked_encoding:
+            upload_func = self._stream_data
+            upload_func_kwargs = {'iterator': iterator}
+        else:
+            # In this case, we have to load the entire object to
+            # memory and send it as normal data
+            upload_func = self._upload_data
+            upload_func_kwargs = {}
+
+        return self._put_object(container=container, object_name=object_name,
+                                upload_func=upload_func,
+                                upload_func_kwargs=upload_func_kwargs,
+                                extra=extra, method=method, query_args=params,
+                                iterator=iterator, verify_hash=False,
+                                storage_class=ex_storage_class)
+
+    def delete_object(self, obj):
+        """
+        Delete an object.
+
+        :param obj: Object to delete.
+        :type obj: :class:`Object`
+
+        :raises ObjectDoesNotExistError: If the key is missing (404).
+        :return: ``True`` on success (204), ``False`` on any other
+            unhandled status code.
+        :rtype: ``bool``
+        """
+        object_path = self._get_object_path(obj.container, obj.name)
+        response = self.connection.request(object_path, method='DELETE')
+        if response.status == httplib.NO_CONTENT:
+            return True
+        elif response.status == httplib.NOT_FOUND:
+            raise ObjectDoesNotExistError(value=None, driver=self,
+                                          object_name=obj.name)
+
+        return False
+
+    def ex_iterate_multipart_uploads(self, container, prefix=None,
+                                     delimiter=None):
+        """
+        Extension method for listing all in-progress S3 multipart uploads.
+
+        Each multipart upload which has not been committed or aborted is
+        considered in-progress.
+
+        :param container: The container holding the uploads
+        :type container: :class:`Container`
+
+        :keyword prefix: Print only uploads of objects with this prefix
+        :type prefix: ``str``
+
+        :keyword delimiter: The object/key names are grouped based on
+            being split by this delimiter
+        :type delimiter: ``str``
+
+        :return: A generator of S3MultipartUpload instances.
+        :rtype: ``generator`` of :class:`S3MultipartUpload`
+        """
+
+        if not self.supports_s3_multipart_upload:
+            raise LibcloudError('Feature not supported', driver=self)
+
+        # Get the data for a specific container
+        request_path = '%s/?uploads' % (self._get_container_path(container))
+        params = {'max-uploads': RESPONSES_PER_REQUEST}
+
+        if prefix:
+            params['prefix'] = prefix
+
+        if delimiter:
+            params['delimiter'] = delimiter
+
+        # Shorthand for extracting a namespaced child element's text.
+        finder = lambda node, text: node.findtext(fixxpath(xpath=text,
+                                                  namespace=self.namespace))
+
+        # Page through the listing until the server reports it is no
+        # longer truncated, yielding each upload as it is parsed.
+        while True:
+            response = self.connection.request(request_path, params=params)
+
+            if response.status != httplib.OK:
+                raise LibcloudError('Error fetching multipart uploads. '
+                                    'Got code: %s' % (response.status),
+                                    driver=self)
+
+            body = response.parse_body()
+            for node in body.findall(fixxpath(xpath='Upload',
+                                              namespace=self.namespace)):
+
+                initiator = node.find(fixxpath(xpath='Initiator',
+                                               namespace=self.namespace))
+                owner = node.find(fixxpath(xpath='Owner',
+                                           namespace=self.namespace))
+
+                key = finder(node, 'Key')
+                upload_id = finder(node, 'UploadId')
+                created_at = finder(node, 'Initiated')
+                initiator = finder(initiator, 'DisplayName')
+                owner = finder(owner, 'DisplayName')
+
+                yield S3MultipartUpload(key, upload_id, created_at,
+                                        initiator, owner)
+
+            # Check if this is the last entry in the listing
+            is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
+                                                  namespace=self.namespace))
+
+            if is_truncated.lower() == 'false':
+                break
+
+            # Provide params for the next request
+            upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
+                                                   namespace=self.namespace))
+            key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
+                                                namespace=self.namespace))
+
+            params['key-marker'] = key_marker
+            params['upload-id-marker'] = upload_marker
+
+    def ex_cleanup_all_multipart_uploads(self, container, prefix=None):
+        """
+        Extension method for removing all partially completed S3 multipart
+        uploads.
+
+        :param container: The container holding the uploads
+        :type container: :class:`Container`
+
+        :keyword prefix: Delete only uploads of objects with this prefix
+        :type prefix: ``str``
+        """
+
+        # Iterate through the container and delete the upload ids
+        for upload in self.ex_iterate_multipart_uploads(container, prefix,
+                                                        delimiter=None):
+            object_path = '/%s/%s' % (container.name, upload.key)
+            self._abort_multipart(object_path, upload.id)
+
+    def _clean_object_name(self, name):
+        """
+        URL-quote an object name so it can be embedded in a request path.
+
+        :param name: Raw object name.
+        :type name: ``str``
+
+        :rtype: ``str``
+        """
+        name = urlquote(name)
+        return name
+
+    def _put_object(self, container, object_name, upload_func,
+                    upload_func_kwargs, method='PUT', query_args=None,
+                    extra=None, file_path=None, iterator=None,
+                    verify_hash=True, storage_class=None):
+        """
+        Shared upload helper used by ``upload_object`` and
+        ``upload_object_via_stream``.
+
+        Builds the request headers (storage class, ``x-amz-meta-*``
+        metadata, ACL), performs the upload through ``upload_func`` and,
+        when ``verify_hash`` is True, compares the locally computed hash
+        against the server-returned ETag.
+
+        :raises ValueError: If ``storage_class`` is not ``standard`` or
+            ``reduced_redundancy``.
+        :raises ObjectHashMismatchError: If hash verification fails.
+        :raises LibcloudError: On an unexpected response status code.
+        :rtype: :class:`Object`
+        """
+        headers = {}
+        extra = extra or {}
+        storage_class = storage_class or 'standard'
+        if storage_class not in ['standard', 'reduced_redundancy']:
+            raise ValueError(
+                'Invalid storage class value: %s' % (storage_class))
+
+        # S3 expects the storage class header value in upper case.
+        headers['x-amz-storage-class'] = storage_class.upper()
+
+        content_type = extra.get('content_type', None)
+        meta_data = extra.get('meta_data', None)
+        acl = extra.get('acl', None)
+
+        if meta_data:
+            for key, value in list(meta_data.items()):
+                key = 'x-amz-meta-%s' % (key)
+                headers[key] = value
+
+        if acl:
+            headers['x-amz-acl'] = acl
+
+        request_path = self._get_object_path(container, object_name)
+
+        if query_args:
+            request_path = '?'.join((request_path, query_args))
+
+        # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE
+        # here.
+        # SIGPIPE is thrown if the provided container does not exist or the
+        # user does not have correct permission
+        result_dict = self._upload_object(
+            object_name=object_name, content_type=content_type,
+            upload_func=upload_func, upload_func_kwargs=upload_func_kwargs,
+            request_path=request_path, request_method=method,
+            headers=headers, file_path=file_path, iterator=iterator)
+
+        response = result_dict['response']
+        bytes_transferred = result_dict['bytes_transferred']
+        headers = response.headers
+        response = response.response
+        # The server wraps the ETag value in double quotes; strip them.
+        server_hash = headers['etag'].replace('"', '')
+
+        if (verify_hash and result_dict['data_hash'] != server_hash):
+            raise ObjectHashMismatchError(
+                value='MD5 hash checksum does not match',
+                object_name=object_name, driver=self)
+        elif response.status == httplib.OK:
+            obj = Object(
+                name=object_name, size=bytes_transferred, hash=server_hash,
+                extra={'acl': acl}, meta_data=meta_data, container=container,
+                driver=self)
+
+            return obj
+        else:
+            raise LibcloudError(
+                'Unexpected status code, status_code=%s' % (response.status),
+                driver=self)
+
+    def _to_containers(self, obj, xpath):
+        """
+        Lazily convert each matching XML element into a
+        :class:`Container`.
+        """
+        for element in obj.findall(fixxpath(xpath=xpath,
+                                            namespace=self.namespace)):
+            yield self._to_container(element)
+
+    def _to_objs(self, obj, xpath, container):
+        """
+        Convert every matching XML element into an :class:`Object`
+        belonging to ``container``.
+        """
+        return [self._to_obj(element, container) for element in
+                obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))]
+
+    def _to_container(self, element):
+        """
+        Build a :class:`Container` from a bucket XML element, recording
+        the creation date in ``extra``.
+        """
+        extra = {
+            'creation_date': findtext(element=element, xpath='CreationDate',
+                                      namespace=self.namespace)
+        }
+
+        container = Container(name=findtext(element=element, xpath='Name',
+                                            namespace=self.namespace),
+                              extra=extra,
+                              driver=self
+                              )
+
+        return container
+
+    def _headers_to_object(self, object_name, container, headers):
+        """
+        Build an :class:`Object` from HEAD-response headers, collecting any
+        ``x-amz-meta-*`` headers into ``meta_data``.
+        """
+        hash = headers['etag'].replace('"', '')
+        extra = {'content_type': headers['content-type'],
+                 'etag': headers['etag']}
+        meta_data = {}
+
+        if 'last-modified' in headers:
+            extra['last_modified'] = headers['last-modified']
+
+        for key, value in headers.items():
+            if not key.lower().startswith('x-amz-meta-'):
+                continue
+
+            key = key.replace('x-amz-meta-', '')
+            meta_data[key] = value
+
+        # NOTE(review): unlike _to_obj, the content-length header is not
+        # cast to int here, so Object.size may be a string -- confirm
+        # whether callers depend on that.
+        obj = Object(name=object_name, size=headers['content-length'],
+                     hash=hash, extra=extra,
+                     meta_data=meta_data,
+                     container=container,
+                     driver=self)
+        return obj
+
+    def _to_obj(self, element, container):
+        """
+        Build an :class:`Object` from a listing XML element, including the
+        owner information in ``meta_data`` and the last-modified timestamp
+        in ``extra``.
+        """
+        owner_id = findtext(element=element, xpath='Owner/ID',
+                            namespace=self.namespace)
+        owner_display_name = findtext(element=element,
+                                      xpath='Owner/DisplayName',
+                                      namespace=self.namespace)
+        meta_data = {'owner': {'id': owner_id,
+                               'display_name': owner_display_name}}
+        last_modified = findtext(element=element,
+                                 xpath='LastModified',
+                                 namespace=self.namespace)
+        extra = {'last_modified': last_modified}
+
+        obj = Object(name=findtext(element=element, xpath='Key',
+                                   namespace=self.namespace),
+                     size=int(findtext(element=element, xpath='Size',
+                                       namespace=self.namespace)),
+                     hash=findtext(element=element, xpath='ETag',
+                                   namespace=self.namespace).replace('"', ''),
+                     extra=extra,
+                     meta_data=meta_data,
+                     container=container,
+                     driver=self
+                     )
+
+        return obj
+
+
+class S3StorageDriver(AWSDriver, BaseS3StorageDriver):
+    # Amazon S3 driver using the default S3 connection class / endpoint.
+    connectionCls = S3Connection
+
+
+class S3USWestConnection(S3Connection):
+    # Connection bound to the us-west-1 region endpoint.
+    host = S3_US_WEST_HOST
+
+
+class S3USWestStorageDriver(S3StorageDriver):
+    # S3 driver pinned to the us-west-1 (Northern California) region.
+    name = 'Amazon S3 (us-west-1)'
+    connectionCls = S3USWestConnection
+    ex_location_name = 'us-west-1'
+
+
+class S3USWestOregonConnection(S3Connection):
+    # Connection bound to the us-west-2 region endpoint.
+    host = S3_US_WEST_OREGON_HOST
+
+
+class S3USWestOregonStorageDriver(S3StorageDriver):
+    # S3 driver pinned to the us-west-2 (Oregon) region.
+    name = 'Amazon S3 (us-west-2)'
+    connectionCls = S3USWestOregonConnection
+    ex_location_name = 'us-west-2'
+
+
+class S3EUWestConnection(S3Connection):
+    # Connection bound to the eu-west-1 region endpoint.
+    host = S3_EU_WEST_HOST
+
+
+class S3EUWestStorageDriver(S3StorageDriver):
+    # S3 driver pinned to eu-west-1; note the LocationConstraint value for
+    # this region is the legacy token 'EU', not 'eu-west-1'.
+    name = 'Amazon S3 (eu-west-1)'
+    connectionCls = S3EUWestConnection
+    ex_location_name = 'EU'
+
+
+class S3APSEConnection(S3Connection):
+    # Connection bound to the ap-southeast-1 region endpoint.
+    host = S3_AP_SOUTHEAST_HOST
+
+
+class S3APSEStorageDriver(S3StorageDriver):
+    # S3 driver pinned to the ap-southeast-1 (Singapore) region.
+    name = 'Amazon S3 (ap-southeast-1)'
+    connectionCls = S3APSEConnection
+    ex_location_name = 'ap-southeast-1'
+
+
+class S3APNEConnection(S3Connection):
+    # Connection bound to the ap-northeast-1 region endpoint.
+    host = S3_AP_NORTHEAST_HOST
+
+
+class S3APNEStorageDriver(S3StorageDriver):
+    # S3 driver pinned to the ap-northeast-1 (Tokyo) region.
+    name = 'Amazon S3 (ap-northeast-1)'
+    connectionCls = S3APNEConnection
+    ex_location_name = 'ap-northeast-1'
diff --git a/awx/lib/site-packages/libcloud/storage/providers.py b/awx/lib/site-packages/libcloud/storage/providers.py
new file mode 100644
index 0000000000..1be212e15b
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/providers.py
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.utils.misc import get_driver as get_provider_driver
+from libcloud.utils.misc import set_driver as set_provider_driver
+from libcloud.storage.types import Provider
+
+# Registry mapping each Provider constant to the (module path, class name)
+# of the driver that implements it; consumed by get_driver/set_driver.
+DRIVERS = {
+    Provider.DUMMY:
+    ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'),
+    Provider.CLOUDFILES:
+    ('libcloud.storage.drivers.cloudfiles', 'CloudFilesStorageDriver'),
+    Provider.OPENSTACK_SWIFT:
+    ('libcloud.storage.drivers.cloudfiles', 'OpenStackSwiftStorageDriver'),
+    Provider.S3:
+    ('libcloud.storage.drivers.s3', 'S3StorageDriver'),
+    Provider.S3_US_WEST:
+    ('libcloud.storage.drivers.s3', 'S3USWestStorageDriver'),
+    Provider.S3_US_WEST_OREGON:
+    ('libcloud.storage.drivers.s3', 'S3USWestOregonStorageDriver'),
+    Provider.S3_EU_WEST:
+    ('libcloud.storage.drivers.s3', 'S3EUWestStorageDriver'),
+    Provider.S3_AP_SOUTHEAST:
+    ('libcloud.storage.drivers.s3', 'S3APSEStorageDriver'),
+    Provider.S3_AP_NORTHEAST:
+    ('libcloud.storage.drivers.s3', 'S3APNEStorageDriver'),
+    Provider.NINEFOLD:
+    ('libcloud.storage.drivers.ninefold', 'NinefoldStorageDriver'),
+    Provider.GOOGLE_STORAGE:
+    ('libcloud.storage.drivers.google_storage', 'GoogleStorageDriver'),
+    Provider.NIMBUS:
+    ('libcloud.storage.drivers.nimbus', 'NimbusStorageDriver'),
+    Provider.LOCAL:
+    ('libcloud.storage.drivers.local', 'LocalStorageDriver'),
+    Provider.AZURE_BLOBS:
+    ('libcloud.storage.drivers.azure_blobs', 'AzureBlobsStorageDriver'),
+    Provider.KTUCLOUD:
+    ('libcloud.storage.drivers.ktucloud', 'KTUCloudStorageDriver'),
+
+    # Deprecated
+    Provider.CLOUDFILES_US:
+    ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUSStorageDriver'),
+    Provider.CLOUDFILES_UK:
+    ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUKStorageDriver'),
+    Provider.CLOUDFILES_SWIFT:
+    ('libcloud.storage.drivers.cloudfiles', 'OpenStackSwiftStorageDriver')
+}
+
+
+def get_driver(provider):
+    """
+    Look up and import the storage driver class for a Provider constant.
+    """
+    return get_provider_driver(DRIVERS, provider)
+
+
+def set_driver(provider, module, klass):
+    """
+    Register (or override) the driver class for a Provider constant.
+    """
+    return set_provider_driver(DRIVERS, provider, module, klass)
diff --git a/awx/lib/site-packages/libcloud/storage/types.py b/awx/lib/site-packages/libcloud/storage/types.py
new file mode 100644
index 0000000000..ddc80351a0
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/storage/types.py
@@ -0,0 +1,119 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.common.types import LibcloudError
+
+# Public API of this module: the Provider registry plus the storage
+# exception hierarchy.
+__all__ = ['Provider',
+           'ContainerError',
+           'ObjectError',
+           'ContainerAlreadyExistsError',
+           'ContainerDoesNotExistError',
+           'ContainerIsNotEmptyError',
+           'ObjectDoesNotExistError',
+           'ObjectHashMismatchError',
+           'InvalidContainerNameError']
+
+
+class Provider(object):
+    """
+    Defines for each of the supported providers
+
+    :cvar DUMMY: Example provider
+    :cvar CLOUDFILES: CloudFiles
+    :cvar S3: Amazon S3 US
+    :cvar S3_US_WEST: Amazon S3 US West (Northern California)
+    :cvar S3_EU_WEST: Amazon S3 EU West (Ireland)
+    :cvar S3_AP_SOUTHEAST: Amazon S3 Asia South East (Singapore)
+    :cvar S3_AP_NORTHEAST: Amazon S3 Asia North East (Tokyo)
+    :cvar NINEFOLD: Ninefold
+    :cvar GOOGLE_STORAGE: Google Storage
+    :cvar S3_US_WEST_OREGON: Amazon S3 US West 2 (Oregon)
+    :cvar NIMBUS: Nimbus.io driver
+    :cvar LOCAL: Local storage driver
+    """
+    DUMMY = 'dummy'
+    S3 = 's3'
+    S3_US_WEST = 's3_us_west'
+    S3_EU_WEST = 's3_eu_west'
+    S3_AP_SOUTHEAST = 's3_ap_southeast'
+    S3_AP_NORTHEAST = 's3_ap_northeast'
+    NINEFOLD = 'ninefold'
+    GOOGLE_STORAGE = 'google_storage'
+    S3_US_WEST_OREGON = 's3_us_west_oregon'
+    NIMBUS = 'nimbus'
+    LOCAL = 'local'
+    OPENSTACK_SWIFT = 'openstack_swift'
+    CLOUDFILES = 'cloudfiles'
+    AZURE_BLOBS = 'azure_blobs'
+    KTUCLOUD = 'ktucloud'
+
+    # Deprecated
+    CLOUDFILES_US = 'cloudfiles_us'
+    CLOUDFILES_UK = 'cloudfiles_uk'
+    CLOUDFILES_SWIFT = 'cloudfiles_swift'
+
+
+class ContainerError(LibcloudError):
+    """
+    Base class for errors scoped to a specific container; carries the
+    container name in addition to the standard value/driver.
+    """
+    error_type = 'ContainerError'
+
+    def __init__(self, value, driver, container_name):
+        self.container_name = container_name
+        super(ContainerError, self).__init__(value=value, driver=driver)
+
+    def __str__(self):
+        return ('<%s in %s, container=%s, value=%s>' %
+                (self.error_type, repr(self.driver),
+                 self.container_name, self.value))
+
+
+class ObjectError(LibcloudError):
+    """
+    Base class for errors scoped to a specific object; carries the object
+    name in addition to the standard value/driver.
+    """
+    # NOTE(review): error_type reads 'ContainerError' rather than
+    # 'ObjectError' -- looks like a copy-paste slip carried from upstream;
+    # subclasses override it, so only direct ObjectError messages are
+    # affected.
+    error_type = 'ContainerError'
+
+    def __init__(self, value, driver, object_name):
+        self.object_name = object_name
+        super(ObjectError, self).__init__(value=value, driver=driver)
+
+    def __str__(self):
+        return self.__repr__()
+
+    def __repr__(self):
+        return '<%s in %s, value=%s, object = %s>' % (self.error_type,
+                                                      repr(self.driver),
+                                                      self.value,
+                                                      self.object_name)
+
+
+class ContainerAlreadyExistsError(ContainerError):
+    # Raised when creating a container whose name is already taken.
+    error_type = 'ContainerAlreadyExistsError'
+
+
+class ContainerDoesNotExistError(ContainerError):
+    # Raised when the referenced container cannot be found.
+    error_type = 'ContainerDoesNotExistError'
+
+
+class ContainerIsNotEmptyError(ContainerError):
+    # Raised when deleting a container that still holds objects.
+    error_type = 'ContainerIsNotEmptyError'
+
+
+class ObjectDoesNotExistError(ObjectError):
+    # Raised when the referenced object cannot be found.
+    error_type = 'ObjectDoesNotExistError'
+
+
+class ObjectHashMismatchError(ObjectError):
+    # Raised when an uploaded object's checksum does not match the
+    # server-reported hash.
+    error_type = 'ObjectHashMismatchError'
+
+
+class InvalidContainerNameError(ContainerError):
+    # Raised when a container name violates the provider's naming rules.
+    error_type = 'InvalidContainerNameError'
diff --git a/awx/lib/site-packages/libcloud/test/__init__.py b/awx/lib/site-packages/libcloud/test/__init__.py
new file mode 100644
index 0000000000..3a701cdb67
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/__init__.py
@@ -0,0 +1,345 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import random
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import StringIO
+from libcloud.utils.py3 import urlparse
+from libcloud.utils.py3 import parse_qs
+from libcloud.utils.py3 import parse_qsl
+from libcloud.utils.py3 import u
+from libcloud.utils.py3 import unittest2_required
+
+if unittest2_required:
+ import unittest2 as unittest
+else:
+ import unittest
+
+
+# Default headers for mock responses that carry an XML payload.
+XML_HEADERS = {'content-type': 'application/xml'}
+
+
+class LibcloudTestCase(unittest.TestCase):
+    """
+    TestCase base class that records which URLs and mock methods were hit
+    by MockHttp during a test, so tests can assert on mock usage.
+    """
+    def __init__(self, *args, **kwargs):
+        self._visited_urls = []
+        self._executed_mock_methods = []
+        super(LibcloudTestCase, self).__init__(*args, **kwargs)
+
+    def setUp(self):
+        # Reset the recorded state before every test method.
+        self._visited_urls = []
+        self._executed_mock_methods = []
+
+    def _add_visited_url(self, url):
+        self._visited_urls.append(url)
+
+    def _add_executed_mock_method(self, method_name):
+        self._executed_mock_methods.append(method_name)
+
+    def assertExecutedMethodCount(self, expected):
+        # Assert exactly `expected` mock methods ran during the test.
+        actual = len(self._executed_mock_methods)
+        self.assertEqual(actual, expected,
+                         'expected %d, but %d mock methods were executed'
+                         % (expected, actual))
+
+
+class multipleresponse(object):
+    """
+    A decorator that allows MockHttp objects to return multi responses
+    """
+    # Index of the response to return on the next call; advances by one
+    # per invocation.
+    count = 0
+    func = None
+
+    def __init__(self, f):
+        self.func = f
+
+    def __call__(self, *args, **kwargs):
+        # NOTE(review): the wrapped function is called with
+        # self.func.__class__ (the function type) as its first argument
+        # rather than the MockHttp instance -- confirm this matches how
+        # callers use the decorator.
+        ret = self.func(self.func.__class__, *args, **kwargs)
+        response = ret[self.count]
+        self.count = self.count + 1
+        return response
+
+
+class MockResponse(object):
+    """
+    A mock HTTPResponse
+    """
+    headers = {}
+    body = StringIO()
+    status = 0
+    reason = ''
+    version = 11
+
+    def __init__(self, status, body=None, headers=None, reason=None):
+        self.status = status
+        # Wrap the body in a StringIO so read()/iteration behave like a
+        # real response object.
+        self.body = StringIO(u(body)) if body else StringIO()
+        self.headers = headers or self.headers
+        self.reason = reason or self.reason
+
+    def read(self, *args, **kwargs):
+        return self.body.read(*args, **kwargs)
+
+    def next(self):
+        # Python 2.5 StringIO objects expose next() rather than supporting
+        # the next() builtin.
+        if sys.version_info >= (2, 5) and sys.version_info <= (2, 6):
+            return self.body.next()
+        else:
+            return next(self.body)
+
+    def __next__(self):
+        # Python 3 iteration protocol delegates to next().
+        return self.next()
+
+    def getheader(self, name, *args, **kwargs):
+        return self.headers.get(name, *args, **kwargs)
+
+    def getheaders(self):
+        return list(self.headers.items())
+
+    def msg(self):
+        # NOTE(review): raising NotImplemented (the constant) actually
+        # raises a TypeError at runtime; NotImplementedError was almost
+        # certainly intended -- upstream libcloud quirk.
+        raise NotImplemented
+
+
+class BaseMockHttpObject(object):
+    """
+    Shared helper for mock HTTP objects: maps a request path (plus
+    optional type suffix and query parameter) to the name of the mock
+    method that should handle it.
+    """
+    def _get_method_name(self, type, use_param, qs, path):
+        # Strip the query string, then turn the path into a Python
+        # identifier: '/a/b-c.d' -> '_a_b_c_d'.
+        path = path.split('?')[0]
+        meth_name = path.replace('/', '_').replace('.', '_').replace('-', '_')
+
+        # NOTE(review): the `type` parameter only gates the branch; the
+        # suffix appended is always self.type -- confirm callers always
+        # pass type=self.type so the two agree.
+        if type:
+            meth_name = '%s_%s' % (meth_name, self.type)
+
+        if use_param and use_param in qs:
+            param = qs[use_param][0].replace('.', '_').replace('-', '_')
+            meth_name = '%s_%s' % (meth_name, param)
+
+        return meth_name
+
+
+class MockHttp(BaseMockHttpObject):
+    """
+    A mock HTTP client/server suitable for testing purposes. This replaces
+    `HTTPConnection` by implementing its API and returning a mock response.
+
+    Define methods by request path, replacing slashes (/) with underscores (_).
+    Each of these mock methods should return a tuple of:
+
+    (int status, str body, dict headers, str reason)
+
+    >>> mock = MockHttp('localhost', 8080)
+    >>> mock.request('GET', '/example/')
+    >>> response = mock.getresponse()
+    >>> response.body.read()
+    'Hello World!'
+    >>> response.status
+    200
+    >>> response.getheaders()
+    [('X-Foo', 'libcloud')]
+    >>> MockHttp.type = 'fail'
+    >>> mock.request('GET', '/example/')
+    >>> response = mock.getresponse()
+    >>> response.body.read()
+    'Oh Noes!'
+    >>> response.status
+    403
+    >>> response.getheaders()
+    [('X-Foo', 'fail')]
+
+    """
+    responseCls = MockResponse
+    host = None
+    port = None
+    response = None
+
+    type = None
+    use_param = None # will use this param to namespace the request function
+
+    test = None # TestCase instance which is using this mock
+
+    def __init__(self, host, port, *args, **kwargs):
+        self.host = host
+        self.port = port
+
+    def request(self, method, url, body=None, headers=None, raw=False):
+        # Find a method we can use for this request
+        parsed = urlparse.urlparse(url)
+        scheme, netloc, path, params, query, fragment = parsed
+        qs = parse_qs(query)
+        if path.endswith('/'):
+            path = path[:-1]
+        meth_name = self._get_method_name(type=self.type,
+                                          use_param=self.use_param,
+                                          qs=qs, path=path)
+        meth = getattr(self, meth_name.replace('%', '_'))
+
+        # Record usage on the owning test case so it can assert on which
+        # mock methods/URLs were exercised.
+        if self.test and isinstance(self.test, LibcloudTestCase):
+            self.test._add_visited_url(url=url)
+            self.test._add_executed_mock_method(method_name=meth_name)
+
+        # Like httplib, request() returns nothing; the result is fetched
+        # via getresponse().
+        status, body, headers, reason = meth(method, url, body, headers)
+        self.response = self.responseCls(status, body, headers, reason)
+
+    def getresponse(self):
+        return self.response
+
+    def connect(self):
+        """
+        Can't think of anything to mock here.
+        """
+        pass
+
+    def close(self):
+        pass
+
+    # Mock request/response example
+    def _example(self, method, url, body, headers):
+        """
+        Return a simple message and header, regardless of input.
+        """
+        return (httplib.OK, 'Hello World!', {'X-Foo': 'libcloud'},
+                httplib.responses[httplib.OK])
+
+    def _example_fail(self, method, url, body, headers):
+        return (httplib.FORBIDDEN, 'Oh Noes!', {'X-Foo': 'fail'},
+                httplib.responses[httplib.FORBIDDEN])
+
+
+class MockHttpTestCase(MockHttp, unittest.TestCase):
+    # Same as the MockHttp class, but you can also use assertions in the
+    # classes which inherit from this one.
+    def __init__(self, *args, **kwargs):
+        unittest.TestCase.__init__(self)
+
+        # Only initialize the MockHttp side when host/port were supplied.
+        if kwargs.get('host', None) and kwargs.get('port', None):
+            MockHttp.__init__(self, *args, **kwargs)
+
+    def runTest(self):
+        # No-op: required so unittest.TestCase can be instantiated
+        # directly without a named test method.
+        pass
+
+    def assertUrlContainsQueryParams(self, url, expected_params, strict=False):
+        """
+        Assert that provided url contains provided query parameters.
+
+        :param url: URL to assert.
+        :type url: ``str``
+
+        :param expected_params: Dictionary of expected query parameters.
+        :type expected_params: ``dict``
+
+        :param strict: Assert that provided url contains only expected_params.
+                       (defaults to ``False``)
+        :type strict: ``bool``
+        """
+        question_mark_index = url.find('?')
+
+        if question_mark_index != -1:
+            url = url[question_mark_index + 1:]
+
+        params = dict(parse_qsl(url))
+
+        if strict:
+            self.assertDictEqual(params, expected_params)
+        else:
+            for key, value in expected_params.items():
+                self.assertEqual(params[key], value)
+
+
+class StorageMockHttp(MockHttp):
+    """
+    MockHttp variant for storage drivers: stubs out the low-level
+    put/send API used for raw uploads as no-ops.
+    """
+    def putrequest(self, method, action):
+        pass
+
+    def putheader(self, key, value):
+        pass
+
+    def endheaders(self):
+        pass
+
+    def send(self, data):
+        pass
+
+
+class MockRawResponse(BaseMockHttpObject):
+    """
+    Mock RawResponse object suitable for testing.
+    """
+
+    type = None
+    responseCls = MockResponse
+
+    def __init__(self, connection):
+        super(MockRawResponse, self).__init__()
+        self._data = []
+        self._current_item = 0
+
+        # The underlying mock response is resolved lazily on first access
+        # to one of the response/status/headers/reason properties.
+        self._status = None
+        self._response = None
+        self._headers = None
+        self._reason = None
+        self.connection = connection
+
+    def next(self):
+        if self._current_item == len(self._data):
+            raise StopIteration
+
+        value = self._data[self._current_item]
+        self._current_item += 1
+        return value
+
+    def __next__(self):
+        # Python 3 iteration protocol delegates to next().
+        return self.next()
+
+    def _generate_random_data(self, size):
+        # Build a random digit string of at least `size` characters.
+        data = ''
+        current_size = 0
+        while current_size < size:
+            value = str(random.randint(0, 9))
+            value_size = len(value)
+            data += value
+            current_size += value_size
+
+        return data
+
+    @property
+    def response(self):
+        return self._get_response_if_not_availale()
+
+    @property
+    def status(self):
+        self._get_response_if_not_availale()
+        return self._status
+
+    @property
+    def headers(self):
+        self._get_response_if_not_availale()
+        return self._headers
+
+    @property
+    def reason(self):
+        self._get_response_if_not_availale()
+        return self._reason
+
+    # NOTE(review): "availale" is a typo for "available", kept as-is since
+    # the properties above reference this exact name.
+    def _get_response_if_not_availale(self):
+        # Lazily resolve the mock method for the connection's recorded
+        # action/method and cache the resulting response.
+        if not self._response:
+            meth_name = self._get_method_name(type=self.type,
+                                              use_param=False, qs=None,
+                                              path=self.connection.action)
+            meth = getattr(self, meth_name.replace('%', '_'))
+            result = meth(self.connection.method, None, None, None)
+            self._status, self._body, self._headers, self._reason = result
+            self._response = self.responseCls(self._status, self._body,
+                                              self._headers, self._reason)
+        return self._response
+
+if __name__ == "__main__":
+    # Running this module directly executes the doctests embedded in the
+    # MockHttp class docstring.
+    import doctest
+    doctest.testmod()
diff --git a/awx/lib/site-packages/libcloud/test/common/__init__.py b/awx/lib/site-packages/libcloud/test/common/__init__.py
new file mode 100644
index 0000000000..ae1e83eeb3
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/common/__init__.py
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/awx/lib/site-packages/libcloud/test/common/test_cloudstack.py b/awx/lib/site-packages/libcloud/test/common/test_cloudstack.py
new file mode 100644
index 0000000000..2412d5cd92
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/common/test_cloudstack.py
@@ -0,0 +1,210 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlparse
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import parse_qsl
+
+from libcloud.common.cloudstack import CloudStackConnection
+from libcloud.common.types import MalformedResponseError
+
+from libcloud.test import MockHttpTestCase
+
+
+async_delay = 0
+
+
class CloudStackMockDriver(object):
    """
    Stand-in driver exposing only the attributes the CloudStack connection
    reads during these tests: the endpoint host, the request path and the
    async-polling configuration.
    """

    name = 'fake'

    host = 'nonexistent.'
    path = '/path'

    async_poll_frequency = 0
    async_delay = 0
+
+
class CloudStackCommonTest(unittest.TestCase):
    """
    Tests for the common CloudStack connection logic (request signing and
    synchronous/asynchronous request handling) against a mocked transport.
    """

    def setUp(self):
        # Route all HTTP traffic through the mock class; no real network I/O.
        CloudStackConnection.conn_classes = (None, CloudStackMockHttp)
        self.connection = CloudStackConnection('apikey', 'secret',
                                               host=CloudStackMockDriver.host)
        self.connection.poll_interval = 0.0
        self.driver = self.connection.driver = CloudStackMockDriver()

    def test_sync_request_bad_response(self):
        # A payload without the expected '<command>response' wrapper key must
        # surface as a MalformedResponseError.
        self.driver.path = '/bad/response'
        try:
            self.connection._sync_request('fake')
        except Exception:
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, MalformedResponseError))
            return
        # Fix: was assertTrue(False); fail() documents why the test failed.
        self.fail('MalformedResponseError was not raised')

    def test_sync_request(self):
        # A well-formed synchronous response must not raise.
        self.driver.path = '/sync'
        self.connection._sync_request('fake')

    def test_async_request_successful(self):
        self.driver.path = '/async/success'
        result = self.connection._async_request('fake')
        self.assertEqual(result, {'fake': 'result'})

    def test_async_request_unsuccessful(self):
        self.driver.path = '/async/fail'
        try:
            self.connection._async_request('fake')
        except Exception:
            e = sys.exc_info()[1]
            self.assertEqual(CloudStackMockHttp.ERROR_TEXT, str(e))
            return
        # Fix: was assertFalse(True); fail() documents why the test failed.
        self.fail('Exception was not raised for a failed async job')

    def test_async_request_delayed(self):
        # The mock decrements ``async_delay`` once per poll; it reaching zero
        # proves the connection kept polling until the job finished.
        global async_delay
        self.driver.path = '/async/delayed'
        async_delay = 2
        self.connection._async_request('fake')
        self.assertEqual(async_delay, 0)

    def test_signature_algorithm(self):
        # Known-good signatures produced by the CloudStack HMAC-SHA1
        # request-signing algorithm, including a value ('George+Ringo') that
        # exercises URL encoding of '+'.
        cases = [
            (
                {
                    'command': 'listVirtualMachines'
                }, 'z/a9Y7J52u48VpqIgiwaGUMCso0='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'gHTo7mYmadZ+zluKHzlEKb1i/QU='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George+Ringo',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'tAgfrreI1ZvWlWLClD3gu4+aKv4='
            )
        ]

        connection = CloudStackConnection('fnord', 'abracadabra')
        for case in cases:
            params = connection.add_default_params(case[0])
            self.assertEqual(connection._make_signature(params), b(case[1]))
+
+
class CloudStackMockHttp(MockHttpTestCase):
    """
    Mock HTTP endpoints for the CloudStack query API used by the tests above.

    Each ``_<path>`` method corresponds to a request path set on the mock
    driver (e.g. ``/async/success`` -> ``_async_success``).
    """

    # Error message reported by the failed-async-job handler.
    ERROR_TEXT = 'ERROR TEXT'

    def _response(self, status, result, response):
        # NOTE(review): the parsed ``result`` dict is returned in the headers
        # slot of the (status, body, headers, reason) tuple — presumably only
        # the JSON body is consumed by these tests; confirm against
        # MockHttpTestCase before relying on the headers value.
        return (status, json.dumps(result), result, response)

    def _check_request(self, url):
        # Assert that every request carries the mandatory CloudStack query
        # parameters and asks for a JSON response; return the parsed query.
        url = urlparse.urlparse(url)
        query = dict(parse_qsl(url.query))

        self.assertTrue('apiKey' in query)
        self.assertTrue('command' in query)
        self.assertTrue('response' in query)
        self.assertTrue('signature' in query)

        self.assertTrue(query['response'] == 'json')

        return query

    def _bad_response(self, method, url, body, headers):
        # Deliberately omits the '<command>response' wrapper key so the
        # connection treats the payload as malformed.
        self._check_request(url)
        result = {'success': True}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])

    def _sync(self, method, url, body, headers):
        # Minimal well-formed synchronous response: an empty dict wrapped
        # under the expected '<command>response' key.
        query = self._check_request(url)
        result = {query['command'].lower() + 'response': {}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])

    def _async_success(self, method, url, body, headers):
        # First call returns a job id; subsequent queryAsyncJobResult polls
        # report the job finished (jobstatus == 1) with a fake result.
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 1,
                    'jobresult': {'fake': 'result'}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])

    def _async_fail(self, method, url, body, headers):
        # Polls report the job as failed (jobstatus == 2) carrying ERROR_TEXT.
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 2,
                    'jobresult': {'errortext': self.ERROR_TEXT}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])

    def _async_delayed(self, method, url, body, headers):
        # Keeps reporting the job as pending (jobstatus == 0) while the
        # module-level ``async_delay`` counter is non-zero, decrementing it on
        # every poll; completes once the counter reaches zero.
        global async_delay

        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            if async_delay == 0:
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 1,
                        'jobresult': {'fake': 'result'}
                    }
                }
            else:
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 0,
                    }
                }
                async_delay -= 1
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])
+
# Allow running this test module directly.
if __name__ == '__main__':
    sys.exit(unittest.main())
diff --git a/awx/lib/site-packages/libcloud/test/common/test_gandi.py b/awx/lib/site-packages/libcloud/test/common/test_gandi.py
new file mode 100644
index 0000000000..d0dfc9dd8e
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/common/test_gandi.py
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.utils.py3 import xmlrpclib
+from libcloud.test import MockHttp
+
+
class BaseGandiMockHttp(MockHttp):
    """
    Base mock HTTP class for Gandi tests.

    Gandi's API is XML-RPC over a single endpoint, so every request is
    funnelled through one dispatcher that decodes the payload and routes to
    a per-method handler defined by the subclass.
    """

    def _get_method_name(self, type, use_param, qs, path):
        # Collapse all request paths onto the generic XML-RPC handler below.
        return "_xmlrpc"

    def _xmlrpc(self, method, url, body, headers):
        # Recover the XML-RPC method name from the request body, then
        # dispatch to '_xmlrpc__<method>' (dots replaced by underscores),
        # optionally suffixed with the mock's response type.
        params, rpc_method = xmlrpclib.loads(body)
        handler_name = '_xmlrpc__' + rpc_method.replace('.', '_')
        if self.type:
            handler_name = '%s_%s' % (handler_name, self.type)
        handler = getattr(self, handler_name)
        return handler(method, url, body, headers)
diff --git a/awx/lib/site-packages/libcloud/test/common/test_google.py b/awx/lib/site-packages/libcloud/test/common/test_google.py
new file mode 100644
index 0000000000..2e9c701dd8
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/common/test_google.py
@@ -0,0 +1,244 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Tests for Google Connection classes.
+"""
+import datetime
+import sys
+import unittest
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.utils.py3 import httplib
+
+from libcloud.test import MockHttp, LibcloudTestCase
+from libcloud.common.google import (GoogleAuthError,
+ GoogleBaseAuthConnection,
+ GoogleInstalledAppAuthConnection,
+ GoogleServiceAcctAuthConnection,
+ GoogleBaseConnection)
+from libcloud.test.secrets import GCE_PARAMS
+
+# Skip some tests if PyCrypto is unavailable
+try:
+ from Crypto.Hash import SHA256
+except ImportError:
+ SHA256 = None
+
+
class MockJsonResponse(object):
    """Minimal stand-in for a parsed JSON response.

    Mirrors libcloud's JsonResponse contract of exposing the decoded
    payload on the ``object`` attribute.
    """

    def __init__(self, body):
        # ``body`` is the already-parsed JSON payload.
        self.object = body
+
+
class GoogleBaseAuthConnectionTest(LibcloudTestCase):
    """
    Tests for GoogleBaseAuthConnection
    """
    # Freeze "now" so token expiry timestamps are deterministic.
    GoogleBaseAuthConnection._now = lambda x: datetime.datetime(2013, 6, 26,
                                                                19, 0, 0)

    def setUp(self):
        # Route all auth HTTP traffic through the mock class below.
        GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp,
                                                 GoogleAuthMockHttp)
        self.mock_scopes = ['foo', 'bar']
        kwargs = {'scopes': self.mock_scopes}
        self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS,
                                                     **kwargs)

    def test_scopes(self):
        # The scopes list is collapsed into one space-separated string.
        self.assertEqual(self.conn.scopes, 'foo bar')

    def test_add_default_headers(self):
        # Auth requests are form-encoded and target accounts.google.com.
        old_headers = {}
        expected_headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': 'accounts.google.com'}
        new_headers = self.conn.add_default_headers(old_headers)
        self.assertEqual(new_headers, expected_headers)

    def test_token_request(self):
        # An authorization-code grant yields the mocked 'installedapp' token;
        # expiry is one hour after the frozen clock above (expires_in=3600).
        request_body = {'code': 'asdf', 'client_id': self.conn.user_id,
                        'client_secret': self.conn.key,
                        'redirect_uri': self.conn.redirect_uri,
                        'grant_type': 'authorization_code'}
        new_token = self.conn._token_request(request_body)
        self.assertEqual(new_token['access_token'], 'installedapp')
        self.assertEqual(new_token['expire_time'], '2013-06-26T20:00:00Z')
+
+
class GoogleInstalledAppAuthConnectionTest(LibcloudTestCase):
    """
    Tests for GoogleInstalledAppAuthConnection
    """
    # Stub out the interactive step so no browser/user input is needed.
    GoogleInstalledAppAuthConnection.get_code = lambda x: '1234'

    def setUp(self):
        # Route all auth HTTP traffic through the mock class below.
        GoogleInstalledAppAuthConnection.conn_classes = (GoogleAuthMockHttp,
                                                         GoogleAuthMockHttp)
        self.mock_scopes = ['https://www.googleapis.com/auth/foo']
        kwargs = {'scopes': self.mock_scopes}
        self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS,
                                                     **kwargs)

    def test_refresh_token(self):
        # This token info doesn't have a refresh token, so a new token will be
        # requested
        token_info1 = {'access_token': 'tokentoken', 'token_type': 'Bearer',
                       'expires_in': 3600}
        new_token1 = self.conn.refresh_token(token_info1)
        self.assertEqual(new_token1['access_token'], 'installedapp')

        # This token info has a refresh token, so it will be able to be
        # refreshed.
        token_info2 = {'access_token': 'tokentoken', 'token_type': 'Bearer',
                       'expires_in': 3600, 'refresh_token': 'refreshrefresh'}
        new_token2 = self.conn.refresh_token(token_info2)
        self.assertEqual(new_token2['access_token'], 'refreshrefresh')

        # Both sets should have refresh info
        self.assertTrue('refresh_token' in new_token1)
        self.assertTrue('refresh_token' in new_token2)
+
+
class GoogleBaseConnectionTest(LibcloudTestCase):
    """
    Tests for GoogleBaseConnection
    """
    # Stub out on-disk token caching and the interactive auth step so the
    # tests never touch the filesystem or prompt; freeze the clock so token
    # expiry values are deterministic.
    GoogleBaseConnection._get_token_info_from_file = lambda x: None
    GoogleBaseConnection._write_token_info_to_file = lambda x: None
    GoogleInstalledAppAuthConnection.get_code = lambda x: '1234'
    GoogleServiceAcctAuthConnection.get_new_token = \
        lambda x: x._token_request({})
    GoogleBaseConnection._now = lambda x: datetime.datetime(2013, 6, 26,
                                                            19, 0, 0)

    def setUp(self):
        # Route auth HTTP traffic through the mock class below; use the
        # installed-app ('IA') flow by default.
        GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp,
                                                 GoogleAuthMockHttp)
        self.mock_scopes = ['https://www.googleapis.com/auth/foo']
        kwargs = {'scopes': self.mock_scopes, 'auth_type': 'IA'}
        self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs)

    def test_auth_type(self):
        # Unknown auth_type must raise; 'SA' (service account, needs
        # PyCrypto) and 'IA' (installed app) pick the matching auth class.
        self.assertRaises(GoogleAuthError, GoogleBaseConnection, *GCE_PARAMS,
                          **{'auth_type': 'XX'})

        kwargs = {'scopes': self.mock_scopes}

        if SHA256:
            kwargs['auth_type'] = 'SA'
            conn1 = GoogleBaseConnection(*GCE_PARAMS, **kwargs)
            self.assertTrue(isinstance(conn1.auth_conn,
                                       GoogleServiceAcctAuthConnection))

        kwargs['auth_type'] = 'IA'
        conn2 = GoogleBaseConnection(*GCE_PARAMS, **kwargs)
        self.assertTrue(isinstance(conn2.auth_conn,
                                   GoogleInstalledAppAuthConnection))

    def test_add_default_headers(self):
        # API (non-auth) requests are JSON and target www.googleapis.com.
        old_headers = {}
        new_expected_headers = {'Content-Type': 'application/json',
                                'Host': 'www.googleapis.com'}
        new_headers = self.conn.add_default_headers(old_headers)
        self.assertEqual(new_headers, new_expected_headers)

    def test_pre_connect_hook(self):
        # The hook injects the bearer token obtained via the mocked flow.
        old_params = {}
        old_headers = {}
        new_expected_params = {}
        new_expected_headers = {'Authorization': 'Bearer installedapp'}
        new_params, new_headers = self.conn.pre_connect_hook(old_params,
                                                             old_headers)
        self.assertEqual(new_params, new_expected_params)
        self.assertEqual(new_headers, new_expected_headers)

    def test_encode_data(self):
        data = {'key': 'value'}
        json_data = '{"key": "value"}'
        encoded_data = self.conn.encode_data(data)
        self.assertEqual(encoded_data, json_data)

    def test_has_completed(self):
        # Operation polling: status 'DONE' means completed, 'RUNNING' not.
        body1 = {"endTime": "2013-06-26T10:05:07.630-07:00",
                 "id": "3681664092089171723",
                 "kind": "compute#operation",
                 "status": "DONE",
                 "targetId": "16211908079305042870"}
        body2 = {"endTime": "2013-06-26T10:05:07.630-07:00",
                 "id": "3681664092089171723",
                 "kind": "compute#operation",
                 "status": "RUNNING",
                 "targetId": "16211908079305042870"}
        response1 = MockJsonResponse(body1)
        response2 = MockJsonResponse(body2)
        self.assertTrue(self.conn.has_completed(response1))
        self.assertFalse(self.conn.has_completed(response2))

    def test_get_poll_request_kwargs(self):
        # Polling must follow the operation's selfLink URL.
        body = {"endTime": "2013-06-26T10:05:07.630-07:00",
                "id": "3681664092089171723",
                "kind": "compute#operation",
                "selfLink": "https://www.googleapis.com/operations-test"}
        response = MockJsonResponse(body)
        expected_kwargs = {'action':
                           'https://www.googleapis.com/operations-test'}
        kwargs = self.conn.get_poll_request_kwargs(response, None, {})
        self.assertEqual(kwargs, expected_kwargs)

    def test_morph_action_hook(self):
        # Absolute URLs and bare paths must both normalise onto the
        # connection's request_path.
        self.conn.request_path = '/compute/apiver/project/project-name'
        action1 = ('https://www.googleapis.com/compute/apiver/project'
                   '/project-name/instances')
        action2 = '/instances'
        expected_request = '/compute/apiver/project/project-name/instances'
        request1 = self.conn.morph_action_hook(action1)
        request2 = self.conn.morph_action_hook(action2)
        self.assertEqual(request1, expected_request)
        self.assertEqual(request2, expected_request)
+
+
class GoogleAuthMockHttp(MockHttp):
    """
    Mock HTTP Class for Google Auth Connections.

    Serves a canned OAuth2 token payload; which payload depends on whether
    the request body represents an authorization-code exchange, a token
    refresh, or a plain token request.
    """
    json_hdr = {'content-type': 'application/json; charset=UTF-8'}

    def _o_oauth2_token(self, method, url, body, headers):
        # Canned payloads for the three request flavours.
        plain_token = {'access_token': 'tokentoken',
                       'token_type': 'Bearer',
                       'expires_in': 3600}
        refreshed_token = {'access_token': 'refreshrefresh',
                           'token_type': 'Bearer',
                           'expires_in': 3600}
        installed_app_token = {'access_token': 'installedapp',
                               'token_type': 'Bearer',
                               'expires_in': 3600,
                               'refresh_token': 'refreshrefresh'}

        if 'code' in body:
            # Authorization-code (installed application) exchange.
            payload = installed_app_token
        elif 'refresh_token' in body:
            # Refresh-token grant.
            payload = refreshed_token
        else:
            # Anything else (e.g. service-account assertion) gets the
            # plain token.
            payload = plain_token

        body = json.dumps(payload)
        return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK])
+
+
# Allow running this test module directly.
if __name__ == '__main__':
    sys.exit(unittest.main())
diff --git a/awx/lib/site-packages/libcloud/test/common/test_openstack.py b/awx/lib/site-packages/libcloud/test/common/test_openstack.py
new file mode 100644
index 0000000000..03f0542973
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/common/test_openstack.py
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+from mock import Mock
+
+from libcloud.common.openstack import OpenStackBaseConnection
+from libcloud.utils.py3 import PY25
+
+
class OpenStackBaseConnectionTest(unittest.TestCase):
    """
    Tests for OpenStackBaseConnection construction and timeout handling.
    """

    def setUp(self):
        # Substitute a Mock for the HTTPS connection class so connect()
        # performs no real network I/O.
        self.timeout = 10
        OpenStackBaseConnection.conn_classes = (None, Mock())
        self.connection = OpenStackBaseConnection('foo', 'bar',
                                                  timeout=self.timeout,
                                                  ex_force_auth_url='https://127.0.0.1')
        self.connection.driver = Mock()
        self.connection.driver.name = 'OpenStackDriver'

    def test_base_connection_timeout(self):
        # The user-supplied timeout must be propagated to the underlying
        # connection class. Python 2.5's httplib accepted no ``timeout``
        # keyword, hence the PY25 branch without it.
        self.connection.connect()
        self.assertEqual(self.connection.timeout, self.timeout)
        if PY25:
            self.connection.conn_classes[1].assert_called_with(host='127.0.0.1',
                                                               port=443)
        else:
            self.connection.conn_classes[1].assert_called_with(host='127.0.0.1',
                                                               port=443,
                                                               timeout=10)
+
+
# Allow running this test module directly.
if __name__ == '__main__':
    sys.exit(unittest.main())
diff --git a/awx/lib/site-packages/libcloud/test/compute/__init__.py b/awx/lib/site-packages/libcloud/test/compute/__init__.py
new file mode 100644
index 0000000000..42e478c349
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/__init__.py
@@ -0,0 +1,105 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from libcloud.compute.base import Node, NodeImage, NodeLocation, StorageVolume
+from libcloud.pricing import get_pricing
+
+
class TestCaseMixin(object):
    """
    Generic smoke tests shared by the compute driver test cases.

    The concrete test case is expected to provide ``self.driver``; the
    class-level flags below let it opt out of checks its driver does not
    support.
    """
    # Set to False when the driver has no list_locations() support.
    should_list_locations = True
    # Set to True when pricing data is expected for driver.api_name.
    should_have_pricing = False
    # Set to True when the driver implements list_volumes().
    should_list_volumes = False

    def test_list_nodes_response(self):
        # Every returned element must be a Node instance.
        nodes = self.driver.list_nodes()
        self.assertTrue(isinstance(nodes, list))
        for node in nodes:
            self.assertTrue(isinstance(node, Node))

    def test_list_sizes_response(self):
        # Only the first size's field types are inspected here.
        sizes = self.driver.list_sizes()
        size = sizes[0]
        self.assertTrue(isinstance(sizes, list))
        # Check that size values are ints or None
        self.assertTrue(size.ram is None or isinstance(size.ram, int))
        self.assertTrue(size.disk is None or isinstance(size.disk, int))
        self.assertTrue(size.bandwidth is None or
                        isinstance(size.bandwidth, int))
        # Check that price values are ints, floats, or None.
        self.assertTrue(size.price is None or isinstance(size.price, float)
                        or isinstance(size.price, int))

    def test_list_images_response(self):
        # Every returned element must be a NodeImage instance.
        images = self.driver.list_images()
        self.assertTrue(isinstance(images, list))
        for image in images:
            self.assertTrue(isinstance(image, NodeImage))

    def test_list_volumes_response(self):
        # Skipped unless the subclass opts in via should_list_volumes.
        if not self.should_list_volumes:
            return None

        volumes = self.driver.list_volumes()
        self.assertTrue(isinstance(volumes, list))
        for volume in volumes:
            self.assertTrue(isinstance(volume, StorageVolume))

    def test_list_locations_response(self):
        # Skipped when the subclass opts out via should_list_locations.
        if not self.should_list_locations:
            return None

        locations = self.driver.list_locations()
        self.assertTrue(isinstance(locations, list))
        for dc in locations:
            self.assertTrue(isinstance(dc, NodeLocation))

    def test_create_node_response(self):
        # should return a node object
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(name='node-name',
                                       image=image,
                                       size=size)
        self.assertTrue(isinstance(node, Node))

    def test_destroy_node_response(self):
        # destroy_node() must report success/failure as a bool.
        node = self.driver.list_nodes()[0]
        ret = self.driver.destroy_node(node)
        self.assertTrue(isinstance(ret, bool))

    def test_reboot_node_response(self):
        # reboot_node() must report success/failure as a bool.
        node = self.driver.list_nodes()[0]
        ret = self.driver.reboot_node(node)
        self.assertTrue(isinstance(ret, bool))

    def test_get_pricing_success(self):
        # Skipped unless the subclass opts in via should_have_pricing.
        if not self.should_have_pricing:
            return None

        driver_type = 'compute'
        try:
            get_pricing(driver_type=driver_type,
                        driver_name=self.driver.api_name)
        except KeyError:
            self.fail("No {driver_type!r} pricing info for {driver}.".format(
                driver=self.driver.__class__.__name__,
                driver_type=driver_type,
            ))
+
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/dcs.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/dcs.xml
new file mode 100644
index 0000000000..a014a42fbe
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/dcs.xml
@@ -0,0 +1,78 @@
+
+
+
+
+
+
+
+
+ 2
+ barcelona
+ barcelona
+
+
+
+
+
+ 3
+ 1
+ VIRTUAL_FACTORY
+ http://10.60.12.7:80/virtualfactory
+
+
+
+
+
+ 4
+ 1
+ VIRTUAL_SYSTEM_MONITOR
+ http://10.60.12.7:80/vsm
+
+
+
+
+
+ 5
+ 1
+ APPLIANCE_MANAGER
+ http://10.60.12.7:80/am
+
+
+
+
+
+ 6
+ 1
+ NODE_COLLECTOR
+ http://10.60.12.7:80/nodecollector
+
+
+
+
+
+ 7
+ 1
+ STORAGE_SYSTEM_MONITOR
+ http://10.60.12.7:80/ssm
+
+
+
+
+ 8
+ 1
+ DHCP_SERVICE
+ omapi://10.60.12.7:7911
+
+
+
+
+
+ 9
+ 1
+ BPM_SERVICE
+ http://10.60.12.7:80/bpm-async
+
+
+ Abiquo
+
+
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1.xml
new file mode 100644
index 0000000000..80b055478a
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1.xml
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 1
+ false
+ Abiquo
+ 0
+ 0
+
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml
new file mode 100644
index 0000000000..61264c7e3c
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+2virtual image repo010.60.1.72:/opt/vm_repository0
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml
new file mode 100644
index 0000000000..7127c102db
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_template_11.xml
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ 11
+ m0n0wall-vhd
+ m0n0wall image in VHD format ready for XenServer and HyperV
+ 1/rs/abiport9000/ovf/269/m0n0wall-1.3b18-i386-flat.vmdk-VHD_SPARSE.vhd
+ VHD_SPARSE
+ 10490880
+ 1
+ 128
+ 27262976
+ false
+ 0
+ 2013-01-10T20:25:12-05:00
+ SYSTEM
+ false
+ http://icons.abiquo.com/monowall.jpg
+
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml
new file mode 100644
index 0000000000..a165f1235c
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcrep_2_templates.xml
@@ -0,0 +1,41 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ 11m0n0wall-vhdm0n0wall image in VHD format ready for XenServer and HyperV1/rs/abiport9000/ovf/269/m0n0wall-1.3b18-i386-flat.vmdk-VHD_SPARSE.vhdVHD_SPARSE10490880112827262976false02013-01-10T20:25:12-05:00SYSTEMfalsehttp://icons.abiquo.com/monowall.jpg
+
+
+
+
+
+
+
+
+
+
+
+ 19
+ RHEL6 Build Bot
+ RHEL6 Build Bot
+ 1/rs/abiport9000/ovf/73/build-bot-rhel6-disk1.vmdk
+ VMDK_STREAM_OPTIMIZED
+ 351064576
+ 1
+ 1024
+ 4294967296
+ false
+ 0
+ 2013-01-10T20:25:12-05:00
+ SYSTEM
+ false
+ http://rs.bcn.abiquo.com:9000/public/icons/q.png
+
+
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml
new file mode 100644
index 0000000000..b8b4fca982
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/ent_1_dcreps.xml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+ 2virtual image repo010.60.1.72:/opt/vm_repository0
+
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/login.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/login.xml
new file mode 100644
index 0000000000..32ac4d4938
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/login.xml
@@ -0,0 +1,17 @@
+
+
+
+
+
+
+ true
+ ABIQUO
+ Standard user
+
+ 2
+ en_US
+ Standard
+ user
+ c69a39bd64ffb77ea7ee3369dce742f3
+ User
+
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/not_found_error.xml b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/not_found_error.xml
new file mode 100644
index 0000000000..8aeb650dc0
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/not_found_error.xml
@@ -0,0 +1,7 @@
+
+
+
+ DC-0
+ The requested datacenter does not exist
+
+
diff --git a/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/privilege_errors.html b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/privilege_errors.html
new file mode 100644
index 0000000000..888e4bd751
--- /dev/null
+++ b/awx/lib/site-packages/libcloud/test/compute/fixtures/abiquo/privilege_errors.html
@@ -0,0 +1,23 @@
+
+
+ Apache Tomcat/6.0.35 - Error report
+
+
+
+
HTTP Status 403 - Access is denied
+
+
+ type
+ Status report
+
+ message
+ Access is denied
+
+
+ description
+ Access to the specified resource (Access is denied) has been forbidden.
+