mirror of
https://github.com/ansible/awx.git
synced 2026-03-06 19:21:06 -03:30
Vendoring libcloud.
This commit is contained in:
65
awx/lib/site-packages/libcloud/__init__.py
Normal file
65
awx/lib/site-packages/libcloud/__init__.py
Normal file
@@ -0,0 +1,65 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
libcloud provides a unified interface to the cloud computing resources.

:var __version__: Current version of libcloud
"""

__all__ = ['__version__', 'enable_debug']

__version__ = '0.15.1'

import os

# paramiko is optional; remember whether it is importable so that debug
# logging can later be configured for it as well.
try:
    import paramiko
except ImportError:
    have_paramiko = False
else:
    have_paramiko = True
|
||||
|
||||
|
||||
def enable_debug(fo):
    """
    Enable library wide debugging to a file-like object.

    :param fo: Where to append debugging information
    :type fo: File like object, only write operations are used.
    """
    # Imported lazily to avoid a circular import at module load time.
    from libcloud.common.base import (Connection,
                                      LoggingHTTPConnection,
                                      LoggingHTTPSConnection)

    for conn_cls in (LoggingHTTPConnection, LoggingHTTPSConnection):
        conn_cls.log = fo
    Connection.conn_classes = (LoggingHTTPConnection,
                               LoggingHTTPSConnection)
|
||||
|
||||
|
||||
def _init_once():
    """
    Utility function that is ran once on Library import.

    This checks for the LIBCLOUD_DEBUG environment variable, which if it
    exists is where we will log debug information about the provider
    transports.
    """
    path = os.getenv('LIBCLOUD_DEBUG')
    if not path:
        return

    fo = open(path, 'a')
    enable_debug(fo)

    if have_paramiko:
        paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)


_init_once()
|
||||
0
awx/lib/site-packages/libcloud/common/__init__.py
Normal file
0
awx/lib/site-packages/libcloud/common/__init__.py
Normal file
260
awx/lib/site-packages/libcloud/common/abiquo.py
Normal file
260
awx/lib/site-packages/libcloud/common/abiquo.py
Normal file
@@ -0,0 +1,260 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Abiquo Utilities Module for the Abiquo Driver.
|
||||
|
||||
Common utilities needed by the :class:`AbiquoNodeDriver`.
|
||||
"""
|
||||
import base64
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey, PollingConnection
|
||||
from libcloud.common.base import XmlResponse
|
||||
from libcloud.common.types import InvalidCredsError, LibcloudError
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import urlparse
|
||||
from libcloud.utils.py3 import b
|
||||
from libcloud.compute.base import NodeState
|
||||
|
||||
|
||||
def get_href(element, rel):
    """
    Search a RESTLink element in the :class:`AbiquoResponse`.

    Abiquo is a self-discovering REST API: every entity carries ``link``
    children and the whole API can be walked by following them. This helper
    returns the path of the link whose ``rel`` attribute matches the
    requested relation name.

    For instance, a Rack entity serialized as XML contains links such as::

        <link href="http://host/api/admin/datacenters/1"
        type="application/vnd.abiquo.datacenter+xml" rel="datacenter"/>

    and calling this method with that element and ``rel='datacenter'``
    returns ``'/admin/datacenters/1'``.

    :type element: :class:`xml.etree.ElementTree`
    :param element: Xml Entity returned by Abiquo API (required)
    :type rel: ``str``
    :param rel: relation link name
    :rtype: ``str``
    :return: the 'href' value according to the 'rel' input parameter
    """
    for link in element.findall('link'):
        if link.attrib['rel'] != rel:
            continue

        # href looks like 'http://localhost:80/api/admin/enterprises';
        # we only want the part starting at the trailing slash of '/api/',
        # i.e. '/admin/enterprises'.
        needle = '/api/'
        url_path = urlparse.urlparse(link.attrib['href']).path
        return url_path[url_path.find(needle) + len(needle) - 1:]
|
||||
|
||||
|
||||
class AbiquoResponse(XmlResponse):
    """
    Abiquo XML Response.

    Wraps the response in XML bodies or extract the error data in
    case of error.
    """

    # Map between abiquo state and Libcloud State
    NODE_STATE_MAP = {
        'NOT_ALLOCATED': NodeState.TERMINATED,
        'ALLOCATED': NodeState.PENDING,
        'CONFIGURED': NodeState.PENDING,
        'ON': NodeState.RUNNING,
        'PAUSED': NodeState.PENDING,
        'OFF': NodeState.PENDING,
        'LOCKED': NodeState.PENDING,
        'UNKNOWN': NodeState.UNKNOWN
    }

    def parse_error(self):
        """
        Parse the error messages.

        The parent :class:`XmlResponse` can handle XML bodies, but in some
        cases the Abiquo API answers with HTML instead of XML, so the
        authentication/authorization status codes are handled before any
        body parsing is attempted.
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(driver=self.connection.driver)
        if self.status == httplib.FORBIDDEN:
            raise ForbiddenError(self.connection.driver)

        # Most of the exceptions only carry a single error entry.
        errors = self.parse_body().findall('error')
        raise LibcloudError(errors[0].findtext('message'))

    def success(self):
        """
        Determine if the request was successful.

        Any of the 2XX HTTP response codes are accepted as successful
        requests.

        :rtype: ``bool``
        :return: successful request or not.
        """
        return self.status in (httplib.OK, httplib.CREATED,
                               httplib.NO_CONTENT, httplib.ACCEPTED)

    def async_success(self):
        """
        Determinate if async request was successful.

        The poll request itself may succeed (2XX status) while the
        asynchronous task in the response body has finished with an error,
        so both the status code and the task state are checked.

        :rtype: ``bool``
        :return: successful asynchronous request or not
        """
        if not self.success():
            return False

        # The body contains the 'task' object describing the async job.
        task = self.parse_body()
        return task.findtext('state') == 'FINISHED_SUCCESSFULLY'
|
||||
|
||||
|
||||
class AbiquoConnection(ConnectionUserAndKey, PollingConnection):
    """
    A Connection to Abiquo API.

    Basic :class:`ConnectionUserAndKey` connection with
    :class:`PollingConnection` features for asynchronous tasks.
    """

    responseCls = AbiquoResponse

    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None):
        super(AbiquoConnection, self).__init__(user_id=user_id, key=key,
                                               secure=secure,
                                               host=host, port=port,
                                               url=url, timeout=timeout)

        # Data cached across multiple requests.
        self.cache = {}

    def add_default_headers(self, headers):
        """
        Add Basic Authentication header to all the requests.

        It injects the 'Authorization: Basic Base64String===' header
        in each request

        :type headers: ``dict``
        :param headers: Default input headers

        :rtype ``dict``
        :return: Default input headers with the 'Authorization'
                 header
        """
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')
        headers['Authorization'] = 'Basic ' + encoded
        return headers

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        Manage polling request arguments.

        The Abiquo asynchronous response returns an 'acceptedrequest'
        element whose link with rel='status' points at the task to poll::

            <acceptedrequest>
                <link href="http://uri/to/task" rel="status"/>
                <message>You can follow the progress in the link</message>
            </acceptedrequest>

        The href URI is extracted and used as the poll target.

        :type response: :class:`xml.etree.ElementTree`
        :keyword response: Object returned by poll request.
        :type request_kwargs: ``dict``
        :keyword request_kwargs: Default request arguments and headers
        :rtype: ``dict``
        :return: Modified keyword arguments
        """
        link_to_poll = get_href(response.object, 'status')

        # Override just the 'action' and 'method' keys of the previous dict.
        request_kwargs['action'] = link_to_poll
        request_kwargs['method'] = 'GET'
        return request_kwargs

    def has_completed(self, response):
        """
        Decide if the asynchronous job has ended.

        :type response: :class:`xml.etree.ElementTree`
        :param response: Response object returned by poll request
        :rtype: ``bool``
        :return: Whether the job has completed
        """
        state = response.object.findtext('state')
        return state in ('FINISHED_SUCCESSFULLY', 'ABORTED',
                         'FINISHED_UNSUCCESSFULLY')
|
||||
|
||||
|
||||
class ForbiddenError(LibcloudError):
    """
    Exception used when credentials are ok but user has not permissions.
    """

    def __init__(self, driver):
        message = 'User has not permission to perform this task.'
        # Fix: the original called super(LibcloudError, self), which skipped
        # LibcloudError.__init__ entirely and went straight to its parent.
        # super() must be given the *current* class so the full MRO runs.
        super(ForbiddenError, self).__init__(message, driver)
|
||||
193
awx/lib/site-packages/libcloud/common/aws.py
Normal file
193
awx/lib/site-packages/libcloud/common/aws.py
Normal file
@@ -0,0 +1,193 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
import hmac
|
||||
import time
|
||||
from hashlib import sha256
|
||||
|
||||
try:
|
||||
from lxml import etree as ET
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as ET
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey, XmlResponse, BaseDriver
|
||||
from libcloud.common.types import InvalidCredsError, MalformedResponseError
|
||||
from libcloud.utils.py3 import b, httplib, urlquote
|
||||
from libcloud.utils.xml import findtext, findall
|
||||
|
||||
|
||||
class AWSBaseResponse(XmlResponse):
    # XML namespace used when extracting values from error elements;
    # subclasses override this per service.
    namespace = None

    def _parse_error_details(self, element):
        """
        Parse code and message from the provided error element.

        :return: ``tuple`` with two elements: (code, message)
        :rtype: ``tuple``
        """
        ns = self.namespace
        code = findtext(element=element, xpath='Code', namespace=ns)
        message = findtext(element=element, xpath='Message', namespace=ns)

        return code, message
|
||||
|
||||
|
||||
class AWSGenericResponse(AWSBaseResponse):
    # There are multiple error messages in AWS, but they all have an Error
    # node with Code and Message child nodes. Xpath to select them.
    # None if the root node *is* the Error node
    xpath = None

    # This dict maps <Error><Code>CodeName</Code></Error> to a specific
    # exception class that is raised immediately.
    # If a custom exception class is not defined, errors are accumulated and
    # returned from the parse_error method.
    # Fix: this attribute was originally misspelled 'expections', so the
    # self.exceptions lookup in parse_error raised AttributeError unless a
    # subclass happened to define 'exceptions' itself.
    exceptions = {}
    # Deprecated misspelled alias kept for backwards compatibility.
    expections = exceptions

    def success(self):
        """
        :rtype: ``bool``
        :return: ``True`` for the 200/201/202 status codes.
        """
        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """
        Parse an AWS error response.

        Raises a mapped exception immediately when the error code has an
        entry in ``exceptions``; otherwise accumulates every error message
        and returns them joined with newlines.

        :return: '<code>: <message>' lines for unmapped errors.
        :rtype: ``str``
        """
        context = self.connection.context
        status = int(self.status)

        # FIXME: Probably ditch this as the forbidden message will have
        # corresponding XML.
        if status == httplib.FORBIDDEN:
            if not self.body:
                raise InvalidCredsError(str(self.status) + ': ' + self.error)
            else:
                raise InvalidCredsError(self.body)

        try:
            body = ET.XML(self.body)
        except Exception:
            raise MalformedResponseError('Failed to parse XML',
                                         body=self.body,
                                         driver=self.connection.driver)

        if self.xpath:
            errs = findall(element=body, xpath=self.xpath,
                           namespace=self.namespace)
        else:
            errs = [body]

        msgs = []
        for err in errs:
            code, message = self._parse_error_details(element=err)
            exceptionCls = self.exceptions.get(code, None)

            if exceptionCls is None:
                msgs.append('%s: %s' % (code, message))
                continue

            # Custom exception class is defined, immediately throw an
            # exception. Forward only the context keys the class asks for.
            params = {}
            if hasattr(exceptionCls, 'kwargs'):
                for key in exceptionCls.kwargs:
                    if key in context:
                        params[key] = context[key]

            raise exceptionCls(value=message, driver=self.connection.driver,
                               **params)

        return "\n".join(msgs)
|
||||
|
||||
|
||||
class AWSTokenConnection(ConnectionUserAndKey):
    """
    Connection which carries an optional AWS security token and injects it
    into both the request parameters and the request headers.
    """

    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, url=None, timeout=None, token=None):
        # Store the token before calling the parent constructor so the
        # add_default_* hooks can rely on it.
        self.token = token
        super(AWSTokenConnection, self).__init__(user_id, key, secure=secure,
                                                 host=host, port=port,
                                                 url=url, timeout=timeout)

    def add_default_params(self, params):
        # Even though the token is also added to the headers, it needs to be
        # a request parameter so that it becomes part of the signature.
        if self.token:
            params['x-amz-security-token'] = self.token
        return super(AWSTokenConnection, self).add_default_params(params)

    def add_default_headers(self, headers):
        if self.token:
            headers['x-amz-security-token'] = self.token
        return super(AWSTokenConnection, self).add_default_headers(headers)
|
||||
|
||||
|
||||
class SignedAWSConnection(AWSTokenConnection):
    """
    Connection which signs every request using AWS signature version 2.
    """

    def add_default_params(self, params):
        params['SignatureVersion'] = '2'
        params['SignatureMethod'] = 'HmacSHA256'
        params['AWSAccessKeyId'] = self.user_id
        params['Version'] = self.version
        params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                            time.gmtime())
        params['Signature'] = self._get_aws_auth_param(params, self.key,
                                                       self.action)
        return params

    def _get_aws_auth_param(self, params, secret_key, path='/'):
        """
        Creates the signature required for AWS, per
        http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:

        StringToSign = HTTPVerb + "\n" +
                       ValueOfHostHeaderInLowercase + "\n" +
                       HTTPRequestURI + "\n" +
                       CanonicalizedQueryString <from the preceding step>
        """
        # Canonicalized query string: keys sorted, both keys and values
        # percent-encoded (values keep '-', '_' and '~' unescaped).
        pairs = ['%s=%s' % (urlquote(key, safe=''),
                            urlquote(str(params[key]), safe='-_~'))
                 for key in sorted(params.keys())]
        qs = '&'.join(pairs)

        # The port is only included when it differs from the scheme default.
        default_port = 443 if self.secure else 80
        hostname = self.host
        if self.port != default_port:
            hostname += ":" + str(self.port)

        string_to_sign = '\n'.join(('GET', hostname, path, qs))

        digest = hmac.new(b(secret_key), b(string_to_sign),
                          digestmod=sha256).digest()
        return base64.b64encode(digest).decode('utf-8')
|
||||
|
||||
|
||||
class AWSDriver(BaseDriver):
    """
    Base driver for AWS services which forwards an optional security token
    down to the connection class.
    """

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=None, token=None, **kwargs):
        self.token = token
        super(AWSDriver, self).__init__(key, secret=secret, secure=secure,
                                        host=host, port=port,
                                        api_version=api_version,
                                        region=region, token=token,
                                        **kwargs)

    def _ex_connection_class_kwargs(self):
        # Propagate the token to the connection constructor.
        kwargs = super(AWSDriver, self)._ex_connection_class_kwargs()
        kwargs['token'] = self.token
        return kwargs
|
||||
189
awx/lib/site-packages/libcloud/common/azure.py
Normal file
189
awx/lib/site-packages/libcloud/common/azure.py
Normal file
@@ -0,0 +1,189 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import time
|
||||
import base64
|
||||
import hmac
|
||||
|
||||
from hashlib import sha256
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
from libcloud.utils.xml import fixxpath
|
||||
|
||||
try:
|
||||
from lxml import etree as ET
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as ET
|
||||
|
||||
from libcloud.common.types import InvalidCredsError
|
||||
from libcloud.common.types import LibcloudError, MalformedResponseError
|
||||
from libcloud.common.base import ConnectionUserAndKey, RawResponse
|
||||
from libcloud.common.base import XmlResponse
|
||||
|
||||
# Azure API version
|
||||
API_VERSION = '2012-02-12'
|
||||
|
||||
# The time format for headers in Azure requests
|
||||
AZURE_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
|
||||
|
||||
|
||||
class AzureResponse(XmlResponse):
    """
    XML response class for the Azure API.

    Some error status codes are still treated as valid responses so driver
    code can inspect them.
    """

    valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
                            httplib.BAD_REQUEST]

    def success(self):
        status = int(self.status)
        return 200 <= status <= 299 or status in self.valid_response_codes

    def parse_error(self, msg=None):
        error_msg = 'Unknown error'

        try:
            # Azure does give some meaningful errors, but is inconsistent
            # Some APIs respond with an XML error. Others just dump HTML
            body = self.parse_body()

            if type(body) == ET.Element:
                code = body.findtext(fixxpath(xpath='Code'))
                message = body.findtext(fixxpath(xpath='Message'))
                message = message.split('\n')[0]
                error_msg = '%s: %s' % (code, message)

        except MalformedResponseError:
            # Not XML at all - keep the generic message.
            pass

        if msg:
            error_msg = '%s - %s' % (msg, error_msg)

        if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
            raise InvalidCredsError(error_msg)

        raise LibcloudError('%s Status code: %d.' % (error_msg, self.status),
                            driver=self)
|
||||
|
||||
|
||||
class AzureRawResponse(RawResponse):
    # No Azure-specific behaviour is needed for raw responses.
    pass
|
||||
|
||||
|
||||
class AzureConnection(ConnectionUserAndKey):
    """
    Represents a single connection to Azure
    """

    responseCls = AzureResponse
    rawResponseCls = AzureRawResponse

    def add_default_params(self, params):
        return params

    def pre_connect_hook(self, params, headers):
        headers = copy.deepcopy(headers)

        # Azure expects a GMT date header on every request.
        headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime())
        headers['x-ms-version'] = API_VERSION

        # Add the authorization header
        headers['Authorization'] = self._get_azure_auth_signature(
            method=self.method, headers=headers, params=params,
            account=self.user_id, secret_key=self.key, path=self.action)

        # Azure cribs about this in 'raw' connections
        headers.pop('Host', None)

        return params, headers

    def _get_azure_auth_signature(self, method, headers, params,
                                  account, secret_key, path='/'):
        """
        Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID,
                            UTF-8-Encoding-Of( StringToSign ) ) ) );

        StringToSign = HTTP-VERB + "\n" +
            Content-Encoding + "\n" +
            Content-Language + "\n" +
            Content-Length + "\n" +
            Content-MD5 + "\n" +
            Content-Type + "\n" +
            Date + "\n" +
            If-Modified-Since + "\n" +
            If-Match + "\n" +
            If-None-Match + "\n" +
            If-Unmodified-Since + "\n" +
            Range + "\n" +
            CanonicalizedHeaders +
            CanonicalizedResource;
        """
        special_header_keys = ['content-encoding', 'content-language',
                               'content-length', 'content-md5',
                               'content-type', 'date', 'if-modified-since',
                               'if-match', 'if-none-match',
                               'if-unmodified-since', 'range']

        # Lower-case every header name; collect the x-ms-* headers
        # separately from the rest.
        xms_headers = []
        plain_headers = {}
        for name, value in headers.items():
            name = name.lower()
            value = str(value).strip()
            if name.startswith('x-ms-'):
                xms_headers.append((name, value))
            else:
                plain_headers[name] = value

        # The special headers contribute their values in a fixed order,
        # with an empty string for any header that is absent.
        values_to_sign = [method]
        for name in special_header_keys:
            values_to_sign.append(plain_headers.get(name, ''))

        # The x-ms-* headers are signed sorted, as 'name:value' pairs.
        for name, value in sorted(xms_headers):
            values_to_sign.append('%s:%s' % (name, value))

        # Add the canonicalized path
        values_to_sign.append('/%s%s' % (account, path))

        # URL query parameters (sorted and lower case)
        query_pairs = sorted((key.lower(), str(value).strip())
                             for key, value in params.items())
        for key, value in query_pairs:
            values_to_sign.append('%s:%s' % (key, value))

        string_to_sign = b('\n'.join(values_to_sign))
        signature = hmac.new(b(secret_key), string_to_sign,
                             digestmod=sha256).digest()

        return 'SharedKey %s:%s' % (
            self.user_id, base64.b64encode(signature).decode('utf-8'))
|
||||
968
awx/lib/site-packages/libcloud/common/base.py
Normal file
968
awx/lib/site-packages/libcloud/common/base.py
Normal file
@@ -0,0 +1,968 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import ssl
|
||||
import copy
|
||||
import binascii
|
||||
import time
|
||||
|
||||
try:
|
||||
from lxml import etree as ET
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as ET
|
||||
|
||||
from pipes import quote as pquote
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except:
|
||||
import json
|
||||
|
||||
import libcloud
|
||||
|
||||
from libcloud.utils.py3 import PY3, PY25
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import urlparse
|
||||
from libcloud.utils.py3 import urlencode
|
||||
from libcloud.utils.py3 import StringIO
|
||||
from libcloud.utils.py3 import u
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.utils.misc import lowercase_keys
|
||||
from libcloud.utils.compression import decompress_data
|
||||
from libcloud.common.types import LibcloudError, MalformedResponseError
|
||||
|
||||
from libcloud.httplib_ssl import LibcloudHTTPSConnection
|
||||
|
||||
LibcloudHTTPConnection = httplib.HTTPConnection
|
||||
|
||||
|
||||
class HTTPResponse(httplib.HTTPResponse):
    # On python 2.6 some calls can hang because HEAD isn't quite properly
    # supported.
    # In particular this happens on S3 when calls are made to get_object to
    # objects that don't exist.
    # This applies the behaviour from 2.7, fixing the hangs.
    def read(self, amt=None):
        # Nothing left to read from.
        if self.fp is None:
            return ''

        # HEAD responses carry no body; close immediately instead of
        # waiting for data that will never arrive.
        if self._method == 'HEAD':
            self.close()
            return ''

        return httplib.HTTPResponse.read(self, amt)
|
||||
|
||||
|
||||
class Response(object):
    """
    A base Response class to derive from.
    """

    status = httplib.OK  # Response status code
    headers = {}  # Response headers
    body = None  # Raw response body
    object = None  # Parsed response body

    error = None  # Reason returned by the server.
    connection = None  # Parent connection class
    parse_zero_length_body = False

    def __init__(self, response, connection):
        """
        :param response: HTTP response object. (optional)
        :type response: :class:`httplib.HTTPResponse`

        :param connection: Parent connection object.
        :type connection: :class:`.Connection`
        """
        self.connection = connection

        # http.client in Python 3 doesn't lowercase header names on its own.
        self.headers = lowercase_keys(dict(response.getheaders()))
        self.error = response.reason
        self.status = response.status

        # LoggingConnection stashes the already-decompressed body in
        # '_original_data' so it can be logged; reuse it instead of
        # decompressing a second time.
        original_data = getattr(response, '_original_data', None)

        if original_data:
            self.body = response._original_data
        else:
            self.body = self._decompress_response(body=response.read(),
                                                  headers=self.headers)

        if PY3:
            self.body = b(self.body).decode('utf-8')

        if not self.success():
            raise Exception(self.parse_error())

        self.object = self.parse_body()

    def parse_body(self):
        """
        Parse response body.

        Override in a provider's subclass.

        :return: Parsed body.
        :rtype: ``str``
        """
        return self.body

    def parse_error(self):
        """
        Parse the error messages.

        Override in a provider's subclass.

        :return: Parsed error.
        :rtype: ``str``
        """
        return self.body

    def success(self):
        """
        Determine if our request was successful.

        The meaning of this can be arbitrary; did we receive OK status? Did
        the node get created? Were we authenticated?

        :rtype: ``bool``
        :return: ``True`` or ``False``
        """
        return self.status in (httplib.OK, httplib.CREATED)

    def _decompress_response(self, body, headers):
        """
        Decompress a response body if it is using deflate or gzip encoding.

        :param body: Response body.
        :type body: ``str``

        :param headers: Response headers.
        :type headers: ``dict``

        :return: Decompressed response
        :rtype: ``str``
        """
        encoding = headers.get('content-encoding', None)

        if encoding in ('zlib', 'deflate'):
            return decompress_data('zlib', body)
        if encoding in ('gzip', 'x-gzip'):
            return decompress_data('gzip', body)
        return body.strip()
|
||||
|
||||
|
||||
class JsonResponse(Response):
    """
    A Base JSON Response class to derive from.
    """

    def parse_body(self):
        """
        Deserialize the response body as JSON.

        :raises: :class:`MalformedResponseError` if the body is not
                 valid JSON.
        """
        # Preserve the raw (empty) body unless a subclass explicitly
        # opts in to parsing zero-length responses.
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        try:
            body = json.loads(self.body)
        except Exception:
            # Previously a bare "except:" which also swallowed SystemExit
            # and KeyboardInterrupt; only regular exceptions are caught now.
            raise MalformedResponseError(
                'Failed to parse JSON',
                body=self.body,
                driver=self.connection.driver)
        return body

    # Error payloads are JSON documents as well.
    parse_error = parse_body
|
||||
|
||||
|
||||
class XmlResponse(Response):
    """
    A Base XML Response class to derive from.
    """

    def parse_body(self):
        """
        Deserialize the response body as an XML element tree.

        :raises: :class:`MalformedResponseError` if the body is not
                 valid XML.
        """
        # Preserve the raw (empty) body unless a subclass explicitly
        # opts in to parsing zero-length responses.
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        try:
            body = ET.XML(self.body)
        except Exception:
            # Previously a bare "except:" which also swallowed SystemExit
            # and KeyboardInterrupt; only regular exceptions are caught now.
            raise MalformedResponseError('Failed to parse XML',
                                         body=self.body,
                                         driver=self.connection.driver)
        return body

    # Error payloads are XML documents as well.
    parse_error = parse_body
|
||||
|
||||
|
||||
class RawResponse(Response):
    # Response class used for "raw" requests where the caller streams the
    # body itself (e.g. file uploads/downloads in the storage API).  All
    # attributes are resolved lazily on first access.

    def __init__(self, connection):
        """
        :param connection: Parent connection object.
        :type connection: :class:`.Connection`
        """
        self._status = None
        self._response = None
        self._headers = {}
        self._error = None
        self._reason = None
        self.connection = connection

    @property
    def response(self):
        # Lazily fetch the underlying HTTP response on first access; a
        # failed request triggers error parsing immediately.
        if not self._response:
            response = self.connection.connection.getresponse()
            # NOTE(review): ``body`` is deliberately set to the raw response
            # object itself (not its content) so callers can stream from it.
            self._response, self.body = response, response
            if not self.success():
                self.parse_error()
        return self._response

    @property
    def status(self):
        # Cached HTTP status code of the underlying response.
        if not self._status:
            self._status = self.response.status
        return self._status

    @property
    def headers(self):
        # Response headers with lowercased keys, computed on first access.
        if not self._headers:
            self._headers = lowercase_keys(dict(self.response.getheaders()))
        return self._headers

    @property
    def reason(self):
        # Cached HTTP reason phrase (e.g. "OK", "Not Found").
        if not self._reason:
            self._reason = self.response.reason
        return self._reason
|
||||
|
||||
|
||||
# TODO: Move this to a better location/package
class LoggingConnection():
    """
    Debug class to log all HTTP(s) requests as they could be made
    with the curl command.

    :cvar log: file-like object that logs entries are written to.
    """
    log = None

    def _log_response(self, r):
        # Build a human-readable dump of the response (status line, headers,
        # body) and return it together with a replayable response object,
        # since reading the body below consumes the original stream.
        rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r))
        ht = ""
        v = r.version
        if r.version == 10:
            v = "HTTP/1.0"
        if r.version == 11:
            v = "HTTP/1.1"
        ht += "%s %s %s\r\n" % (v, r.status, r.reason)
        body = r.read()
        for h in r.getheaders():
            ht += "%s: %s\r\n" % (h[0].title(), h[1])
        ht += "\r\n"

        # this is evil. laugh with me. ha arharhrhahahaha
        # (Wrap the textual dump in a fake socket so it can be re-parsed
        # into a fresh HTTPResponse the caller can read from again.)
        class fakesock:
            def __init__(self, s):
                self.s = s

            def makefile(self, *args, **kwargs):
                if PY3:
                    from io import BytesIO
                    cls = BytesIO
                else:
                    cls = StringIO

                return cls(b(self.s))
        rr = r
        headers = lowercase_keys(dict(r.getheaders()))

        encoding = headers.get('content-encoding', None)

        # Decompress here so the logged dump contains readable text.
        if encoding in ['zlib', 'deflate']:
            body = decompress_data('zlib', body)
        elif encoding in ['gzip', 'x-gzip']:
            body = decompress_data('gzip', body)

        if r.chunked:
            # Re-emit the body with chunked transfer-encoding framing so the
            # reconstructed response parses the same way the original did.
            ht += "%x\r\n" % (len(body))
            ht += u(body)
            ht += "\r\n0\r\n"
        else:
            ht += u(body)

        if sys.version_info >= (2, 6) and sys.version_info < (2, 7):
            cls = HTTPResponse
        else:
            cls = httplib.HTTPResponse

        rr = cls(sock=fakesock(ht), method=r._method,
                 debuglevel=r.debuglevel)
        rr.begin()
        rv += ht
        rv += ("\n# -------- end %d:%d response ----------\n"
               % (id(self), id(r)))

        # Stash the decompressed payload so Response.__init__ can reuse it
        # without decompressing (and consuming the stream) a second time.
        rr._original_data = body
        return (rr, rv)

    def _log_curl(self, method, url, body, headers):
        # Render the request as an equivalent ``curl`` command line.
        cmd = ["curl", "-i"]

        if method.lower() == 'head':
            # HEAD method need special handling
            cmd.extend(["--head"])
        else:
            cmd.extend(["-X", pquote(method)])

        for h in headers:
            cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))])

        # TODO: in python 2.6, body can be a file-like object.
        if body is not None and len(body) > 0:
            cmd.extend(["--data-binary", pquote(body)])

        cmd.extend(["--compress"])
        cmd.extend([pquote("%s://%s:%d%s" % (self.protocol, self.host,
                                             self.port, url))])
        return " ".join(cmd)
|
||||
|
||||
|
||||
class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection):
    """
    Utility Class for logging HTTPS connections
    """

    protocol = 'https'

    def getresponse(self):
        # Delegate to the real connection, then (when logging is enabled)
        # dump the response and return a replayable copy of it.
        r = LibcloudHTTPSConnection.getresponse(self)
        if self.log is not None:
            r, rv = self._log_response(r)
            self.log.write(rv + "\n")
            self.log.flush()
        return r

    def request(self, method, url, body=None, headers=None):
        # Fix: guard against headers=None - the previous implementation
        # crashed with AttributeError on headers.update() when no headers
        # were supplied, despite declaring a None default.
        if headers is None:
            headers = {}
        headers.update({'X-LC-Request-ID': str(id(self))})
        if self.log is not None:
            pre = "# -------- begin %d request ----------\n" % id(self)
            self.log.write(pre +
                           self._log_curl(method, url, body, headers) + "\n")
            self.log.flush()
        return LibcloudHTTPSConnection.request(self, method, url, body,
                                               headers)
|
||||
|
||||
|
||||
class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection):
    """
    Utility Class for logging HTTP connections
    """

    protocol = 'http'

    def getresponse(self):
        # Delegate to the real connection, then (when logging is enabled)
        # dump the response and return a replayable copy of it.
        r = LibcloudHTTPConnection.getresponse(self)
        if self.log is not None:
            r, rv = self._log_response(r)
            self.log.write(rv + "\n")
            self.log.flush()
        return r

    def request(self, method, url, body=None, headers=None):
        # Fix: guard against headers=None - the previous implementation
        # crashed with AttributeError on headers.update() when no headers
        # were supplied, despite declaring a None default.
        if headers is None:
            headers = {}
        headers.update({'X-LC-Request-ID': str(id(self))})
        if self.log is not None:
            pre = '# -------- begin %d request ----------\n' % id(self)
            self.log.write(pre +
                           self._log_curl(method, url, body, headers) + "\n")
            self.log.flush()
        return LibcloudHTTPConnection.request(self, method, url,
                                              body, headers)
|
||||
|
||||
|
||||
class Connection(object):
    """
    A Base Connection class to derive from.
    """
    # conn_classes = (LoggingHTTPSConnection)
    # Index 0 is used for plain HTTP, index 1 for HTTPS (indexed by
    # the ``secure`` flag).
    conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection)

    responseCls = Response
    rawResponseCls = RawResponse
    connection = None
    host = '127.0.0.1'
    port = 443
    timeout = None
    secure = 1
    driver = None
    action = None
    cache_busting = False

    allow_insecure = True

    def __init__(self, secure=True, host=None, port=None, url=None,
                 timeout=None):
        self.secure = secure and 1 or 0
        self.ua = []
        self.context = {}

        if not self.allow_insecure and not secure:
            # TODO: We should eventually switch to whitelist instead of
            # blacklist approach
            raise ValueError('Non https connections are not allowed (use '
                             'secure=True)')

        self.request_path = ''

        if host:
            self.host = host

        if port is not None:
            self.port = port
        else:
            # Default port follows the scheme.
            if self.secure == 1:
                self.port = 443
            else:
                self.port = 80

        if url:
            (self.host, self.port, self.secure,
             self.request_path) = self._tuple_from_url(url)

        if timeout:
            self.timeout = timeout

    def set_context(self, context):
        """
        Set the per-request context dictionary.

        :type context: ``dict``
        """
        if not isinstance(context, dict):
            raise TypeError('context needs to be a dictionary')

        self.context = context

    def reset_context(self):
        # Clear per-request context once the request has completed.
        self.context = {}

    def _tuple_from_url(self, url):
        """
        Split an URL into a (host, port, secure, request_path) tuple.

        :raises: :class:`LibcloudError` for non-http(s) schemes.
        """
        secure = 1
        port = None
        (scheme, netloc, request_path, param,
         query, fragment) = urlparse.urlparse(url)

        if scheme not in ['http', 'https']:
            raise LibcloudError('Invalid scheme: %s in url %s' % (scheme, url))

        if scheme == "http":
            secure = 0

        if ":" in netloc:
            # Fix: split only on the *last* colon (maxsplit=1) so a netloc
            # containing multiple colons no longer raises ValueError from
            # the tuple unpacking.  Also dropped the no-op "port = port".
            netloc, port = netloc.rsplit(":", 1)

        if not port:
            if scheme == "http":
                port = 80
            else:
                port = 443

        host = netloc

        return (host, port, secure, request_path)

    def connect(self, host=None, port=None, base_url=None):
        """
        Establish a connection with the API server.

        :type host: ``str``
        :param host: Optional host to override our default

        :type port: ``int``
        :param port: Optional port to override our default

        :returns: A connection
        """
        # prefer the attribute base_url if its set or sent
        connection = None
        secure = self.secure

        if getattr(self, 'base_url', None) and base_url is None:
            (host, port,
             secure, request_path) = self._tuple_from_url(self.base_url)
        elif base_url is not None:
            (host, port,
             secure, request_path) = self._tuple_from_url(base_url)
        else:
            host = host or self.host
            port = port or self.port

        kwargs = {'host': host, 'port': int(port)}

        # Timeout is only supported in Python 2.6 and later
        # http://docs.python.org/library/httplib.html#httplib.HTTPConnection
        if self.timeout and not PY25:
            kwargs.update({'timeout': self.timeout})

        connection = self.conn_classes[secure](**kwargs)
        # You can uncoment this line, if you setup a reverse proxy server
        # which proxies to your endpoint, and lets you easily capture
        # connections in cleartext when you setup the proxy to do SSL
        # for you
        # connection = self.conn_classes[False]("127.0.0.1", 8080)

        self.connection = connection

    def _user_agent(self):
        # Build the User-Agent string from the library version, driver name
        # (if any) and any user supplied tokens.
        user_agent_suffix = ' '.join(['(%s)' % x for x in self.ua])

        if self.driver:
            user_agent = 'libcloud/%s (%s) %s' % (
                libcloud.__version__,
                self.driver.name, user_agent_suffix)
        else:
            user_agent = 'libcloud/%s %s' % (
                libcloud.__version__, user_agent_suffix)

        return user_agent

    def user_agent_append(self, token):
        """
        Append a token to a user agent string.

        Users of the library should call this to uniquely identify their
        requests to a provider.

        :type token: ``str``
        :param token: Token to add to the user agent.
        """
        self.ua.append(token)

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False):
        """
        Request a given `action`.

        Basically a wrapper around the connection
        object's `request` that does some helpful pre-processing.

        :type action: ``str``
        :param action: A path. This can include arguments. If included,
            any extra parameters are appended to the existing ones.

        :type params: ``dict``
        :param params: Optional mapping of additional parameters to send. If
            None, leave as an empty ``dict``.

        :type data: ``unicode``
        :param data: A body of data to send with the request.

        :type headers: ``dict``
        :param headers: Extra headers to add to the request
            None, leave as an empty ``dict``.

        :type method: ``str``
        :param method: An HTTP method such as "GET" or "POST".

        :type raw: ``bool``
        :param raw: True to perform a "raw" request aka only send the headers
                    and use the rawResponseCls class. This is used with
                    storage API when uploading a file.

        :return: An :class:`Response` instance.
        :rtype: :class:`Response` instance
        """
        # Copy the caller's dictionaries so we never mutate their arguments.
        if params is None:
            params = {}
        else:
            params = copy.copy(params)

        if headers is None:
            headers = {}
        else:
            headers = copy.copy(headers)

        action = self.morph_action_hook(action)
        self.action = action
        self.method = method

        # Extend default parameters
        params = self.add_default_params(params)

        # Add cache busting parameters (if enabled)
        if self.cache_busting and method == 'GET':
            params = self._add_cache_busting_to_params(params=params)

        # Extend default headers
        headers = self.add_default_headers(headers)

        # We always send a user-agent header
        headers.update({'User-Agent': self._user_agent()})

        # Indicate that we support gzip and deflate compression
        headers.update({'Accept-Encoding': 'gzip,deflate'})

        port = int(self.port)

        if port not in (80, 443):
            headers.update({'Host': "%s:%d" % (self.host, port)})
        else:
            headers.update({'Host': self.host})

        if data:
            data = self.encode_data(data)
            headers['Content-Length'] = str(len(data))
        elif method.upper() in ['POST', 'PUT'] and not raw:
            # Only send Content-Length 0 with POST and PUT request.
            #
            # Note: Content-Length is not added when using "raw" mode means
            # means that headers are upfront and the body is sent at some
            # point later on. With raw mode user can specify Content-Length
            # with "data" not being set.
            headers['Content-Length'] = '0'

        params, headers = self.pre_connect_hook(params, headers)

        if params:
            if '?' in action:
                url = '&'.join((action, urlencode(params, doseq=True)))
            else:
                url = '?'.join((action, urlencode(params, doseq=True)))
        else:
            url = action

        # Removed terrible hack...this a less-bad hack that doesn't execute a
        # request twice, but it's still a hack.
        self.connect()
        try:
            # @TODO: Should we just pass File object as body to request method
            # instead of dealing with splitting and sending the file ourselves?
            if raw:
                # Raw mode: only send the request line and headers; the
                # caller streams the body itself afterwards.
                self.connection.putrequest(method, url)

                for key, value in list(headers.items()):
                    self.connection.putheader(key, str(value))

                self.connection.endheaders()
            else:
                self.connection.request(method=method, url=url, body=data,
                                        headers=headers)
        except ssl.SSLError:
            e = sys.exc_info()[1]
            self.reset_context()
            raise ssl.SSLError(str(e))

        if raw:
            responseCls = self.rawResponseCls
            kwargs = {'connection': self}
        else:
            responseCls = self.responseCls
            kwargs = {'connection': self,
                      'response': self.connection.getresponse()}

        try:
            response = responseCls(**kwargs)
        finally:
            # Always reset the context after the request has completed
            self.reset_context()

        return response

    def morph_action_hook(self, action):
        # Prefix every action with the request path parsed from the URL.
        return self.request_path + action

    def add_default_params(self, params):
        """
        Adds default parameters (such as API key, version, etc.)
        to the passed `params`

        Should return a dictionary.
        """
        return params

    def add_default_headers(self, headers):
        """
        Adds default headers (such as Authorization, X-Foo-Bar)
        to the passed `headers`

        Should return a dictionary.
        """
        return headers

    def pre_connect_hook(self, params, headers):
        """
        A hook which is called before connecting to the remote server.
        This hook can perform a final manipulation on the params, headers and
        url parameters.

        :type params: ``dict``
        :param params: Request parameters.

        :type headers: ``dict``
        :param headers: Request headers.
        """
        return params, headers

    def encode_data(self, data):
        """
        Encode body data.

        Override in a provider's subclass.
        """
        return data

    def _add_cache_busting_to_params(self, params):
        """
        Add cache busting parameter to the query parameters of a GET request.

        Parameters are only added if "cache_busting" class attribute is set to
        True.

        Note: This should only be used with *naughty* providers which use
        excessive caching of responses.
        """
        cache_busting_value = binascii.hexlify(os.urandom(8)).decode('ascii')

        if isinstance(params, dict):
            params['cache-busting'] = cache_busting_value
        else:
            # Sequence-of-pairs style parameters.
            params.append(('cache-busting', cache_busting_value))

        return params
|
||||
|
||||
|
||||
class PollingConnection(Connection):
    """
    Connection class which can also work with the async APIs.

    After initial requests, this class periodically polls for jobs status and
    waits until the job has finished.
    If job doesn't finish in timeout seconds, an Exception thrown.
    """
    # How long to sleep (in seconds) between consecutive poll requests.
    poll_interval = 0.5
    # Total time (in seconds) to wait for the job before giving up.
    timeout = 200
    # Name of the method on this object used to perform the HTTP requests.
    request_method = 'request'

    def async_request(self, action, params=None, data=None, headers=None,
                      method='GET', context=None):
        """
        Perform an 'async' request to the specified path. Keep in mind that
        this function is *blocking* and 'async' in this case means that the
        hit URL only returns a job ID which is the periodically polled until
        the job has completed.

        This function works like this:

        - Perform a request to the specified path. Response should contain a
          'job_id'.

        - Returned 'job_id' is then used to construct a URL which is used for
          retrieving job status. Constructed URL is then periodically polled
          until the response indicates that the job has completed or the
          timeout of 'self.timeout' seconds has been reached.

        :type action: ``str``
        :param action: A path

        :type params: ``dict``
        :param params: Optional mapping of additional parameters to send. If
            None, leave as an empty ``dict``.

        :type data: ``unicode``
        :param data: A body of data to send with the request.

        :type headers: ``dict``
        :param headers: Extra headers to add to the request
            None, leave as an empty ``dict``.

        :type method: ``str``
        :param method: An HTTP method such as "GET" or "POST".

        :type context: ``dict``
        :param context: Context dictionary which is passed to the functions
                        which construct initial and poll URL.

        :return: An :class:`Response` instance.
        :rtype: :class:`Response` instance
        """

        request = getattr(self, self.request_method)
        kwargs = self.get_request_kwargs(action=action, params=params,
                                         data=data, headers=headers,
                                         method=method,
                                         context=context)
        # Initial request which kicks off the job.
        response = request(**kwargs)
        kwargs = self.get_poll_request_kwargs(response=response,
                                              context=context,
                                              request_kwargs=kwargs)

        # Poll until the job completes or the deadline passes.
        end = time.time() + self.timeout
        completed = False
        while time.time() < end and not completed:
            response = request(**kwargs)
            completed = self.has_completed(response=response)
            if not completed:
                time.sleep(self.poll_interval)

        if not completed:
            raise LibcloudError('Job did not complete in %s seconds' %
                                (self.timeout))

        return response

    def get_request_kwargs(self, action, params=None, data=None, headers=None,
                           method='GET', context=None):
        """
        Arguments which are passed to the initial request() call inside
        async_request.
        """
        kwargs = {'action': action, 'params': params, 'data': data,
                  'headers': headers, 'method': method}
        return kwargs

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        Return keyword arguments which are passed to the request() method when
        polling for the job status.

        :param response: Response object returned by poll request.
        :type response: :class:`HTTPResponse`

        :param request_kwargs: Kwargs previously used to initiate the
                               poll request.
        :type response: ``dict``

        :return ``dict`` Keyword arguments
        """
        # Must be implemented by subclasses which know the provider's
        # job-status URL scheme.
        raise NotImplementedError('get_poll_request_kwargs not implemented')

    def has_completed(self, response):
        """
        Return job completion status.

        :param response: Response object returned by poll request.
        :type response: :class:`HTTPResponse`

        :return ``bool`` True if the job has completed, False otherwise.
        """
        # Must be implemented by subclasses which know the provider's
        # job-status payload.
        raise NotImplementedError('has_completed not implemented')
|
||||
|
||||
|
||||
class ConnectionKey(Connection):
    """
    Base connection class which accepts a single ``key`` argument.
    """

    def __init__(self, key, secure=True, host=None, port=None, url=None,
                 timeout=None):
        """
        Store the provided API ``key`` after delegating all connection
        related arguments to the parent constructor.
        """
        super(ConnectionKey, self).__init__(
            secure=secure, host=host, port=port, url=url, timeout=timeout)
        self.key = key
|
||||
|
||||
|
||||
class ConnectionUserAndKey(ConnectionKey):
    """
    Base connection class which accepts a ``user_id`` and ``key`` argument.
    """

    user_id = None

    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, url=None, timeout=None):
        """
        Store the provided ``user_id`` after delegating the ``key`` and
        the connection related arguments to the parent constructor.
        """
        super(ConnectionUserAndKey, self).__init__(
            key, secure=secure, host=host, port=port,
            url=url, timeout=timeout)
        self.user_id = user_id
|
||||
|
||||
|
||||
class BaseDriver(object):
    """
    Base driver class from which other classes can inherit from.
    """

    # Connection class instantiated by the constructor; overridden by
    # concrete drivers.
    connectionCls = ConnectionKey

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=None, region=None, **kwargs):
        """
        :param key: API key or username to be used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Weither to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type secure: ``bool``

        :param host: Override hostname used for connections.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :param api_version: Optional API version. Only used by drivers
                            which support multiple API versions.
        :type api_version: ``str``

        :param region: Optional driver region. Only used by drivers which
                       support multiple regions.
        :type region: ``str``

        :rtype: ``None``
        """

        self.key = key
        self.secret = secret
        self.secure = secure
        # Positional arguments for connectionCls are built up dynamically
        # because different connection classes take a different number of
        # arguments (key only vs. user_id + key); only supplied values are
        # appended so the connection class defaults still apply.
        args = [self.key]

        if self.secret is not None:
            args.append(self.secret)

        args.append(secure)

        if host is not None:
            args.append(host)

        if port is not None:
            args.append(port)

        self.api_version = api_version
        self.region = region

        conn_kwargs = self._ex_connection_class_kwargs()
        self.connection = self.connectionCls(*args, **conn_kwargs)

        # Give the connection a back-reference to us and open it.
        self.connection.driver = self
        self.connection.connect()

    def _ex_connection_class_kwargs(self):
        """
        Return extra connection keyword arguments which are passed to the
        Connection class constructor.
        """
        return {}
|
||||
101
awx/lib/site-packages/libcloud/common/brightbox.py
Normal file
101
awx/lib/site-packages/libcloud/common/brightbox.py
Normal file
@@ -0,0 +1,101 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
|
||||
from libcloud.compute.types import InvalidCredsError
|
||||
|
||||
from libcloud.utils.py3 import b
|
||||
from libcloud.utils.py3 import httplib
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
|
||||
class BrightboxResponse(JsonResponse):
    def success(self):
        # Any 2xx / 3xx status counts as success.
        return self.status >= httplib.OK and self.status < httplib.BAD_REQUEST

    def parse_body(self):
        """
        Parse the body as JSON only when the server declared a JSON
        content type; otherwise return it verbatim.
        """
        # Fix: use .get() so a response without a Content-Type header no
        # longer raises a KeyError.
        content_type = self.headers.get('content-type', '')
        if content_type.split(';')[0] == 'application/json':
            return super(BrightboxResponse, self).parse_body()
        else:
            return self.body

    def parse_error(self):
        """
        Extract a human-readable error message from the JSON payload.

        :raises: :class:`InvalidCredsError` for OAuth credential errors.
        """
        response = super(BrightboxResponse, self).parse_body()

        if 'error' in response:
            if response['error'] in ['invalid_client', 'unauthorized_client']:
                raise InvalidCredsError(response['error'])

            return response['error']
        elif 'error_name' in response:
            return '%s: %s' % (response['error_name'], response['errors'][0])

        return self.body
|
||||
|
||||
|
||||
class BrightboxConnection(ConnectionUserAndKey):
    """
    Connection class for the Brightbox driver
    """

    host = 'api.gb1.brightbox.com'
    responseCls = BrightboxResponse

    def _fetch_oauth_token(self):
        """
        Fetch an OAuth access token using HTTP basic auth with the
        "none" grant type and return it.

        :raises: :class:`InvalidCredsError` when the token request fails.
        :rtype: ``str``
        """
        body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'})

        # Fix: base64.b64encode + .decode() replaces the previous
        # str(base64.encodestring(...)) which on Python 3 produced a
        # "b'...'" repr inside the Authorization header (and encodestring
        # is deprecated and inserts newlines into long values).
        auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
        authorization = 'Basic ' + auth_b64.decode('utf-8').rstrip()

        self.connect()

        headers = {
            'Host': self.host,
            'User-Agent': self._user_agent(),
            'Authorization': authorization,
            'Content-Type': 'application/json',
            'Content-Length': str(len(body))
        }

        response = self.connection.request(method='POST', url='/token',
                                           body=body, headers=headers)

        response = self.connection.getresponse()

        if response.status == httplib.OK:
            return json.loads(response.read())['access_token']
        else:
            responseCls = BrightboxResponse(response=response, connection=self)
            message = responseCls.parse_error()
            raise InvalidCredsError(message)

    def add_default_headers(self, headers):
        # Lazily fetch and cache the OAuth token on first use.
        try:
            headers['Authorization'] = 'OAuth ' + self.token
        except AttributeError:
            self.token = self._fetch_oauth_token()

            headers['Authorization'] = 'OAuth ' + self.token

        return headers

    def encode_data(self, data):
        # Brightbox expects JSON encoded request bodies.
        return json.dumps(data)
|
||||
146
awx/lib/site-packages/libcloud/common/cloudsigma.py
Normal file
146
awx/lib/site-packages/libcloud/common/cloudsigma.py
Normal file
@@ -0,0 +1,146 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__all__ = [
|
||||
'API_ENDPOINTS_1_0',
|
||||
'API_ENDPOINTS_2_0',
|
||||
'API_VERSIONS',
|
||||
'INSTANCE_TYPES'
|
||||
]
|
||||
|
||||
# API end-points
|
||||
API_ENDPOINTS_1_0 = {
|
||||
'zrh': {
|
||||
'name': 'Zurich',
|
||||
'country': 'Switzerland',
|
||||
'host': 'api.zrh.cloudsigma.com'
|
||||
},
|
||||
'lvs': {
|
||||
'name': 'Las Vegas',
|
||||
'country': 'United States',
|
||||
'host': 'api.lvs.cloudsigma.com'
|
||||
}
|
||||
}
|
||||
|
||||
API_ENDPOINTS_2_0 = {
|
||||
'zrh': {
|
||||
'name': 'Zurich',
|
||||
'country': 'Switzerland',
|
||||
'host': 'zrh.cloudsigma.com'
|
||||
},
|
||||
'lvs': {
|
||||
'name': 'Las Vegas',
|
||||
'country': 'United States',
|
||||
'host': 'lvs.cloudsigma.com'
|
||||
},
|
||||
'wdc': {
|
||||
'name': 'Washington DC',
|
||||
'country': 'United States',
|
||||
'host': 'wdc.cloudsigma.com'
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
DEFAULT_REGION = 'zrh'
|
||||
|
||||
# Supported API versions.
|
||||
API_VERSIONS = [
|
||||
'1.0' # old and deprecated
|
||||
'2.0'
|
||||
]
|
||||
|
||||
DEFAULT_API_VERSION = '2.0'
|
||||
|
||||
# CloudSigma doesn't specify special instance types.
|
||||
# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work,
|
||||
# 500 MB to 32000 MB for ram
|
||||
# and 1 GB to 1024 GB for hard drive size.
|
||||
# Plans in this file are based on examples listed on http://www.cloudsigma
|
||||
# .com/en/pricing/price-schedules
|
||||
INSTANCE_TYPES = [
|
||||
{
|
||||
'id': 'micro-regular',
|
||||
'name': 'Micro/Regular instance',
|
||||
'cpu': 1100,
|
||||
'memory': 640,
|
||||
'disk': 10 + 3,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'micro-high-cpu',
|
||||
'name': 'Micro/High CPU instance',
|
||||
'cpu': 2200,
|
||||
'memory': 640,
|
||||
'disk': 80,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'standard-small',
|
||||
'name': 'Standard/Small instance',
|
||||
'cpu': 1100,
|
||||
'memory': 1741,
|
||||
'disk': 50,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'standard-large',
|
||||
'name': 'Standard/Large instance',
|
||||
'cpu': 4400,
|
||||
'memory': 7680,
|
||||
'disk': 250,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'standard-extra-large',
|
||||
'name': 'Standard/Extra Large instance',
|
||||
'cpu': 8800,
|
||||
'memory': 15360,
|
||||
'disk': 500,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'high-memory-extra-large',
|
||||
'name': 'High Memory/Extra Large instance',
|
||||
'cpu': 7150,
|
||||
'memory': 17510,
|
||||
'disk': 250,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'high-memory-double-extra-large',
|
||||
'name': 'High Memory/Double Extra Large instance',
|
||||
'cpu': 14300,
|
||||
'memory': 32768,
|
||||
'disk': 500,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'high-cpu-medium',
|
||||
'name': 'High CPU/Medium instance',
|
||||
'cpu': 5500,
|
||||
'memory': 1741,
|
||||
'disk': 150,
|
||||
'bandwidth': None,
|
||||
},
|
||||
{
|
||||
'id': 'high-cpu-extra-large',
|
||||
'name': 'High CPU/Extra Large instance',
|
||||
'cpu': 20000,
|
||||
'memory': 7168,
|
||||
'disk': 500,
|
||||
'bandwidth': None,
|
||||
}
|
||||
]
|
||||
195
awx/lib/site-packages/libcloud/common/cloudstack.py
Normal file
195
awx/lib/site-packages/libcloud/common/cloudstack.py
Normal file
@@ -0,0 +1,195 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import copy
|
||||
import hmac
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import urlencode
|
||||
from libcloud.utils.py3 import urlquote
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.types import ProviderError
|
||||
from libcloud.common.base import ConnectionUserAndKey, PollingConnection
|
||||
from libcloud.common.base import JsonResponse
|
||||
from libcloud.common.types import MalformedResponseError
|
||||
from libcloud.compute.types import InvalidCredsError
|
||||
|
||||
|
||||
class CloudStackResponse(JsonResponse):
    def parse_error(self):
        """
        Turn an error response into an exception.

        A 401 becomes :class:`InvalidCredsError`; anything else becomes a
        :class:`ProviderError` carrying CloudStack's ``errortext`` when the
        body provides one, or the raw body otherwise.
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Invalid provider credentials')

        body = self.parse_body()
        first_entry = list(body.values())[0]

        if 'errortext' in first_entry:
            message = first_entry['errortext']
        else:
            message = self.body

        raise ProviderError(value=message, http_code=self.status,
                            driver=self.connection.driver)
|
||||
|
||||
|
||||
class CloudStackConnection(ConnectionUserAndKey, PollingConnection):
    """
    Connection class for CloudStack drivers.

    Signs every request with the account's API key/secret pair and uses the
    ``PollingConnection`` machinery to wait for asynchronous jobs.
    """
    responseCls = CloudStackResponse
    poll_interval = 1          # seconds between queryAsyncJobResult polls
    request_method = '_sync_request'
    timeout = 600              # give up polling an async job after this long

    # Values of 'jobstatus' returned by queryAsyncJobResult.
    ASYNC_PENDING = 0
    ASYNC_SUCCESS = 1
    ASYNC_FAILURE = 2

    def encode_data(self, data):
        """
        Most of the data is sent as part of query params (eeww),
        but in newer versions, the userdata argument can be sent as
        urlencoded data in the request body.
        """
        if data:
            data = urlencode(data)

        return data

    def _make_signature(self, params):
        # Build the signature CloudStack expects: lower-case the keys, sort
        # by key, URL-quote keys and values, join with '&', lower-case the
        # whole string (with '+' mapped to '%20'), then HMAC-SHA1 it with
        # the secret key and base64-encode the digest.
        signature = [(k.lower(), v) for k, v in list(params.items())]
        signature.sort(key=lambda x: x[0])

        pairs = []
        for pair in signature:
            key = urlquote(str(pair[0]), safe='[]')
            value = urlquote(str(pair[1]), safe='[]')
            item = '%s=%s' % (key, value)
            pairs.append(item)

        signature = '&'.join(pairs)

        signature = signature.lower().replace('+', '%20')
        signature = hmac.new(b(self.key), msg=b(signature),
                             digestmod=hashlib.sha1)
        return base64.b64encode(b(signature.digest()))

    def add_default_params(self, params):
        # Every call carries the API key and requests a JSON response.
        params['apiKey'] = self.user_id
        params['response'] = 'json'

        return params

    def pre_connect_hook(self, params, headers):
        # Sign the final parameter set just before the request is sent, so
        # the signature covers the default params added above.
        params['signature'] = self._make_signature(params)

        return params, headers

    def _async_request(self, command, action=None, params=None, data=None,
                       headers=None, method='GET', context=None):
        """
        Issue an asynchronous CloudStack command and block (poll) until the
        job finishes, returning the job's 'jobresult' payload.
        """
        if params:
            context = copy.deepcopy(params)
        else:
            context = {}

        # Command is specified as part of GET call
        context['command'] = command
        result = super(CloudStackConnection, self).async_request(
            action=action, params=params, data=data, headers=headers,
            method=method, context=context)
        return result['jobresult']

    def get_request_kwargs(self, action, params=None, data='', headers=None,
                           method='GET', context=None):
        # First request of a poll sequence: forward the original command.
        command = context['command']
        request_kwargs = {'command': command, 'action': action,
                          'params': params, 'data': data,
                          'headers': headers, 'method': method}
        return request_kwargs

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        # Subsequent polls query the job id returned by the first response.
        job_id = response['jobid']
        params = {'jobid': job_id}
        kwargs = {'command': 'queryAsyncJobResult', 'params': params}
        return kwargs

    def has_completed(self, response):
        # A missing 'jobstatus' key is treated as still pending.
        status = response.get('jobstatus', self.ASYNC_PENDING)

        if status == self.ASYNC_FAILURE:
            msg = response.get('jobresult', {}).get('errortext', status)
            raise Exception(msg)

        return status == self.ASYNC_SUCCESS

    def _sync_request(self, command, action=None, params=None, data=None,
                      headers=None, method='GET'):
        """
        This method handles synchronous calls which are generally fast
        information retrieval requests and thus return 'quickly'.
        """
        # command is always sent as part of "command" query parameter
        if params:
            params = copy.deepcopy(params)
        else:
            params = {}

        params['command'] = command
        result = self.request(action=self.driver.path, params=params,
                              data=data, headers=headers, method=method)

        command = command.lower()

        # Work around for older versions which don't return "response" suffix
        # in the delete ingress rule response command name
        if (command == 'revokesecuritygroupingress' and
                'revokesecuritygroupingressresponse' not in result.object):
            command = command
        else:
            command = command + 'response'

        if command not in result.object:
            raise MalformedResponseError(
                "Unknown response format",
                body=result.body,
                driver=self.driver)
        result = result.object[command]
        return result
|
||||
|
||||
|
||||
class CloudStackDriverMixIn(object):
    """
    Mix-in that wires a driver up to a :class:`CloudStackConnection` and
    exposes thin sync/async request helpers that delegate to it.
    """

    host = None
    path = None

    connectionCls = CloudStackConnection

    def __init__(self, key, secret=None, secure=True, host=None, port=None):
        # Fall back to the class-level host when none is supplied.
        host = host or self.host
        super(CloudStackDriverMixIn, self).__init__(key, secret, secure, host,
                                                    port)

    def _sync_request(self, command, action=None, params=None, data=None,
                      headers=None, method='GET'):
        # Straight delegation to the connection object.
        return self.connection._sync_request(
            command=command, action=action, params=params, data=data,
            headers=headers, method=method)

    def _async_request(self, command, action=None, params=None, data=None,
                       headers=None, method='GET', context=None):
        # Straight delegation to the connection object.
        return self.connection._async_request(
            command=command, action=action, params=params, data=data,
            headers=headers, method=method, context=context)
|
||||
189
awx/lib/site-packages/libcloud/common/gandi.py
Normal file
189
awx/lib/site-packages/libcloud/common/gandi.py
Normal file
@@ -0,0 +1,189 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Gandi driver base classes
|
||||
"""
|
||||
|
||||
import time
|
||||
import hashlib
|
||||
import sys
|
||||
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.base import ConnectionKey
|
||||
from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
|
||||
|
||||
# Global constants
|
||||
|
||||
DEFAULT_TIMEOUT = 600 # operation pooling max seconds
|
||||
DEFAULT_INTERVAL = 20 # seconds between 2 operation.info
|
||||
|
||||
|
||||
class GandiException(Exception):
    """
    Exception class for the Gandi driver.

    Instances carry ``(code, message)`` in ``args``.
    """

    def __str__(self):
        code, message = self.args[0], self.args[1]
        return '(%u) %s' % (code, message)

    def __repr__(self):
        code, message = self.args[0], self.args[1]
        return '<GandiException code %u "%s">' % (code, message)
|
||||
|
||||
|
||||
class GandiResponse(XMLRPCResponse):
    """
    A Base Gandi Response class to derive from.

    Adds nothing itself; XML-RPC parsing is inherited from
    ``XMLRPCResponse``.
    """
|
||||
|
||||
|
||||
class GandiConnection(XMLRPCConnection, ConnectionKey):
    """
    Connection class for the Gandi driver.

    All API calls are XML-RPC requests against ``rpc.gandi.net``, with the
    account's API key automatically inserted as the first positional
    argument of every method call.
    """

    responseCls = GandiResponse
    host = 'rpc.gandi.net'
    endpoint = '/xmlrpc/'

    def __init__(self, key, secure=True):
        # Note: Method resolution order in this case is
        # XMLRPCConnection -> Connection and Connection doesn't take key as the
        # first argument so we specify a keyword argument instead.
        # Previously it was GandiConnection -> ConnectionKey so it worked fine.
        super(GandiConnection, self).__init__(key=key, secure=secure)
        self.driver = BaseGandiDriver

    def request(self, method, *args):
        # Gandi's XML-RPC API expects the API key as the first argument of
        # every call; prepend it transparently.
        args = (self.key, ) + args
        return super(GandiConnection, self).request(method, *args)
|
||||
|
||||
|
||||
class BaseGandiDriver(object):
    """
    Gandi base driver
    """
    connectionCls = GandiConnection
    name = 'Gandi'

    # Specific methods for gandi
    def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT,
                        check_interval=DEFAULT_INTERVAL):
        """
        Wait for an operation to succeed.

        Polls ``operation.info`` every ``check_interval`` seconds for up to
        roughly ``timeout`` seconds.

        :return: ``True`` if the operation reached step 'DONE', ``False``
                 if it reached 'ERROR'/'CANCEL' or the timeout elapsed.
        :raises GandiException: (code 1002) on any unexpected API error.
        """

        for i in range(0, timeout, check_interval):
            try:
                op = self.connection.request('operation.info', int(id)).object

                if op['step'] == 'DONE':
                    return True
                if op['step'] in ['ERROR', 'CANCEL']:
                    return False
            except (KeyError, IndexError):
                # Response missing the expected keys - keep polling.
                pass
            except Exception:
                # Python 2.4/3 compatible way of capturing the exception
                # (vendored code predates `except ... as e` portability).
                e = sys.exc_info()[1]
                raise GandiException(1002, e)

            time.sleep(check_interval)
        return False
|
||||
|
||||
|
||||
class BaseObject(object):
    """Base class for objects not conventional"""

    # Prefix mixed into get_uuid() so different object kinds with the same
    # numeric id hash to different uuids (subclasses use 'inet:', 'if:').
    uuid_prefix = ''

    def __init__(self, id, state, driver):
        # id is coerced to ``str``; a falsy id becomes None.
        self.id = str(id) if id else None
        self.state = state
        self.driver = driver
        self.uuid = self.get_uuid()

    def get_uuid(self):
        """Unique hash for this object

        :return: ``str``

        The hash is a function of an SHA1 hash of prefix, the object's ID and
        its driver which means that it should be unique between all
        interfaces.
        TODO : to review
        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> vif = driver.create_interface()
        >>> vif.get_uuid()
        'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'

        Note, for example, that this example will always produce the
        same UUID!
        """
        hashstring = '%s:%s:%s' % \
            (self.uuid_prefix, self.id, self.driver.type)
        return hashlib.sha1(b(hashstring)).hexdigest()
|
||||
|
||||
|
||||
class IPAddress(BaseObject):
    """
    Provide a common interface for ip addresses
    """

    uuid_prefix = 'inet:'

    def __init__(self, id, state, inet, driver, version=4, extra=None):
        super(IPAddress, self).__init__(id, state, driver)
        # The address string itself, plus IP version and provider extras.
        self.inet = inet
        self.version = version
        self.extra = extra or {}

    def __repr__(self):
        return '<IPAddress: id=%s, address=%s, state=%s, driver=%s ...>' % (
            self.id, self.inet, self.state, self.driver.name)
|
||||
|
||||
|
||||
class NetworkInterface(BaseObject):
    """
    Provide a common interface for network interfaces
    """

    uuid_prefix = 'if:'

    def __init__(self, id, state, mac_address, driver,
                 ips=None, node_id=None, extra=None):
        super(NetworkInterface, self).__init__(id, state, driver)
        # MAC address, attached IPs, owning node id and provider extras.
        self.mac = mac_address
        self.ips = ips or {}
        self.node_id = node_id
        self.extra = extra or {}

    def __repr__(self):
        return '<Interface: id=%s, mac=%s, state=%s, driver=%s ...>' % (
            self.id, self.mac, self.state, self.driver.name)
|
||||
|
||||
|
||||
class Disk(BaseObject):
    """
    Gandi disk component
    """

    def __init__(self, id, state, name, driver, size, extra=None):
        super(Disk, self).__init__(id, state, driver)
        # Human-readable name, size and provider extras.
        self.name = name
        self.size = size
        self.extra = extra or {}

    def __repr__(self):
        return '<Disk: id=%s, name=%s, state=%s, size=%s, driver=%s ...>' % (
            self.id, self.name, self.state, self.size, self.driver.name)
|
||||
183
awx/lib/site-packages/libcloud/common/gogrid.py
Normal file
183
awx/lib/site-packages/libcloud/common/gogrid.py
Normal file
@@ -0,0 +1,183 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import hashlib
|
||||
import time
|
||||
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.types import InvalidCredsError, LibcloudError
|
||||
from libcloud.common.types import MalformedResponseError
|
||||
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
|
||||
from libcloud.compute.base import NodeLocation
|
||||
|
||||
HOST = 'api.gogrid.com'
|
||||
PORTS_BY_SECURITY = {True: 443, False: 80}
|
||||
API_VERSION = '1.8'
|
||||
|
||||
__all__ = [
|
||||
"GoGridResponse",
|
||||
"GoGridConnection",
|
||||
"GoGridIpAddress",
|
||||
"BaseGoGridDriver",
|
||||
]
|
||||
|
||||
|
||||
class GoGridResponse(JsonResponse):

    def __init__(self, *args, **kwargs):
        # Driver reference is only used for error reporting below.
        self.driver = BaseGoGridDriver
        super(GoGridResponse, self).__init__(*args, **kwargs)

    def success(self):
        """
        Return whether the API call succeeded.

        Returns ``None`` for an empty body; raises on auth failures and on
        bodies that cannot be parsed as JSON.
        """
        if self.status == 403:
            raise InvalidCredsError('Invalid credentials', self.driver)
        if self.status == 401:
            raise InvalidCredsError('API Key has insufficient rights',
                                    self.driver)
        if not self.body:
            return None
        try:
            parsed = self.parse_body()
        except ValueError:
            raise MalformedResponseError('Malformed reply',
                                         body=self.body,
                                         driver=self.driver)
        return parsed['status'] == 'success'

    def parse_error(self):
        """Extract the first error message from the body, or ``None``."""
        try:
            return self.parse_body()["list"][0]["message"]
        except (ValueError, KeyError):
            return None
|
||||
|
||||
|
||||
class GoGridConnection(ConnectionUserAndKey):
    """
    Connection class for the GoGrid driver
    """

    host = HOST
    responseCls = GoGridResponse

    def add_default_params(self, params):
        # Every request carries the API key, API version, JSON format flag
        # and a freshly computed signature.
        params["api_key"] = self.user_id
        params["v"] = API_VERSION
        params["format"] = 'json'
        params["sig"] = self.get_signature(self.user_id, self.key)
        return params

    def get_signature(self, key, secret):
        """ create sig from md5 of key + secret + time """
        # MD5 here is mandated by the GoGrid API signing scheme, not a
        # security choice of ours.
        raw = key + secret + str(int(time.time()))
        return hashlib.md5(b(raw)).hexdigest()

    def request(self, action, params=None, data='', headers=None, method='GET',
                raw=False):
        # Plain pass-through; kept so the signature documents the defaults.
        return super(GoGridConnection, self).request(action, params, data,
                                                     headers, method, raw)
|
||||
|
||||
|
||||
class GoGridIpAddress(object):
    """
    IP Address

    Simple value object describing one GoGrid IP: its numeric id, the
    address string, whether it is public, its assignment state and subnet.
    """

    def __init__(self, id, ip, public, state, subnet):
        self.id = id
        self.ip = ip
        self.public = public
        self.state = state
        self.subnet = subnet
|
||||
|
||||
|
||||
class BaseGoGridDriver(object):
    """GoGrid has common object model for services they
    provide, like locations and IP, so keep handling of
    these things in a single place."""

    name = "GoGrid"

    def _get_ip(self, element):
        # Extract the bare IP string from a nested 'ip' element.
        return element.get('ip').get('ip')

    def _to_ip(self, element):
        # Convert one API 'ip' element into a GoGridIpAddress, attaching
        # the datacenter as a NodeLocation on ip.location.
        ip = GoGridIpAddress(id=element['id'],
                             ip=element['ip'],
                             public=element['public'],
                             subnet=element['subnet'],
                             state=element["state"]["name"])
        ip.location = self._to_location(element['datacenter'])
        return ip

    def _to_ips(self, object):
        # NOTE: parameter name shadows the builtin 'object'; kept as-is for
        # compatibility with the vendored upstream code.
        return [self._to_ip(el)
                for el in object['list']]

    def _to_location(self, element):
        # GoGrid datacenters are hard-coded as US locations.
        location = NodeLocation(id=element['id'],
                                name=element['name'],
                                country="US",
                                driver=self.connection.driver)
        return location

    def _to_locations(self, object):
        # See _to_ips regarding the 'object' parameter name.
        return [self._to_location(el)
                for el in object['list']]

    def ex_list_ips(self, **kwargs):
        """Return list of IP addresses assigned to
        the account.

        :keyword public: set to True to list only
                 public IPs or False to list only
                 private IPs. Set to None or not specify
                 at all not to filter by type
        :type    public: ``bool``

        :keyword assigned: set to True to list only addresses
                 assigned to servers, False to list unassigned
                 addresses and set to None or don't set at all
                 not to filter by state
        :type    assigned: ``bool``

        :keyword location: filter IP addresses by location
        :type    location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`GoGridIpAddress`
        """

        params = {}

        # Each filter is only applied when explicitly passed and not None.
        if "public" in kwargs and kwargs["public"] is not None:
            params["ip.type"] = {True: "Public",
                                 False: "Private"}[kwargs["public"]]
        if "assigned" in kwargs and kwargs["assigned"] is not None:
            params["ip.state"] = {True: "Assigned",
                                  False: "Unassigned"}[kwargs["assigned"]]
        if "location" in kwargs and kwargs['location'] is not None:
            params['datacenter'] = kwargs['location'].id

        response = self.connection.request('/api/grid/ip/list', params=params)
        ips = self._to_ips(response.object)
        return ips

    def _get_first_ip(self, location=None):
        # Grab the first public, unassigned IP; raises when none are left.
        ips = self.ex_list_ips(public=True, assigned=False, location=location)
        try:
            return ips[0].ip
        except IndexError:
            raise LibcloudError('No public unassigned IPs left',
                                self.driver)
|
||||
671
awx/lib/site-packages/libcloud/common/google.py
Normal file
671
awx/lib/site-packages/libcloud/common/google.py
Normal file
@@ -0,0 +1,671 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Module for Google Connection and Authentication classes.
|
||||
|
||||
Information about setting up your Google OAUTH2 credentials:
|
||||
|
||||
For libcloud, there are two basic methods for authenticating to Google using
|
||||
OAUTH2: Service Accounts and Client IDs for Installed Applications.
|
||||
|
||||
Both are initially set up from the Cloud Console_
|
||||
_Console: https://cloud.google.com/console
|
||||
|
||||
Setting up Service Account authentication (note that you need the PyCrypto
|
||||
package installed to use this):
|
||||
- Go to the Console
|
||||
- Go to your project and then to "APIs & auth" on the left
|
||||
- Click on "Credentials"
|
||||
- Click on "Create New Client ID..."
|
||||
- Select "Service account" and click on "Create Client ID"
|
||||
- Download the Private Key (should happen automatically).
|
||||
- The key that you download is a PKCS12 key. It needs to be converted to
|
||||
the PEM format.
|
||||
- Convert the key using OpenSSL (the default password is 'notasecret'):
|
||||
``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts
|
||||
-passin pass:notasecret | openssl rsa -out PRIV.pem``
|
||||
- Move the .pem file to a safe location.
|
||||
- To Authenticate, you will need to pass the Service Account's "Email
|
||||
address" in as the user_id and the path to the .pem file as the key.
|
||||
|
||||
Setting up Installed Application authentication:
|
||||
- Go to the Console
|
||||
- Go to your project and then to "APIs & auth" on the left
|
||||
- Click on "Credentials"
|
||||
- Select "Installed application" and "Other" then click on
|
||||
"Create Client ID"
|
||||
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
|
||||
secret" as the key
|
||||
- The first time that you do this, the libcloud will give you a URL to
|
||||
visit. Copy and paste the URL into a browser.
|
||||
- When you go to the URL it will ask you to log in (if you aren't already)
|
||||
and ask you if you want to allow the project access to your account.
|
||||
- Click on Accept and you will be given a code.
|
||||
- Paste that code at the prompt given to you by the Google libcloud
|
||||
connection.
|
||||
- At that point, a token & refresh token will be stored in your home
|
||||
directory and will be used for authentication.
|
||||
|
||||
Please remember to secure your keys and access tokens.
|
||||
"""
|
||||
from __future__ import with_statement
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
import base64
|
||||
import errno
|
||||
import time
|
||||
import datetime
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
|
||||
from libcloud.utils.py3 import httplib, urlencode, urlparse, PY3
|
||||
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
|
||||
PollingConnection)
|
||||
from libcloud.common.types import (ProviderError,
|
||||
LibcloudError)
|
||||
|
||||
try:
|
||||
from Crypto.Hash import SHA256
|
||||
from Crypto.PublicKey import RSA
|
||||
from Crypto.Signature import PKCS1_v1_5
|
||||
import Crypto.Random
|
||||
Crypto.Random.atfork()
|
||||
except ImportError:
|
||||
# The pycrypto library is unavailable
|
||||
SHA256 = None
|
||||
RSA = None
|
||||
PKCS1_v1_5 = None
|
||||
|
||||
TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
|
||||
|
||||
|
||||
class GoogleAuthError(LibcloudError):
    """Generic Error class for various authentication errors."""

    def __init__(self, value):
        # Human-readable description of the authentication failure.
        self.value = value

    def __repr__(self):
        return repr(self.value)
|
||||
|
||||
|
||||
class GoogleBaseError(ProviderError):
    """
    Base error for Google API responses; carries the Google-specific error
    ``code`` in addition to the HTTP status and message.
    """

    def __init__(self, value, http_code, code, driver=None):
        # 'code' is the Google API error code (e.g. 'QUOTA_EXCEEDED'), or
        # None when the response carried no structured code.
        self.code = code
        super(GoogleBaseError, self).__init__(value, http_code, driver)
|
||||
|
||||
|
||||
class InvalidRequestError(GoogleBaseError):
    # Raised by GoogleResponse.parse_body for HTTP 400 responses.
    pass


class JsonParseError(GoogleBaseError):
    # Raised when a successful (2xx) response body is not valid JSON.
    pass


class ResourceNotFoundError(GoogleBaseError):
    # Raised by GoogleResponse.parse_body for HTTP 404 responses.
    pass


class QuotaExceededError(GoogleBaseError):
    # Raised for Google error code 'QUOTA_EXCEEDED'.
    pass


class ResourceExistsError(GoogleBaseError):
    # Raised for Google error code 'RESOURCE_ALREADY_EXISTS'.
    pass


class ResourceInUseError(GoogleBaseError):
    # Raised for Google error codes starting with 'RESOURCE_IN_USE'.
    pass
|
||||
|
||||
|
||||
class GoogleResponse(JsonResponse):
    """
    Google Base Response class.

    ``success()`` always returns True; errors are instead surfaced as typed
    exceptions raised from ``parse_body()``.
    """

    def success(self):
        """
        Determine if the request was successful.

        For the Google response class, tag all responses as successful and
        raise appropriate Exceptions from parse_body.

        :return: C{True}
        """
        return True

    def _get_error(self, body):
        """
        Get the error code and message from a JSON response.

        Return just the first error if there are multiple errors.

        :param body: The body of the JSON response dictionary
        :type  body: ``dict``

        :return: Tuple containing error code and message
        :rtype: ``tuple`` of ``str`` or ``int``
        """
        if 'errors' in body['error']:
            err = body['error']['errors'][0]
        else:
            err = body['error']

        if 'code' in err:
            code = err.get('code')
            message = err.get('message')
        else:
            # OAuth-style errors carry no 'code'; fall back to the
            # top-level description (or the raw error object).
            code = None
            message = body.get('error_description', err)

        return (code, message)

    def _error_or_raw(self, json_error, body):
        # Shared helper for the non-2xx branches: prefer the structured
        # (code, message) pair when the body parsed as JSON and contains an
        # 'error' key; otherwise report the raw body with no code.
        if (not json_error) and ('error' in body):
            return self._get_error(body)
        return (None, body)

    def parse_body(self):
        """
        Parse the JSON response body, or raise exceptions as appropriate.

        :return: JSON dictionary
        :rtype: ``dict``

        :raises JsonParseError: successful HTTP status but unparsable body
        :raises QuotaExceededError: error code 'QUOTA_EXCEEDED'
        :raises ResourceExistsError: error code 'RESOURCE_ALREADY_EXISTS'
        :raises ResourceInUseError: error code starting 'RESOURCE_IN_USE'
        :raises ResourceNotFoundError: HTTP 404
        :raises InvalidRequestError: HTTP 400
        :raises GoogleBaseError: any other error response
        """
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body

        json_error = False
        try:
            body = json.loads(self.body)
        except Exception:
            # If there is both a JSON parsing error and an unsuccessful http
            # response (like a 404), we want to raise the http error and not
            # the JSON one, so don't raise JsonParseError here.
            # (Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit are no longer swallowed.)
            body = self.body
            json_error = True

        if self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]:
            if json_error:
                raise JsonParseError(body, self.status, None)
            elif 'error' in body:
                (code, message) = self._get_error(body)
                if code == 'QUOTA_EXCEEDED':
                    raise QuotaExceededError(message, self.status, code)
                elif code == 'RESOURCE_ALREADY_EXISTS':
                    raise ResourceExistsError(message, self.status, code)
                elif code is not None and code.startswith('RESOURCE_IN_USE'):
                    # 'code' can be None for OAuth-style errors (see
                    # _get_error); guard startswith() so the real error is
                    # not masked by an AttributeError.
                    raise ResourceInUseError(message, self.status, code)
                else:
                    raise GoogleBaseError(message, self.status, code)
            else:
                return body

        elif self.status == httplib.NOT_FOUND:
            (code, message) = self._error_or_raw(json_error, body)
            raise ResourceNotFoundError(message, self.status, code)

        elif self.status == httplib.BAD_REQUEST:
            (code, message) = self._error_or_raw(json_error, body)
            raise InvalidRequestError(message, self.status, code)

        else:
            (code, message) = self._error_or_raw(json_error, body)
            raise GoogleBaseError(message, self.status, code)
|
||||
|
||||
|
||||
class GoogleBaseDriver(object):
    # Minimal driver stand-in referenced by the Google auth connection
    # classes (GoogleBaseAuthConnection sets driver = GoogleBaseDriver).
    name = "Google API"
|
||||
|
||||
|
||||
class GoogleBaseAuthConnection(ConnectionUserAndKey):
    """
    Base class for Google Authentication. Should be subclassed for specific
    types of authentication.
    """
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    name = 'Google Auth'
    host = 'accounts.google.com'
    auth_path = '/o/oauth2/auth'

    def __init__(self, user_id, key, scopes=None,
                 redirect_uri='urn:ietf:wg:oauth:2.0:oob',
                 login_hint=None, **kwargs):
        """
        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :param scopes: A list of urls defining the scope of authentication
                       to grant.
        :type scopes: ``list``

        :keyword redirect_uri: The Redirect URI for the authentication
                               request. See Google OAUTH2 documentation for
                               more info.
        :type redirect_uri: ``str``

        :keyword login_hint: Login hint for authentication request. Useful
                             for Installed Application authentication.
        :type login_hint: ``str``
        """
        scopes = scopes or []

        # Google expects scopes as a single space-separated string.
        self.scopes = " ".join(scopes)
        self.redirect_uri = redirect_uri
        self.login_hint = login_hint

        super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)

    def _now(self):
        # Single place where "now" (UTC) is defined; token expiry below is
        # computed relative to this.
        return datetime.datetime.utcnow()

    def add_default_headers(self, headers):
        # Token requests are form-encoded POSTs.
        headers['Content-Type'] = "application/x-www-form-urlencoded"
        headers['Host'] = self.host
        return headers

    def _token_request(self, request_body):
        """
        Return an updated token from a token request body.

        :param request_body: A dictionary of values to send in the body of the
                             token request.
        :type request_body: ``dict``

        :return: A dictionary with updated token information
        :rtype: ``dict``
        """
        data = urlencode(request_body)
        # Capture 'now' before the request so the computed expiry is
        # conservative (never later than the server's view).
        now = self._now()
        response = self.request('/o/oauth2/token', method='POST', data=data)
        token_info = response.object
        if 'expires_in' in token_info:
            # Convert the relative 'expires_in' into an absolute timestamp.
            expire_time = now + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT)
        return token_info
|
||||
|
||||
|
||||
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
    """Authentication connection for "Installed Application" authentication."""

    def get_code(self):
        """
        Give the user a URL that they can visit to authenticate and obtain a
        code. This method will ask for that code that the user can paste in.

        :return: Code supplied by the user after authenticating
        :rtype: ``str``
        """
        auth_params = {'response_type': 'code',
                       'client_id': self.user_id,
                       'redirect_uri': self.redirect_uri,
                       'scope': self.scopes,
                       'state': 'Libcloud Request'}
        if self.login_hint:
            auth_params['login_hint'] = self.login_hint

        data = urlencode(auth_params)

        url = 'https://%s%s?%s' % (self.host, self.auth_path, data)
        print('Please Go to the following URL and sign in:')
        print(url)
        # raw_input() was renamed to input() in Python 3.
        if PY3:
            code = input('Enter Code:')
        else:
            code = raw_input('Enter Code:')
        return code

    def get_new_token(self):
        """
        Get a new token. Generally used when no previous token exists or there
        is no refresh token

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # Ask the user for a code
        code = self.get_code()

        token_request = {'code': code,
                         'client_id': self.user_id,
                         'client_secret': self.key,
                         'redirect_uri': self.redirect_uri,
                         'grant_type': 'authorization_code'}

        return self._token_request(token_request)

    def refresh_token(self, token_info):
        """
        Use the refresh token supplied in the token info to get a new token.

        :param token_info: Dictionary containing current token information
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        if 'refresh_token' not in token_info:
            # Nothing to refresh with - fall back to the full flow.
            return self.get_new_token()
        refresh_request = {'refresh_token': token_info['refresh_token'],
                           'client_id': self.user_id,
                           'client_secret': self.key,
                           'grant_type': 'refresh_token'}

        new_token = self._token_request(refresh_request)
        if 'refresh_token' not in new_token:
            # The refresh response omits the refresh token; carry the old
            # one forward so future refreshes still work.
            new_token['refresh_token'] = token_info['refresh_token']
        return new_token
|
||||
|
||||
|
||||
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Authentication class for "Service Account" authentication.

    Builds and signs a JWT assertion with the account's RSA key and
    exchanges it for an OAuth2 access token; there is no refresh token in
    this flow, so "refreshing" simply requests a brand new token.
    """
    def __init__(self, user_id, key, *args, **kwargs):
        """
        Check to see if PyCrypto is available, and convert key file path into a
        key string if the key is in a file.

        :param user_id: Email address to be used for Service Account
                        authentication.
        :type user_id: ``str``

        :param key: The RSA Key or path to file containing the key.
        :type key: ``str``

        :raises GoogleAuthError: If PyCrypto (SHA256) is not importable.
        """
        # SHA256 is None when the optional PyCrypto import failed at module
        # load time; signing is impossible without it.
        if SHA256 is None:
            raise GoogleAuthError('PyCrypto library required for '
                                  'Service Account Authentication.')
        # Check to see if 'key' is a file and read the file if it is.
        keypath = os.path.expanduser(key)
        is_file_path = os.path.exists(keypath) and os.path.isfile(keypath)
        if is_file_path:
            with open(keypath, 'r') as f:
                key = f.read()
        super(GoogleServiceAcctAuthConnection, self).__init__(
            user_id, key, *args, **kwargs)

    def get_new_token(self):
        """
        Get a new token using the email address and RSA Key.

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # The header is always the same
        header = {'alg': 'RS256', 'typ': 'JWT'}
        # NOTE(review): b64encode is fed str output of json.dumps; this is
        # the Python 2 idiom -- on Python 3 it would need bytes. Confirm
        # the supported interpreter versions before changing.
        header_enc = base64.urlsafe_b64encode(json.dumps(header))

        # Construct a claim set
        # 'exp'/'iat' bound the assertion's validity to one hour from now.
        claim_set = {'iss': self.user_id,
                     'scope': self.scopes,
                     'aud': 'https://accounts.google.com/o/oauth2/token',
                     'exp': int(time.time()) + 3600,
                     'iat': int(time.time())}
        claim_set_enc = base64.urlsafe_b64encode(json.dumps(claim_set))

        # The message contains both the header and claim set
        message = '%s.%s' % (header_enc, claim_set_enc)
        # Then the message is signed using the key supplied
        key = RSA.importKey(self.key)
        hash_func = SHA256.new(message)
        signer = PKCS1_v1_5.new(key)
        signature = base64.urlsafe_b64encode(signer.sign(hash_func))

        # Finally the message and signature are sent to get a token
        jwt = '%s.%s' % (message, signature)
        request = {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
                   'assertion': jwt}

        return self._token_request(request)

    def refresh_token(self, token_info):
        """
        Refresh the current token.

        Service Account authentication doesn't supply a "refresh token" so
        this simply gets a new token using the email address/key.

        :param token_info: Dictionary containing token information.
                           (Not used, but here for compatibility)
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        return self.get_new_token()
|
||||
|
||||
|
||||
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
    """Base connection class for interacting with Google APIs.

    Handles choosing an auth flow (service account vs. installed app),
    caching the OAuth token on disk, transparently refreshing it before
    requests, and polling asynchronous operations until 'DONE'.
    """
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    host = 'www.googleapis.com'
    # Polling configuration for PollingConnection (seconds).
    poll_interval = 2.0
    timeout = 180

    def __init__(self, user_id, key, auth_type=None,
                 credential_file=None, scopes=None, **kwargs):
        """
        Determine authentication type, set up appropriate authentication
        connection and get initial authentication information.

        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :keyword auth_type: Accepted values are "SA" or "IA"
                            ("Service Account" or "Installed Application").
                            If not supplied, auth_type will be guessed based
                            on value of user_id.
        :type auth_type: ``str``

        :keyword credential_file: Path to file for caching authentication
                                  information.
        :type credential_file: ``str``

        :keyword scopes: List of OAuth2 scope URLs. The empty default sets
                         read/write access to Compute, Storage, and DNS.
        :type scopes: ``list``

        :raises GoogleAuthError: If auth_type is not "SA" or "IA".
        """
        self.credential_file = credential_file or '~/.gce_libcloud_auth'

        if auth_type is None:
            # Try to guess. Service accounts use an email address
            # as the user id.
            if '@' in user_id:
                auth_type = 'SA'
            else:
                auth_type = 'IA'

        # Default scopes to read/write for compute, storage, and dns.  Can
        # override this when calling get_driver() or setting in secrets.py
        self.scopes = scopes
        if not self.scopes:
            self.scopes = [
                'https://www.googleapis.com/auth/compute',
                'https://www.googleapis.com/auth/devstorage.full_control',
                'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
            ]
        # Reuse a previously cached token when one exists on disk.
        self.token_info = self._get_token_info_from_file()

        if auth_type == 'SA':
            self.auth_conn = GoogleServiceAcctAuthConnection(
                user_id, key, self.scopes, **kwargs)
        elif auth_type == 'IA':
            self.auth_conn = GoogleInstalledAppAuthConnection(
                user_id, key, self.scopes, **kwargs)
        else:
            raise GoogleAuthError('auth_type should be \'SA\' or \'IA\'')

        if self.token_info is None:
            # No cached token: authenticate now and persist the result.
            self.token_info = self.auth_conn.get_new_token()
            self._write_token_info_to_file()

        self.token_expire_time = datetime.datetime.strptime(
            self.token_info['expire_time'], TIMESTAMP_FORMAT)

        super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)

        # Append interpreter/platform info to the User-Agent for API stats.
        python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
                                   sys.version_info[2])
        ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
        self.user_agent_append(ver_platform)

    def _now(self):
        # Isolated so tests can mock "now"; token times are tracked in UTC.
        return datetime.datetime.utcnow()

    def add_default_headers(self, headers):
        """
        @inherits: :class:`Connection.add_default_headers`
        """
        headers['Content-Type'] = "application/json"
        headers['Host'] = self.host
        return headers

    def pre_connect_hook(self, params, headers):
        """
        Check to make sure that token hasn't expired. If it has, get an
        updated token. Also, add the token to the headers.

        @inherits: :class:`Connection.pre_connect_hook`
        """
        now = self._now()
        if self.token_expire_time < now:
            # Token expired: refresh, recompute the expiry, and update the
            # on-disk cache so other processes can reuse it.
            self.token_info = self.auth_conn.refresh_token(self.token_info)
            self.token_expire_time = datetime.datetime.strptime(
                self.token_info['expire_time'], TIMESTAMP_FORMAT)
            self._write_token_info_to_file()
        headers['Authorization'] = 'Bearer %s' % (
            self.token_info['access_token'])

        return params, headers

    def encode_data(self, data):
        """Encode data to JSON"""
        return json.dumps(data)

    def request(self, *args, **kwargs):
        """
        @inherits: :class:`Connection.request`
        """
        # Adds some retry logic for the occasional
        # "Connection Reset by peer" error.
        retries = 4
        tries = 0
        while tries < (retries - 1):
            try:
                return super(GoogleBaseConnection, self).request(
                    *args, **kwargs)
            except socket.error:
                # sys.exc_info() form keeps py2/py3 compatibility.
                e = sys.exc_info()[1]
                if e.errno == errno.ECONNRESET:
                    tries = tries + 1
                else:
                    # Any other socket error is not retryable here.
                    raise e
        # One more time, then give up.
        return super(GoogleBaseConnection, self).request(*args, **kwargs)

    def _get_token_info_from_file(self):
        """
        Read credential file and return token information.

        :return: Token information dictionary, or None
        :rtype: ``dict`` or ``None``
        """
        token_info = None
        filename = os.path.realpath(os.path.expanduser(self.credential_file))

        try:
            with open(filename, 'r') as f:
                data = f.read()
            token_info = json.loads(data)
        except IOError:
            # A missing or unreadable cache file simply means we will
            # authenticate from scratch; it is not an error.
            pass
        return token_info

    def _write_token_info_to_file(self):
        """
        Write token_info to credential file.
        """
        filename = os.path.realpath(os.path.expanduser(self.credential_file))
        data = json.dumps(self.token_info)
        with open(filename, 'w') as f:
            f.write(data)

    def has_completed(self, response):
        """
        Determine if operation has completed based on response.

        :param response: JSON response
        :type response: I{responseCls}

        :return: True if complete, False otherwise
        :rtype: ``bool``
        """
        if response.object['status'] == 'DONE':
            return True
        else:
            return False

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        @inherits: :class:`PollingConnection.get_poll_request_kwargs`
        """
        # Operations report their own canonical URL; poll that directly.
        return {'action': response.object['selfLink']}

    def morph_action_hook(self, action):
        """
        Update action to correct request path.

        In many places, the Google API returns a full URL to a resource.
        This will strip the scheme and host off of the path and just return
        the request. Otherwise, it will append the base request_path to
        the action.

        :param action: The action to be called in the http request
        :type action: ``str``

        :return: The modified request based on the action
        :rtype: ``str``
        """
        if action.startswith('https://'):
            u = urlparse.urlsplit(action)
            # Keep path, query and fragment; drop scheme and netloc.
            request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
        else:
            request = self.request_path + action
        return request
|
||||
77
awx/lib/site-packages/libcloud/common/hostvirtual.py
Normal file
77
awx/lib/site-packages/libcloud/common/hostvirtual.py
Normal file
@@ -0,0 +1,77 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.common.base import ConnectionKey, JsonResponse
|
||||
from libcloud.compute.types import InvalidCredsError
|
||||
from libcloud.common.types import LibcloudError
|
||||
|
||||
API_HOST = 'vapi.vr.org'
|
||||
|
||||
|
||||
class HostVirtualException(LibcloudError):
    """Error reported by the HostVirtual API: a numeric code plus a
    human-readable message."""

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __repr__(self):
        return '<HostVirtualException in %d: %s>' % (self.code, self.message)

    def __str__(self):
        # Delegate so str() and repr() stay in sync.
        return self.__repr__()
|
||||
|
||||
|
||||
class HostVirtualConnection(ConnectionKey):
    """Connection to the HostVirtual API.

    Every request is authenticated by attaching the API key as the
    ``key`` query parameter.
    """
    host = API_HOST

    allow_insecure = False

    def add_default_params(self, params):
        """Inject the API key into the request's query parameters."""
        params.update({'key': self.key})
        return params
|
||||
|
||||
|
||||
class HostVirtualResponse(JsonResponse):
    """JSON response wrapper that maps HostVirtual error statuses onto
    libcloud exceptions."""

    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_body(self):
        """Return the decoded JSON body, or ``None`` for an empty body."""
        if not self.body:
            return None
        return json.loads(self.body)

    def parse_error(self):
        """Raise a typed exception for known error statuses; otherwise
        return the raw body."""
        data = self.parse_body()

        status = self.status
        if status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('%(code)s:%(message)s' % (data['error']))
        if status in (httplib.PRECONDITION_FAILED, httplib.NOT_FOUND):
            raise HostVirtualException(
                data['error']['code'], data['error']['message'])

        return self.body

    def success(self):
        return self.status in self.valid_response_codes
|
||||
176
awx/lib/site-packages/libcloud/common/linode.py
Normal file
176
awx/lib/site-packages/libcloud/common/linode.py
Normal file
@@ -0,0 +1,176 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.common.base import ConnectionKey, JsonResponse
|
||||
from libcloud.common.types import InvalidCredsError
|
||||
|
||||
from libcloud.utils.py3 import PY3
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
__all__ = [
|
||||
'API_HOST',
|
||||
'API_ROOT',
|
||||
'LinodeException',
|
||||
'LinodeResponse',
|
||||
'LinodeConnection'
|
||||
]
|
||||
|
||||
# Endpoint for the Linode API
|
||||
API_HOST = 'api.linode.com'
|
||||
API_ROOT = '/'
|
||||
|
||||
# Constants that map a RAM figure to a PlanID (updated 4/25/14)
|
||||
LINODE_PLAN_IDS = {2048: '1',
|
||||
4096: '3',
|
||||
8192: '5',
|
||||
16384: '6',
|
||||
32768: '7',
|
||||
49152: '8',
|
||||
65536: '9',
|
||||
98304: '11'}
|
||||
|
||||
|
||||
class LinodeException(Exception):
    """Error originating from the Linode API.

    Wraps a Linode API error, a list of which is available in the API
    documentation. All Linode API errors are a numeric code and a
    human-readable description.
    """

    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __repr__(self):
        return "<LinodeException code %u '%s'>" % (self.code, self.message)

    def __str__(self):
        return "(%u) %s" % (self.code, self.message)
|
||||
|
||||
|
||||
class LinodeResponse(JsonResponse):
    """Linode API response

    Wraps the HTTP response returned by the Linode API, which should be JSON in
    this structure:

       {
         "ERRORARRAY": [ ... ],
         "DATA": [ ... ],
         "ACTION": " ... "
       }

    libcloud does not take advantage of batching, so a response will always
    reflect the above format. A few weird quirks are caught here as well."""
    def __init__(self, response, connection):
        """Instantiate a LinodeResponse from the HTTP response

        :keyword response: The raw response returned by urllib
        :return: parsed :class:`LinodeResponse`"""

        self.connection = connection

        self.headers = dict(response.getheaders())
        self.error = response.reason
        self.status = response.status

        self.body = self._decompress_response(body=response.read(),
                                              headers=self.headers)

        if PY3:
            self.body = b(self.body).decode('utf-8')

        # Sentinel raised/returned whenever the body is not the JSON shape
        # we expect.
        self.invalid = LinodeException(0xFF,
                                       "Invalid JSON received from server")

        # Move parse_body() to here; we can't be sure of failure until we've
        # parsed the body into JSON.
        self.objects, self.errors = self.parse_body()

        if not self.success():
            # Raise the first error, as there will usually only be one
            raise self.errors[0]

    def parse_body(self):
        """Parse the body of the response into JSON objects

        If the response chokes the parser, action and data will be returned as
        None and errorarray will indicate an invalid JSON exception.

        :return: ``list`` of objects and ``list`` of errors"""
        js = super(LinodeResponse, self).parse_body()

        try:
            if isinstance(js, dict):
                # solitary response - promote to list
                js = [js]
            ret = []
            errs = []
            for obj in js:
                if ("DATA" not in obj or "ERRORARRAY" not in obj
                        or "ACTION" not in obj):
                    ret.append(None)
                    errs.append(self.invalid)
                    continue
                ret.append(obj["DATA"])
                errs.extend(self._make_excp(e) for e in obj["ERRORARRAY"])
            return (ret, errs)
        except Exception:
            # Was a bare "except:" -- narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed, while any
            # parse/shape error is still reported as invalid JSON.
            return (None, [self.invalid])

    def success(self):
        """Check the response for success

        The way we determine success is by the presence of an error in
        ERRORARRAY. If one is there, we assume the whole request failed.

        :return: ``bool`` indicating a successful request"""
        return len(self.errors) == 0

    def _make_excp(self, error):
        """Convert an API error to a LinodeException instance

        :keyword error: JSON object containing ``ERRORCODE`` and
                        ``ERRORMESSAGE``
        :type error: dict"""
        if "ERRORCODE" not in error or "ERRORMESSAGE" not in error:
            return None
        if error["ERRORCODE"] == 4:
            # Error code 4 is Linode's "authentication failed".
            return InvalidCredsError(error["ERRORMESSAGE"])
        return LinodeException(error["ERRORCODE"], error["ERRORMESSAGE"])
|
||||
|
||||
|
||||
class LinodeConnection(ConnectionKey):
    """
    A connection to the Linode API

    Wraps SSL connections to the Linode API, automagically injecting the
    parameters that the API needs for each request.
    """
    host = API_HOST
    responseCls = LinodeResponse

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request

        This method adds ``api_key`` and ``api_responseFormat`` to
        the request.
        """
        # Pin the response format explicitly in case the API's default
        # ever changes.
        params.update({"api_key": self.key,
                       "api_responseFormat": "json"})
        return params
|
||||
652
awx/lib/site-packages/libcloud/common/openstack.py
Normal file
652
awx/lib/site-packages/libcloud/common/openstack.py
Normal file
@@ -0,0 +1,652 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Common utilities for OpenStack
|
||||
"""
|
||||
import sys
|
||||
import datetime
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.iso8601 import parse_date
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey, Response
|
||||
from libcloud.compute.types import (LibcloudError, InvalidCredsError,
|
||||
MalformedResponseError)
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
AUTH_API_VERSION = '1.1'
|
||||
|
||||
# Auth versions which contain token expiration information.
|
||||
AUTH_VERSIONS_WITH_EXPIRES = [
|
||||
'1.1',
|
||||
'2.0',
|
||||
'2.0_apikey',
|
||||
'2.0_password'
|
||||
]
|
||||
|
||||
# How many seconds to substract from the auth token expiration time before
|
||||
# testing if the token is still valid.
|
||||
# The time is subtracted to account for the HTTP request latency and prevent
|
||||
# user from getting "InvalidCredsError" if token is about to expire.
|
||||
AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5
|
||||
|
||||
__all__ = [
|
||||
'OpenStackBaseConnection',
|
||||
'OpenStackAuthConnection',
|
||||
'OpenStackServiceCatalog',
|
||||
'OpenStackDriverMixin',
|
||||
|
||||
'AUTH_TOKEN_EXPIRES_GRACE_SECONDS'
|
||||
]
|
||||
|
||||
|
||||
# @TODO: Refactor for re-use by other openstack drivers
|
||||
class OpenStackAuthResponse(Response):
    """Response wrapper for the OpenStack authentication endpoints.

    Auth endpoints signal failures through status codes that the caller
    inspects directly, so ``success`` always returns True and the body is
    parsed according to its Content-Type.
    """

    def success(self):
        # Callers check resp.status themselves; never mark the response
        # as a hard failure here.
        return True

    def parse_body(self):
        """Parse the body based on the response's Content-Type header.

        :return: Decoded JSON for ``application/json`` responses, the raw
                 body string for anything else, or ``None`` when empty.
        :raises LibcloudError: If no content-type header is present.
        :raises MalformedResponseError: If a JSON body fails to parse.
        """
        if not self.body:
            return None

        if 'content-type' in self.headers:
            key = 'content-type'
        elif 'Content-Type' in self.headers:
            key = 'Content-Type'
        else:
            raise LibcloudError('Missing content-type header',
                                driver=OpenStackAuthConnection)

        content_type = self.headers[key]
        # Strip any media-type parameters, e.g. "; charset=UTF-8".
        if content_type.find(';') != -1:
            content_type = content_type.split(';')[0]

        if content_type == 'application/json':
            try:
                data = json.loads(self.body)
            except Exception:
                # Was a bare "except:" -- narrowed so SystemExit and
                # KeyboardInterrupt are not swallowed.
                raise MalformedResponseError('Failed to parse JSON',
                                             body=self.body,
                                             driver=OpenStackAuthConnection)
        else:
            # text/plain and any unrecognized content type both fell
            # through to the raw body in the original code; merged here.
            data = self.body

        return data
|
||||
|
||||
|
||||
class OpenStackAuthConnection(ConnectionUserAndKey):
    """Connection that authenticates against an OpenStack/Keystone
    identity endpoint, supporting auth API versions 1.0, 1.1 and the
    2.0 apikey/password variants.  Caches the token and (where the
    version supports it) its expiry time.
    """

    responseCls = OpenStackAuthResponse
    name = 'OpenStack Auth'
    timeout = None

    def __init__(self, parent_conn, auth_url, auth_version, user_id, key,
                 tenant_name=None, timeout=None):
        """
        :param parent_conn: The service connection on whose behalf we
                            authenticate; its driver and connection classes
                            are reused.
        :param auth_url: Full URL of the identity endpoint.
        :type auth_url: ``str``
        :param auth_version: One of "1.0", "1.1", "2.0", "2.0_apikey",
                             "2.0_password".
        :type auth_version: ``str``
        :param user_id: Username used to authenticate.
        :param key: Password or API key, depending on auth_version.
        :keyword tenant_name: Optional tenant for 2.0-style requests.
        """
        self.parent_conn = parent_conn
        # enable tests to use the same mock connection classes.
        self.conn_classes = parent_conn.conn_classes

        super(OpenStackAuthConnection, self).__init__(
            user_id, key, url=auth_url, timeout=timeout)

        self.auth_version = auth_version
        self.auth_url = auth_url
        self.driver = self.parent_conn.driver
        self.tenant_name = tenant_name
        self.timeout = timeout

        # Populated by the authenticate_* methods below.
        self.urls = {}
        self.auth_token = None
        self.auth_token_expires = None
        self.auth_user_info = None

    def morph_action_hook(self, action):
        # If the auth_url carries its own path, that path wins over the
        # per-call action.
        (_, _, _, request_path) = self._tuple_from_url(self.auth_url)

        if request_path == '':
            # No path is provided in the auth_url, use action passed to this
            # method.
            return action

        return request_path

    def add_default_headers(self, headers):
        # All auth exchanges are JSON in both directions.
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json; charset=UTF-8'
        return headers

    def authenticate(self, force=False):
        """
        Authenticate against the keystone api.

        :param force: Forcefully update the token even if it's already cached
                      and still valid.
        :type force: ``bool``
        """
        if not force and self.auth_version in AUTH_VERSIONS_WITH_EXPIRES \
           and self.is_token_valid():
            # If token is still valid, there is no need to re-authenticate
            return self

        if self.auth_version == "1.0":
            return self.authenticate_1_0()
        elif self.auth_version == "1.1":
            return self.authenticate_1_1()
        elif self.auth_version == "2.0" or self.auth_version == "2.0_apikey":
            return self.authenticate_2_0_with_apikey()
        elif self.auth_version == "2.0_password":
            return self.authenticate_2_0_with_password()
        else:
            raise LibcloudError('Unsupported Auth Version requested')

    def authenticate_1_0(self):
        """Legacy 1.0 auth: credentials in headers, endpoints returned
        as response headers (no expiry information available)."""
        headers = {
            'X-Auth-User': self.user_id,
            'X-Auth-Key': self.key,
        }

        resp = self.request('/v1.0', headers=headers, method='GET')

        if resp.status == httplib.UNAUTHORIZED:
            # HTTP UNAUTHORIZED (401): auth failed
            raise InvalidCredsError()
        elif resp.status not in [httplib.NO_CONTENT, httplib.OK]:
            body = 'code: %s body:%s headers:%s' % (resp.status,
                                                    resp.body,
                                                    resp.headers)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            headers = resp.headers
            # emulate the auth 1.1 URL list
            self.urls = {}
            self.urls['cloudServers'] = \
                [{'publicURL': headers.get('x-server-management-url', None)}]
            self.urls['cloudFilesCDN'] = \
                [{'publicURL': headers.get('x-cdn-management-url', None)}]
            self.urls['cloudFiles'] = \
                [{'publicURL': headers.get('x-storage-url', None)}]
            self.auth_token = headers.get('x-auth-token', None)
            self.auth_user_info = None

            if not self.auth_token:
                raise MalformedResponseError('Missing X-Auth-Token in \
                    response headers')

        return self

    def authenticate_1_1(self):
        """1.1 auth: JSON credentials, token + serviceCatalog + expiry in
        the JSON response body."""
        reqbody = json.dumps({'credentials': {'username': self.user_id,
                                              'key': self.key}})
        resp = self.request('/v1.1/auth', data=reqbody, headers={},
                            method='POST')

        if resp.status == httplib.UNAUTHORIZED:
            # HTTP UNAUTHORIZED (401): auth failed
            raise InvalidCredsError()
        elif resp.status != httplib.OK:
            body = 'code: %s body:%s' % (resp.status, resp.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            try:
                body = json.loads(resp.body)
            except Exception:
                e = sys.exc_info()[1]
                raise MalformedResponseError('Failed to parse JSON', e)

            try:
                expires = body['auth']['token']['expires']

                self.auth_token = body['auth']['token']['id']
                self.auth_token_expires = parse_date(expires)
                self.urls = body['auth']['serviceCatalog']
                self.auth_user_info = None
            except KeyError:
                e = sys.exc_info()[1]
                raise MalformedResponseError('Auth JSON response is \
                    missing required elements', e)

        return self

    def authenticate_2_0_with_apikey(self):
        # API Key based authentication uses the RAX-KSKEY extension.
        # http://s.apache.org/oAi
        data = {'auth':
                {'RAX-KSKEY:apiKeyCredentials':
                 {'username': self.user_id, 'apiKey': self.key}}}
        if self.tenant_name:
            data['auth']['tenantName'] = self.tenant_name
        reqbody = json.dumps(data)
        return self.authenticate_2_0_with_body(reqbody)

    def authenticate_2_0_with_password(self):
        # Password based authentication is the only 'core' authentication
        # method in Keystone at this time.
        # 'keystone' - http://s.apache.org/e8h
        data = {'auth':
                {'passwordCredentials':
                 {'username': self.user_id, 'password': self.key}}}
        if self.tenant_name:
            data['auth']['tenantName'] = self.tenant_name
        reqbody = json.dumps(data)
        return self.authenticate_2_0_with_body(reqbody)

    def authenticate_2_0_with_body(self, reqbody):
        """Shared 2.0 POST /v2.0/tokens exchange used by both the apikey
        and password flavours."""
        resp = self.request('/v2.0/tokens', data=reqbody,
                            headers={'Content-Type': 'application/json'},
                            method='POST')
        if resp.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        elif resp.status not in [httplib.OK,
                                 httplib.NON_AUTHORITATIVE_INFORMATION]:
            body = 'code: %s body: %s' % (resp.status, resp.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            try:
                body = json.loads(resp.body)
            except Exception:
                e = sys.exc_info()[1]
                raise MalformedResponseError('Failed to parse JSON', e)

            try:
                access = body['access']
                expires = access['token']['expires']

                self.auth_token = access['token']['id']
                self.auth_token_expires = parse_date(expires)
                self.urls = access['serviceCatalog']
                self.auth_user_info = access.get('user', {})
            except KeyError:
                e = sys.exc_info()[1]
                raise MalformedResponseError('Auth JSON response is \
                    missing required elements', e)

        return self

    def is_token_valid(self):
        """
        Return True if the current auth token is already cached and hasn't
        expired yet.

        :return: ``True`` if the token is still valid, ``False`` otherwise.
        :rtype: ``bool``
        """
        if not self.auth_token:
            return False

        if not self.auth_token_expires:
            return False

        # Subtract a grace window so a token about to expire is treated
        # as already expired (covers request latency).
        expires = self.auth_token_expires - \
            datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS)

        time_tuple_expires = expires.utctimetuple()
        time_tuple_now = datetime.datetime.utcnow().utctimetuple()

        if time_tuple_now < time_tuple_expires:
            return True

        return False
|
||||
|
||||
|
||||
class OpenStackServiceCatalog(object):
    """
    Parsed representation of an OpenStack identity service catalog.

    http://docs.openstack.org/api/openstack-identity-service/2.0/content/

    Instantiate this class with the contents of the 'serviceCatalog'
    member of the auth response. It works out which services actually
    exist in the catalog and indexes them by type, name and region
    where those attributes are available.
    """

    _auth_version = None
    _service_catalog = None

    def __init__(self, service_catalog, ex_force_auth_version=None):
        self._auth_version = ex_force_auth_version or AUTH_API_VERSION
        self._service_catalog = {}

        # Match on a substring because there are a couple of different
        # "2.0_*" auth flavors.
        if '2.0' in self._auth_version:
            self._parse_auth_v2(service_catalog)
        elif '1.1' in self._auth_version or '1.0' in self._auth_version:
            self._parse_auth_v1(service_catalog)
        else:
            raise LibcloudError('auth version "%s" not supported'
                                % (self._auth_version))

    def get_catalog(self):
        """Return the parsed catalog dictionary."""
        return self._service_catalog

    def get_public_urls(self, service_type=None, name=None):
        """Return the publicURL of every endpoint matching the filters."""
        matches = self.get_endpoints(service_type=service_type, name=name)
        return [ep['publicURL'] for ep in matches if 'publicURL' in ep]

    def get_endpoints(self, service_type=None, name=None):
        """Return the first endpoint of every region for the service."""
        if '2.0' in self._auth_version:
            per_region = self._service_catalog.get(service_type, {}) \
                                              .get(name, {})
        elif '1.1' in self._auth_version or '1.0' in self._auth_version:
            per_region = self._service_catalog.get(name, {})

        return [entries[0] for entries in per_region.values()]

    def get_endpoint(self, service_type=None, name=None, region=None):
        """
        Return the endpoint uniquely identified by the supplied filters,
        or an empty dict when it is absent or ambiguous.
        """
        if '2.0' in self._auth_version:
            candidates = self._service_catalog.get(service_type, {}) \
                                              .get(name, {}) \
                                              .get(region, [])
        elif '1.1' in self._auth_version or '1.0' in self._auth_version:
            candidates = self._service_catalog.get(name, {}).get(region, [])

        # Ideally an endpoint is either absent or matched exactly once.
        if len(candidates) == 1:
            return candidates[0]
        return {}

    def _parse_auth_v1(self, service_catalog):
        # v1 catalogs map a service name directly to its endpoint list;
        # group those endpoints per region.
        for service, endpoints in service_catalog.items():
            self._service_catalog[service] = {}
            regions = self._service_catalog[service]

            for endpoint in endpoints:
                regions.setdefault(endpoint.get('region'), []).append(endpoint)

    def _parse_auth_v2(self, service_catalog):
        # v2 catalogs are a list of service dicts carrying their own
        # type, name and endpoint list.
        for service in service_catalog:
            service_type = service['type']
            service_name = service.get('name', None)

            by_name = self._service_catalog.setdefault(service_type, {})
            by_region = by_name.setdefault(service_name, {})

            for endpoint in service.get('endpoints', []):
                region = endpoint.get('region', None)
                by_region.setdefault(region, []).append(endpoint)
|
||||
|
||||
|
||||
class OpenStackBaseConnection(ConnectionUserAndKey):

    """
    Base class for OpenStack connections.

    :param user_id: User name to use when authenticating
    :type user_id: ``str``

    :param key: Secret to use when authenticating.
    :type key: ``str``

    :param secure: Use HTTPS? (True by default.)
    :type secure: ``bool``

    :param ex_force_base_url: Base URL for connection requests. If
        not specified, this will be determined by authenticating.
    :type ex_force_base_url: ``str``

    :param ex_force_auth_url: Base URL for authentication requests.
    :type ex_force_auth_url: ``str``

    :param ex_force_auth_version: Authentication version to use. If
        not specified, defaults to AUTH_API_VERSION.
    :type ex_force_auth_version: ``str``

    :param ex_force_auth_token: Authentication token to use for
        connection requests. If specified, the connection will not attempt
        to authenticate, and the value of ex_force_base_url will be used to
        determine the base request URL. If ex_force_auth_token is passed in,
        ex_force_base_url must also be provided.
    :type ex_force_auth_token: ``str``

    :param ex_tenant_name: When authenticating, provide this tenant
        name to the identity service. A scoped token will be returned.
        Some cloud providers require the tenant name to be provided at
        authentication time. Others will use a default tenant if none
        is provided.
    :type ex_tenant_name: ``str``

    :param ex_force_service_type: Service type to use when selecting an
        service. If not specified, a provider specific default will be used.
    :type ex_force_service_type: ``str``

    :param ex_force_service_name: Service name to use when selecting an
        service. If not specified, a provider specific default will be used.
    :type ex_force_service_name: ``str``

    :param ex_force_service_region: Region to use when selecting an
        service. If not specified, a provider specific default will be used.
    :type ex_force_service_region: ``str``
    """

    # Identity service URL; subclasses typically set this.
    auth_url = None
    # Cached token and its metadata, filled in after authentication.
    auth_token = None
    auth_token_expires = None
    auth_user_info = None
    # Parsed OpenStackServiceCatalog, filled in after authentication.
    service_catalog = None
    # Provider-specific defaults for endpoint selection.
    service_type = None
    service_name = None
    service_region = None
    _auth_version = None

    def __init__(self, user_id, key, secure=True,
                 host=None, port=None, timeout=None,
                 ex_force_base_url=None,
                 ex_force_auth_url=None,
                 ex_force_auth_version=None,
                 ex_force_auth_token=None,
                 ex_tenant_name=None,
                 ex_force_service_type=None,
                 ex_force_service_name=None,
                 ex_force_service_region=None):
        # NOTE(review): host/port are accepted but not forwarded to the
        # parent constructor here; the actual host is derived from the
        # auth response in _populate_hosts_and_request_paths.
        super(OpenStackBaseConnection, self).__init__(
            user_id, key, secure=secure, timeout=timeout)

        if ex_force_auth_version:
            self._auth_version = ex_force_auth_version

        # Stash all user supplied overrides for later use.
        self._ex_force_base_url = ex_force_base_url
        self._ex_force_auth_url = ex_force_auth_url
        self._ex_force_auth_token = ex_force_auth_token
        self._ex_tenant_name = ex_tenant_name
        self._ex_force_service_type = ex_force_service_type
        self._ex_force_service_name = ex_force_service_name
        self._ex_force_service_region = ex_force_service_region

        # A pre-supplied token skips authentication entirely, so the
        # base URL cannot be discovered and must be given explicitly.
        if ex_force_auth_token and not ex_force_base_url:
            raise LibcloudError(
                'Must also provide ex_force_base_url when specifying '
                'ex_force_auth_token.')

        if ex_force_auth_token:
            self.auth_token = ex_force_auth_token

        if not self._auth_version:
            self._auth_version = AUTH_API_VERSION

        auth_url = self._get_auth_url()

        if not auth_url:
            raise LibcloudError('OpenStack instance must ' +
                                'have auth_url set')

        # The auth connection is created eagerly but only used lazily
        # from _populate_hosts_and_request_paths.
        osa = OpenStackAuthConnection(self, auth_url, self._auth_version,
                                      self.user_id, self.key,
                                      tenant_name=self._ex_tenant_name,
                                      timeout=self.timeout)
        self._osa = osa

    def _get_auth_url(self):
        """
        Retrieve auth url for this instance using either the
        "ex_force_auth_url" constructor kwarg or the "auth_url" class
        variable (the former takes precedence).
        """
        auth_url = self.auth_url

        if self._ex_force_auth_url is not None:
            auth_url = self._ex_force_auth_url

        return auth_url

    def get_service_catalog(self):
        """
        Return the service catalog, authenticating first if it has not
        been retrieved yet.
        """
        if self.service_catalog is None:
            self._populate_hosts_and_request_paths()

        return self.service_catalog

    def get_endpoint(self):
        """
        Selects the endpoint to use based on provider specific values,
        or overrides passed in by the user when setting up the driver.

        :returns: url of the relevant endpoint for the driver
        """
        service_type = self.service_type
        service_name = self.service_name
        service_region = self.service_region
        # User supplied ex_force_* values override provider defaults.
        if self._ex_force_service_type:
            service_type = self._ex_force_service_type
        if self._ex_force_service_name:
            service_name = self._ex_force_service_name
        if self._ex_force_service_region:
            service_region = self._ex_force_service_region

        ep = self.service_catalog.get_endpoint(service_type=service_type,
                                               name=service_name,
                                               region=service_region)
        if 'publicURL' in ep:
            return ep['publicURL']

        raise LibcloudError('Could not find specified endpoint')

    def add_default_headers(self, headers):
        """Attach the auth token and accept format to every request."""
        headers['X-Auth-Token'] = self.auth_token
        headers['Accept'] = self.accept_format
        return headers

    def morph_action_hook(self, action):
        """Ensure authentication happened before any request is issued."""
        self._populate_hosts_and_request_paths()
        return super(OpenStackBaseConnection, self).morph_action_hook(action)

    def request(self, **kwargs):
        # Plain delegation; kept so subclasses have a stable override point.
        return super(OpenStackBaseConnection, self).request(**kwargs)

    def _set_up_connection_info(self, url):
        """Split ``url`` into host/port/scheme/path connection state."""
        result = self._tuple_from_url(url)
        (self.host, self.port, self.secure, self.request_path) = result

    def _populate_hosts_and_request_paths(self):
        """
        OpenStack uses a separate host for API calls which is only provided
        after an initial authentication request.
        """
        osa = self._osa

        if self._ex_force_auth_token:
            # If ex_force_auth_token is provided we always hit the api directly
            # and never try to authenticate.
            #
            # Note: When ex_force_auth_token is provided, ex_force_base_url
            # must be provided as well.
            self._set_up_connection_info(url=self._ex_force_base_url)
            return

        if not osa.is_token_valid():
            # Token is not available or it has expired. Need to retrieve a
            # new one.
            osa.authenticate()  # may throw InvalidCreds

            self.auth_token = osa.auth_token
            self.auth_token_expires = osa.auth_token_expires
            self.auth_user_info = osa.auth_user_info

            # Pull out and parse the service catalog
            osc = OpenStackServiceCatalog(
                osa.urls, ex_force_auth_version=self._auth_version)
            self.service_catalog = osc

        url = self._ex_force_base_url or self.get_endpoint()
        self._set_up_connection_info(url=url)
|
||||
|
||||
|
||||
class OpenStackDriverMixin(object):
    """
    Mixin that captures the OpenStack specific ``ex_force_*`` /
    ``ex_tenant_name`` driver keyword arguments so they can later be
    forwarded to the underlying connection.
    """

    # Keyword arguments recognised by this mixin; each is stored on the
    # instance under the same name prefixed with an underscore.
    _OPENSTACK_KWARGS = (
        'ex_force_base_url',
        'ex_force_auth_token',
        'ex_force_auth_url',
        'ex_force_auth_version',
        'ex_tenant_name',
        'ex_force_service_type',
        'ex_force_service_name',
        'ex_force_service_region',
    )

    def __init__(self, *args, **kwargs):
        for arg_name in self._OPENSTACK_KWARGS:
            setattr(self, '_' + arg_name, kwargs.get(arg_name, None))

    def openstack_connection_kwargs(self):
        """
        Return the subset of stored OpenStack arguments that were
        actually provided, keyed by their original keyword names.

        :rtype: ``dict``
        """
        provided = {}
        for arg_name in self._OPENSTACK_KWARGS:
            value = getattr(self, '_' + arg_name)
            if value:
                provided[arg_name] = value
        return provided
|
||||
24
awx/lib/site-packages/libcloud/common/rackspace.py
Normal file
24
awx/lib/site-packages/libcloud/common/rackspace.py
Normal file
@@ -0,0 +1,24 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Common settings for Rackspace Cloud Servers and Cloud Files
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'AUTH_URL'
|
||||
]
|
||||
|
||||
AUTH_URL = 'https://auth.api.rackspacecloud.com'
|
||||
144
awx/lib/site-packages/libcloud/common/types.py
Normal file
144
awx/lib/site-packages/libcloud/common/types.py
Normal file
@@ -0,0 +1,144 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
|
||||
__all__ = [
|
||||
"LibcloudError",
|
||||
"MalformedResponseError",
|
||||
"ProviderError",
|
||||
"InvalidCredsError",
|
||||
"InvalidCredsException",
|
||||
"LazyList"
|
||||
]
|
||||
|
||||
|
||||
class LibcloudError(Exception):
    """The base class for other libcloud exceptions."""

    def __init__(self, value, driver=None):
        self.value = value
        self.driver = driver

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Keep the exact legacy formatting: driver and value repr'ed.
        return '<LibcloudError in %s %s>' % (repr(self.driver),
                                             repr(self.value))
|
||||
|
||||
|
||||
class MalformedResponseError(LibcloudError):
    """
    Raised when a provider returns a malformed response, e.g. you
    request JSON and the provider returns '<h3>something</h3>' due to
    some error on their side.
    """

    def __init__(self, value, body=None, driver=None):
        self.value = value
        self.driver = driver
        self.body = body

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Historic class name in the message is intentionally preserved.
        return '<MalformedResponseException in %s %s>: %s' % (
            repr(self.driver), repr(self.value), repr(self.body))
|
||||
|
||||
|
||||
class ProviderError(LibcloudError):
    """
    Raised when the provider gives back an error response
    (HTTP 4xx, 5xx) for a request.

    Specific sub types can be derived for errors like
    HTTP 401 : InvalidCredsError
    HTTP 404 : NodeNotFoundError, ContainerDoesNotExistError
    """

    def __init__(self, value, http_code, driver=None):
        # The parent stores value/driver; we only add the HTTP code.
        super(ProviderError, self).__init__(value=value, driver=driver)
        self.http_code = http_code

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return repr(self.value)
|
||||
|
||||
|
||||
class InvalidCredsError(ProviderError):
    """Raised when invalid credentials are used with a provider."""

    def __init__(self, value='Invalid credentials with the provider',
                 driver=None):
        # Always maps to HTTP 401 Unauthorized.
        super(InvalidCredsError, self).__init__(
            value, http_code=httplib.UNAUTHORIZED, driver=driver)


# Deprecated alias of :class:`InvalidCredsError`
InvalidCredsException = InvalidCredsError
|
||||
|
||||
|
||||
class LazyList(object):
    """
    A sequence that fetches its items on demand through a paginated
    ``get_more`` callback and caches them once retrieved.
    """

    def __init__(self, get_more, value_dict=None):
        """
        :param get_more: Callable invoked as
            ``get_more(last_key=..., value_dict=...)`` and returning a
            tuple ``(items, last_key, exhausted)``.
        :param value_dict: Extra state passed through to ``get_more``.
        """
        self._data = []
        self._last_key = None
        self._exhausted = False
        self._all_loaded = False
        self._get_more = get_more
        self._value_dict = value_dict or {}

    def __iter__(self):
        if not self._all_loaded:
            self._load_all()

        for item in self._data:
            yield item

    def __getitem__(self, index):
        # Only hit the backend when the requested index is not cached yet.
        if index >= len(self._data) and not self._all_loaded:
            self._load_all()

        return self._data[index]

    def __len__(self):
        self._load_all()
        return len(self._data)

    def __repr__(self):
        self._load_all()
        return '[%s]' % (', '.join(repr(item) for item in self._data))

    def _load_all(self):
        # Keep requesting pages until the callback reports exhaustion.
        while not self._exhausted:
            page, self._last_key, self._exhausted = \
                self._get_more(last_key=self._last_key,
                               value_dict=self._value_dict)
            self._data.extend(page)
        self._all_loaded = True
|
||||
108
awx/lib/site-packages/libcloud/common/xmlrpc.py
Normal file
108
awx/lib/site-packages/libcloud/common/xmlrpc.py
Normal file
@@ -0,0 +1,108 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Base classes for working with xmlrpc APIs
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
from libcloud.utils.py3 import xmlrpclib
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.common.base import Response, Connection
|
||||
|
||||
|
||||
class ProtocolError(Exception):
    """Raised when the server returns a response that is not valid XML-RPC."""
|
||||
|
||||
|
||||
class ErrorCodeMixin(object):
    """
    Helper for APIs with a well defined collection of error codes that
    are easily parsed out of error messages. Acts as a factory: looks
    up the exception class registered for an error code, pulls any
    parameters that class needs out of the connection context, and
    raises the exception.
    """

    # Mapping of API error code -> exception class to raise for it.
    exceptions = {}

    def raise_exception_for_error(self, error_code, message):
        """
        Raise the exception registered for ``error_code``, if any.

        Unknown codes are ignored so callers can invoke this
        unconditionally before falling back to a generic error.
        """
        exception_cls = self.exceptions.get(error_code, None)
        if exception_cls is None:
            return

        context = self.connection.context
        # Exception classes may declare a 'kwargs' attribute naming the
        # context entries they want forwarded as keyword arguments.
        extra = dict((key, context[key])
                     for key in getattr(exception_cls, 'kwargs', ())
                     if key in context)
        raise exception_cls(value=message,
                            driver=self.connection.driver, **extra)
|
||||
|
||||
|
||||
class XMLRPCResponse(ErrorCodeMixin, Response):
    """Response wrapper that demarshals XML-RPC payloads."""

    # Raised for faults whose code has no registered exception class.
    defaultExceptionCls = Exception

    def success(self):
        """A call succeeded when the transport-level status is 200 OK."""
        return self.status == httplib.OK

    def parse_body(self):
        """
        Demarshal the XML-RPC payload.

        Single-value responses are unwrapped. Faults are first offered
        to :meth:`raise_exception_for_error`; if no class is registered
        for the fault code, ``defaultExceptionCls`` is raised instead.
        """
        try:
            payload, _methodname = xmlrpclib.loads(self.body)
            if len(payload) == 1:
                payload = payload[0]
            return payload
        except xmlrpclib.Fault:
            # sys.exc_info() keeps py2.5 compatibility with the rest of
            # the file.
            fault = sys.exc_info()[1]
            self.raise_exception_for_error(fault.faultCode,
                                           fault.faultString)
            raise self.defaultExceptionCls(
                '%s: %s' % (fault.faultCode, fault.faultString))

    def parse_error(self):
        raise ProtocolError(
            'Server returned an invalid xmlrpc response (%d)'
            % (self.status))
|
||||
|
||||
|
||||
class XMLRPCConnection(Connection):
    """
    Connection class which can call XMLRPC based API's.

    Uses the xmlrpclib marshalling and demarshalling code but the HTTP
    transports provided by libcloud, giving it better certificate
    validation and debugging helpers than the core client library.
    """

    responseCls = XMLRPCResponse

    def add_default_headers(self, headers):
        # XML-RPC payloads are always posted as text/xml.
        headers['Content-Type'] = 'text/xml'
        return headers

    def request(self, method_name, *args, **kwargs):
        """
        Call a given `method_name`.

        :type method_name: ``str``
        :param method_name: A method exposed by the xmlrpc endpoint that
            you are connecting to.

        :type args: ``tuple``
        :param args: Arguments to invoke with method with.
        """
        payload = xmlrpclib.dumps(args, methodname=method_name,
                                  allow_none=True)
        target = kwargs.get('endpoint', self.endpoint)
        return super(XMLRPCConnection, self).request(target,
                                                     data=payload,
                                                     method='POST')
|
||||
3
awx/lib/site-packages/libcloud/compute/__init__.py
Normal file
3
awx/lib/site-packages/libcloud/compute/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""
|
||||
Module for working with Cloud Servers
|
||||
"""
|
||||
1477
awx/lib/site-packages/libcloud/compute/base.py
Normal file
1477
awx/lib/site-packages/libcloud/compute/base.py
Normal file
File diff suppressed because it is too large
Load Diff
263
awx/lib/site-packages/libcloud/compute/deployment.py
Normal file
263
awx/lib/site-packages/libcloud/compute/deployment.py
Normal file
@@ -0,0 +1,263 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Provides generic deployment steps for machines post boot.
|
||||
"""
|
||||
|
||||
from __future__ import with_statement
|
||||
|
||||
import os
|
||||
import binascii
|
||||
|
||||
from libcloud.utils.py3 import basestring, PY3
|
||||
|
||||
|
||||
class Deployment(object):
    """
    Base class for deployment tasks.
    """

    def run(self, node, client):
        """
        Runs this deployment task on node using the client provided.

        :type node: :class:`Node`
        :keyword node: Node to operate on

        :type client: :class:`BaseSSHClient`
        :keyword client: Connected SSH client to use.

        :return: :class:`Node`
        """
        raise NotImplementedError(
            'run not implemented for this deployment')

    def _get_string_value(self, argument_name, argument_value):
        # Accept either a string or a readable file-like object and
        # normalize to a string.
        if not isinstance(argument_value, basestring) and \
           not hasattr(argument_value, 'read'):
            raise TypeError('%s argument must be a string or a file-like '
                            'object' % (argument_name))

        if hasattr(argument_value, 'read'):
            return argument_value.read()
        return argument_value
|
||||
|
||||
|
||||
class SSHKeyDeployment(Deployment):
    """
    Installs a public SSH Key onto a server.
    """

    def __init__(self, key):
        """
        :type key: ``str`` or :class:`File` object
        :keyword key: Contents of the public key to write, or a file
            object it can be read from.
        """
        self.key = self._get_string_value(argument_value=key,
                                          argument_name='key')

    def run(self, node, client):
        """
        Installs SSH key into ``.ssh/authorized_keys``

        See also :class:`Deployment.run`
        """
        # Append mode so existing authorized keys are preserved.
        client.put(".ssh/authorized_keys", contents=self.key, mode='a')
        return node
|
||||
|
||||
|
||||
class FileDeployment(Deployment):
    """
    Installs a file on the server.
    """

    def __init__(self, source, target):
        """
        :type source: ``str``
        :keyword source: Local path of file to be installed

        :type target: ``str``
        :keyword target: Path to install file on node
        """
        self.source = source
        self.target = target

    def run(self, node, client):
        """
        Upload the file, retaining permissions.

        See also :class:`Deployment.run`
        """
        # Extract the permission bits (e.g. 0755) from the source
        # file's mode so they can be replicated on the node.
        perms = int(oct(os.stat(self.source).st_mode)[4:], 8)

        with open(self.source, 'rb') as handle:
            payload = handle.read()

        client.put(path=self.target, chmod=perms, contents=payload)
        return node
|
||||
|
||||
|
||||
class ScriptDeployment(Deployment):
    """
    Runs an arbitrary shell script on the server.

    This step works by first writing the content of the shell script
    (script argument) to a \*.sh file on the remote server and then
    running that file.

    If you are running a non-shell script, make sure to put the
    appropriate shebang at the top of the script. You are also advised
    to do that even if you are running a plain shell script.
    """

    def __init__(self, script, args=None, name=None, delete=False):
        """
        :type script: ``str``
        :keyword script: Contents of the script to run.

        :type args: ``list``
        :keyword args: Optional command line arguments which get passed
            to the deployment script file.

        :type name: ``str``
        :keyword name: Name to upload the script as; a random name is
            chosen when not specified.

        :type delete: ``bool``
        :keyword delete: Whether to delete the script on completion.
        """
        self.script = self._get_string_value(argument_name='script',
                                             argument_value=script)
        self.args = args or []
        # Filled in by run().
        self.stdout = None
        self.stderr = None
        self.exit_status = None
        self.delete = delete
        self.name = name

        if self.name is None:
            # File is put under the user's home directory as
            # ~/libcloud_deployment_<random_string>.sh
            suffix = binascii.hexlify(os.urandom(4)).decode('ascii')
            self.name = 'libcloud_deployment_%s.sh' % (suffix)

    def run(self, node, client):
        """
        Uploads the shell script and then executes it.

        See also :class:`Deployment.run`
        """
        file_path = client.put(path=self.name, chmod=int('755', 8),
                               contents=self.script)

        if self.name[0] == '/':
            name = self.name
        else:
            # Pre-pend cwd if the user specified a relative path.
            name = os.path.join(os.path.dirname(file_path), self.name)

        # Append any arguments to the command line.
        cmd = '%s %s' % (name, ' '.join(self.args)) if self.args else name

        self.stdout, self.stderr, self.exit_status = client.run(cmd)

        if self.delete:
            client.delete(self.name)

        return node
|
||||
|
||||
|
||||
class ScriptFileDeployment(ScriptDeployment):
    """
    Runs an arbitrary shell script from a local file on the server.
    Same as ScriptDeployment, except that you pass in a path to the
    file instead of the script content.
    """

    def __init__(self, script_file, args=None, name=None, delete=False):
        """
        :type script_file: ``str``
        :keyword script_file: Path to a file containing the script to run.

        :type args: ``list``
        :keyword args: Optional command line arguments which get passed
            to the deployment script file.

        :type name: ``str``
        :keyword name: Name to upload the script as; a random name is
            chosen when not specified.

        :type delete: ``bool``
        :keyword delete: Whether to delete the script on completion.
        """
        with open(script_file, 'rb') as handle:
            script_content = handle.read()

        # The file is read as bytes; under Python 3 the parent class
        # expects text, so decode it.
        if PY3:
            script_content = script_content.decode('utf-8')

        super(ScriptFileDeployment, self).__init__(script=script_content,
                                                   args=args,
                                                   name=name,
                                                   delete=delete)
|
||||
|
||||
|
||||
class MultiStepDeployment(Deployment):
    """
    Runs a chain of Deployment steps.
    """

    def __init__(self, add=None):
        """
        :type add: ``list``
        :keyword add: Deployment steps to add.
        """
        self.steps = []
        self.add(add)

    def add(self, add):
        """
        Add a deployment to this chain.

        :type add: Single :class:`Deployment` or a ``list`` of
            :class:`Deployment`
        :keyword add: Adds this deployment to the others already in this
            object.
        """
        if add is None:
            return
        # Accept either a single step or a sequence of steps.
        if not isinstance(add, (list, tuple)):
            add = [add]
        self.steps.extend(add)

    def run(self, node, client):
        """
        Run each deployment that has been added.

        See also :class:`Deployment.run`
        """
        for step in self.steps:
            node = step.run(node, client)
        return node
|
||||
42
awx/lib/site-packages/libcloud/compute/drivers/__init__.py
Normal file
42
awx/lib/site-packages/libcloud/compute/drivers/__init__.py
Normal file
@@ -0,0 +1,42 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Drivers for working with different providers
|
||||
"""
|
||||
|
||||
# Public list of driver sub-modules exposed by this package.
# NOTE: order is preserved from the original definition.
__all__ = [
    'abiquo',
    'brightbox',
    'bluebox',
    'dummy',
    'ec2',
    'ecp',
    'elasticstack',
    'elastichosts',
    'cloudsigma',
    'gce',
    'gogrid',
    'hostvirtual',
    'ibm_sce',
    'linode',
    'opennebula',
    'rackspace',
    'rimuhosting',
    'softlayer',
    'vcloud',
    'voxel',
    'vpsnet',
]
|
||||
759
awx/lib/site-packages/libcloud/compute/drivers/abiquo.py
Normal file
759
awx/lib/site-packages/libcloud/compute/drivers/abiquo.py
Normal file
@@ -0,0 +1,759 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Abiquo Compute Driver
|
||||
|
||||
The driver implements the compute Abiquo functionality for the Abiquo API.
|
||||
This version is compatible with the following versions of Abiquo:
|
||||
|
||||
* Abiquo 2.0 (http://wiki.abiquo.com/display/ABI20/The+Abiquo+API)
|
||||
* Abiquo 2.2 (http://wiki.abiquo.com/display/ABI22/The+Abiquo+API)
|
||||
"""
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
from libcloud.compute.base import NodeDriver, NodeSize
|
||||
from libcloud.compute.types import Provider, LibcloudError
|
||||
from libcloud.common.abiquo import (AbiquoConnection, get_href,
|
||||
AbiquoResponse)
|
||||
from libcloud.compute.base import NodeLocation, NodeImage, Node
|
||||
from libcloud.utils.py3 import tostring
|
||||
|
||||
|
||||
class AbiquoNodeDriver(NodeDriver):
    """
    Implements the :class:`NodeDriver`'s for the Abiquo Compute Provider
    """

    type = Provider.ABIQUO
    name = 'Abiquo'
    website = 'http://www.abiquo.com/'
    connectionCls = AbiquoConnection
    timeout = 2000  # some images take a lot of time!

    # Media Types understood by the Abiquo API
    NODES_MIME_TYPE = 'application/vnd.abiquo.virtualmachineswithnode+xml'
    NODE_MIME_TYPE = 'application/vnd.abiquo.virtualmachinewithnode+xml'
    VAPP_MIME_TYPE = 'application/vnd.abiquo.virtualappliance+xml'
    VM_TASK_MIME_TYPE = 'application/vnd.abiquo.virtualmachinetask+xml'

    # Other constants
    GIGABYTE = 1073741824  # bytes per GB; disk sizes are sent in bytes

    def __init__(self, user_id, secret, endpoint, **kwargs):
        """
        Initializes Abiquo Driver

        Initializes the :class:`NodeDriver` object and populates the cache.

        :param user_id: identifier of Abiquo user (required)
        :type user_id: ``str``
        :param secret: password of the Abiquo user (required)
        :type secret: ``str``
        :param endpoint: Abiquo API endpoint (required)
        :type endpoint: ``str`` that can be parsed as URL
        """
        self.endpoint = endpoint
        super(AbiquoNodeDriver, self).__init__(key=user_id, secret=secret,
                                               secure=False, host=None,
                                               port=None, **kwargs)
        self.ex_populate_cache()

    def create_node(self, **kwargs):
        """
        Create a new node instance in Abiquo

        All the :class:`Node`s need to be defined inside a VirtualAppliance
        (called :class:`NodeGroup` here). If there is no group name defined,
        'libcloud' name will be used instead.

        This method wraps these Abiquo actions:

        1. Create a group if it does not exist.
        2. Register a new node in the group.
        3. Deploy the node and boot it.
        4. Retrieve it again to get schedule-time attributes (such as ips
           and vnc ports).

        The rest of the driver methods have been created in a way that, if
        any of these actions fail, the user can not reach an inconsistent
        state.

        :keyword name: The name for this new node (required)
        :type name: ``str``

        :keyword size: The size of resources allocated to this node.
        :type size: :class:`NodeSize`

        :keyword image: OS Image to boot on node. (required)
        :type image: :class:`NodeImage`

        :keyword location: Which data center to create a node in. If empty,
                           undefined behavior will be selected. (optional)
        :type location: :class:`NodeLocation`

        :keyword group_name: Which group this node belongs to. If empty,
                             it will be created into 'libcloud' group. If
                             no group is found in the target location
                             (random location if you have not set the
                             parameter), then it will create a new group
                             with this name.
        :type group_name: c{str}

        :return: The newly created node.
        :rtype: :class:`Node`
        """
        # Define the location.
        # 'xml_loc' is the xml element we navigate into (we need links);
        # 'loc' is the :class:`NodeLocation` entity.
        xml_loc, loc = self._define_create_node_location(**kwargs)

        # Define the Group
        group = self._define_create_node_group(xml_loc, loc, **kwargs)

        # Register the Node
        vm = self._define_create_node_node(group, **kwargs)

        # Execute the 'create' in hypervisor action
        self._deploy_remote(vm)

        # Retrieve it again, to get some schedule-time defined values
        edit_vm = get_href(vm, 'edit')
        headers = {'Accept': self.NODE_MIME_TYPE}
        vm = self.connection.request(edit_vm, headers=headers).object
        return self._to_node(vm, self)

    def destroy_node(self, node):
        """
        Destroy a node

        Depending on the provider, this may destroy all data associated with
        the node, including backups.

        :param node: The node to be destroyed
        :type node: :class:`Node`

        :return: True if the destroy was successful, otherwise False
        :rtype: ``bool``
        """
        # Refresh node state
        e_vm = self.connection.request(node.extra['uri_id']).object
        state = e_vm.findtext('state')

        if state in ['ALLOCATED', 'CONFIGURED', 'LOCKED', 'UNKNOWN']:
            raise LibcloudError('Invalid Node state', self)

        if state != 'NOT_ALLOCATED':
            # The node is deployed somewhere: force an undeploy first and
            # bail out if the asynchronous task does not succeed.
            vm_task = ET.Element('virtualmachinetask')
            force_undeploy = ET.SubElement(vm_task, 'forceUndeploy')
            force_undeploy.text = 'True'
            destroy_uri = node.extra['uri_id'] + '/action/undeploy'
            headers = {'Content-type': self.VM_TASK_MIME_TYPE}
            res = self.connection.async_request(action=destroy_uri,
                                                method='POST',
                                                data=tostring(vm_task),
                                                headers=headers)
            if not res.async_success():
                return False

        # Either it was never allocated or the undeploy succeeded:
        # unregister the node.
        self.connection.request(action=node.extra['uri_id'],
                                method='DELETE')
        return True

    def ex_run_node(self, node):
        """
        Runs a node

        Here there is a bit of difference between Abiquo states and libcloud
        states, so this method is created to have better compatibility. In
        libcloud, if the node is not running, then it does not exist
        (avoiding UNKNOWN and temporal states). In Abiquo, you can define a
        node, and then deploy it.

        If the node is in :class:`NodeState.TERMINATED` libcloud's state and
        in 'NOT_DEPLOYED' Abiquo state, there is a way to run and recover it
        for libcloud using this method. There is no way to reach this state
        if you are using only libcloud, but you may have used another Abiquo
        client and now you want to recover your node to be used by libcloud.

        :param node: The node to run
        :type node: :class:`Node`

        :return: The node itself, but with the new state
        :rtype: :class:`Node`
        """
        # Refresh node state
        e_vm = self.connection.request(node.extra['uri_id']).object
        state = e_vm.findtext('state')

        if state != 'NOT_ALLOCATED':
            raise LibcloudError('Invalid Node state', self)

        # Deploy the Node
        self._deploy_remote(e_vm)

        # Retrieve it again, to get some schedule-defined values.
        edit_vm = get_href(e_vm, 'edit')
        headers = {'Accept': self.NODE_MIME_TYPE}
        e_vm = self.connection.request(edit_vm, headers=headers).object
        return self._to_node(e_vm, self)

    def ex_populate_cache(self):
        """
        Populate the cache.

        For each connection, it is good to store some objects that will be
        useful for further requests, such as the 'user' and the 'enterprise'
        objects.

        Executes the 'login' resource after setting the connection
        parameters and, if the execution is successful, it sets the 'user'
        object into cache. After that, it also requests the 'enterprise'
        and 'locations' data.

        List of locations should remain the same for a single libcloud
        connection. However, this method is public and you are able to
        refresh the list of locations any time.
        """
        user = self.connection.request('/login').object
        self.connection.cache['user'] = user
        e_ent = get_href(self.connection.cache['user'],
                         'enterprise')
        ent = self.connection.request(e_ent).object
        self.connection.cache['enterprise'] = ent

        uri_vdcs = '/cloud/virtualdatacenters'
        e_vdcs = self.connection.request(uri_vdcs).object

        # Build a dict datacenter-href -> datacenter element for a further
        # search when mapping virtual datacenters to locations.
        params = {"idEnterprise": self._get_enterprise_id()}
        e_dcs = self.connection.request('/admin/datacenters',
                                        params=params).object
        dc_dict = {}
        for dc in e_dcs.findall('datacenter'):
            key = get_href(dc, 'edit')
            dc_dict[key] = dc

        # Populate locations cache. We save the 'edit' link of each virtual
        # datacenter keyed by its :class:`NodeLocation` so we don't have to
        # extend the NodeLocation class.
        self.connection.cache['locations'] = {}
        for e_vdc in e_vdcs.findall('virtualDatacenter'):
            dc_link = get_href(e_vdc, 'datacenter')
            loc = self._to_location(e_vdc, dc_dict[dc_link], self)
            self.connection.cache['locations'][loc] = get_href(e_vdc, 'edit')

    def ex_create_group(self, name, location=None):
        """
        Create an empty group.

        You can specify the location as well.

        :param name: name of the group (required)
        :type name: ``str``

        :param location: location where to create the group
        :type location: :class:`NodeLocation`

        :returns: the created group
        :rtype: :class:`NodeGroup`
        """
        # prepare the element
        vapp = ET.Element('virtualAppliance')
        vapp_name = ET.SubElement(vapp, 'name')
        vapp_name.text = name

        if location is None:
            location = self.list_locations()[0]
        elif location not in self.list_locations():
            raise LibcloudError('Location does not exist')

        link_vdc = self.connection.cache['locations'][location]
        e_vdc = self.connection.request(link_vdc).object

        creation_link = get_href(e_vdc, 'virtualappliances')
        headers = {'Content-type': self.VAPP_MIME_TYPE}
        vapp = self.connection.request(creation_link, data=tostring(vapp),
                                       headers=headers, method='POST').object

        uri_vapp = get_href(vapp, 'edit')

        return NodeGroup(self, vapp.findtext('name'),
                         uri=uri_vapp)

    def ex_destroy_group(self, group):
        """
        Destroy a group.

        Be careful! Destroying a group means destroying all the
        :class:`Node`s there and the group itself!

        If there is currently any action over any :class:`Node` of the
        :class:`NodeGroup`, then the method will raise an exception.

        :param group: The group (required)
        :type group: :class:`NodeGroup`

        :return: If the group was destroyed successfully
        :rtype: ``bool``
        """
        # Refresh group state
        e_group = self.connection.request(group.uri).object
        state = e_group.findtext('state')

        if state not in ['NOT_DEPLOYED', 'DEPLOYED']:
            error = 'Can not destroy group because of current state'
            raise LibcloudError(error, self)

        if state == 'DEPLOYED':
            # prepare the element that forces the undeploy
            vm_task = ET.Element('virtualmachinetask')
            force_undeploy = ET.SubElement(vm_task, 'forceUndeploy')
            force_undeploy.text = 'True'

            # Set the URI
            undeploy_uri = group.uri + '/action/undeploy'

            # Prepare the headers
            headers = {'Content-type': self.VM_TASK_MIME_TYPE}
            res = self.connection.async_request(action=undeploy_uri,
                                                method='POST',
                                                data=tostring(vm_task),
                                                headers=headers)
            if not res.async_success():
                return False

        # The group is no longer deployed. Unregister it.
        self.connection.request(action=group.uri,
                                method='DELETE')
        return True

    def ex_list_groups(self, location=None):
        """
        List all groups.

        :param location: filter the groups by location (optional)
        :type location: a :class:`NodeLocation` instance.

        :return: the list of :class:`NodeGroup`
        """
        groups = []
        for vdc in self._get_locations(location):
            link_vdc = self.connection.cache['locations'][vdc]
            e_vdc = self.connection.request(link_vdc).object
            apps_link = get_href(e_vdc, 'virtualappliances')
            vapps = self.connection.request(apps_link).object
            for vapp in vapps.findall('virtualAppliance'):
                nodes = []
                vms_link = get_href(vapp, 'virtualmachines')
                headers = {'Accept': self.NODES_MIME_TYPE}
                vms = self.connection.request(vms_link,
                                              headers=headers).object
                for vm in vms.findall('virtualmachinewithnode'):
                    nodes.append(self._to_node(vm, self))

                group = NodeGroup(self, vapp.findtext('name'),
                                  nodes, get_href(vapp, 'edit'))
                groups.append(group)

        return groups

    def list_images(self, location=None):
        """
        List images on Abiquo Repositories

        :keyword location: The location to list images for.
        :type location: :class:`NodeLocation`

        :return: list of node image objects
        :rtype: ``list`` of :class:`NodeImage`
        """
        enterprise_id = self._get_enterprise_id()
        uri = '/admin/enterprises/%s/datacenterrepositories/' % (
            enterprise_id)
        repos = self.connection.request(uri).object

        images = []
        for repo in repos.findall('datacenterRepository'):
            # Filter by location: skip when the datacenter of the location
            # is different from the 'datacenterRepository' element's.
            for vdc in self._get_locations(location):
                # Check if the virtual datacenter belongs to this repo
                link_vdc = self.connection.cache['locations'][vdc]
                e_vdc = self.connection.request(link_vdc).object
                dc_link_vdc = get_href(e_vdc, 'datacenter')
                dc_link_repo = get_href(repo, 'datacenter')

                if dc_link_vdc == dc_link_repo:
                    # Filter the templates by hypervisor compatibility
                    url_templates = get_href(repo, 'virtualmachinetemplates')
                    hypervisor_type = e_vdc.findtext('hypervisorType')
                    params = {'hypervisorTypeName': hypervisor_type}
                    templates = self.connection.request(
                        url_templates, params=params).object
                    for templ in templates.findall('virtualMachineTemplate'):
                        # Avoid duplicated templates
                        id_template = templ.findtext('id')
                        ids = [image.id for image in images]
                        if id_template not in ids:
                            images.append(self._to_nodeimage(
                                templ, self, get_href(repo, 'edit')))

        return images

    def list_locations(self):
        """
        Return list of locations where the user has access to.

        :return: the list of :class:`NodeLocation` available for the
                 current user
        :rtype: ``list`` of :class:`NodeLocation`
        """
        return list(self.connection.cache['locations'].keys())

    def list_nodes(self, location=None):
        """
        List all nodes.

        :param location: Filter the groups by location (optional)
        :type location: a :class:`NodeLocation` instance.

        :return: List of node objects
        :rtype: ``list`` of :class:`Node`
        """
        nodes = []

        for group in self.ex_list_groups(location):
            nodes.extend(group.nodes)

        return nodes

    def list_sizes(self, location=None):
        """
        List sizes on a provider.

        Abiquo does not work with sizes. However, this method returns a
        list of predefined ones (copied from :class:`DummyNodeDriver` but
        without price or bandwidth) to help users create their own.

        If you call the method :class:`AbiquoNodeDriver.create_node` with
        the size informed, it will just override the 'ram' value of the
        'image' template, so sizes are of limited use here.

        :return: The list of sizes
        :rtype: ``list`` of :class:`NodeSizes`
        """
        return [
            NodeSize(id=1,
                     name='Small',
                     ram=128,
                     disk=4,
                     bandwidth=None,
                     price=None,
                     driver=self),
            NodeSize(id=2,
                     name='Medium',
                     ram=512,
                     disk=16,
                     bandwidth=None,
                     price=None,
                     driver=self),
            NodeSize(id=3,
                     name='Big',
                     ram=4096,
                     disk=32,
                     bandwidth=None,
                     price=None,
                     driver=self),
            NodeSize(id=4,
                     name="XXL Big",
                     ram=4096 * 2,
                     disk=32 * 4,
                     bandwidth=None,
                     price=None,
                     driver=self)
        ]

    def reboot_node(self, node):
        """
        Reboot a node.

        :param node: The node to be rebooted
        :type node: :class:`Node`

        :return: True if the reboot was successful, otherwise False
        :rtype: ``bool``
        """
        reboot_uri = node.extra['uri_id'] + '/action/reset'
        res = self.connection.async_request(action=reboot_uri, method='POST')
        return res.async_success()

    # -------------------------
    # Extension methods
    # -------------------------

    def _ex_connection_class_kwargs(self):
        """
        Set the endpoint as an extra :class:`AbiquoConnection` argument.

        According to Connection code, the "url" argument should be
        parsed properly to connection.

        :return: ``dict`` of :class:`AbiquoConnection` input arguments
        """
        return {'url': self.endpoint}

    def _deploy_remote(self, e_vm):
        """
        Asynchronous call to create the node.

        Raises :class:`LibcloudError` if the deploy task does not succeed.
        """
        # prepare the element that forces the deploy
        vm_task = ET.Element('virtualmachinetask')
        force_deploy = ET.SubElement(vm_task, 'forceEnterpriseSoftLimits')
        force_deploy.text = 'True'

        # Prepare the headers
        headers = {'Content-type': self.VM_TASK_MIME_TYPE}
        link_deploy = get_href(e_vm, 'deploy')
        res = self.connection.async_request(action=link_deploy, method='POST',
                                            data=tostring(vm_task),
                                            headers=headers)
        if not res.async_success():
            raise LibcloudError('Could not run the node', self)

    def _to_location(self, vdc, dc, driver):
        """
        Generates the :class:`NodeLocation` class.
        """
        identifier = vdc.findtext('id')
        name = vdc.findtext('name')
        country = dc.findtext('name')
        return NodeLocation(identifier, name, country, driver)

    def _to_node(self, vm, driver):
        """
        Generates the :class:`Node` class.
        """
        identifier = vm.findtext('id')
        name = vm.findtext('nodeName')
        state = AbiquoResponse.NODE_STATE_MAP[vm.findtext('state')]

        link_image = get_href(vm, 'virtualmachinetemplate')
        image_element = self.connection.request(link_image).object
        repo_link = get_href(image_element, 'datacenterrepository')
        image = self._to_nodeimage(image_element, self, repo_link)

        # Fill the 'ips' data
        private_ips = []
        public_ips = []
        nics_element = self.connection.request(get_href(vm, 'nics')).object
        for nic in nics_element.findall('nic'):
            ip = nic.findtext('ip')
            for link in nic.findall('link'):
                rel = link.attrib['rel']
                if rel == 'privatenetwork':
                    private_ips.append(ip)
                elif rel in ['publicnetwork', 'externalnetwork',
                             'unmanagednetwork']:
                    public_ips.append(ip)

        extra = {'uri_id': get_href(vm, 'edit')}

        # BUGFIX: the guard previously looked for element 'vdrpIp' while the
        # values below are read from 'vdrpIP'; with the mismatched casing the
        # guard never matched and the vdrp data was always dropped.
        if vm.find('vdrpIP') is not None:
            extra['vdrp_ip'] = vm.findtext('vdrpIP')
            extra['vdrp_port'] = vm.findtext('vdrpPort')

        return Node(identifier, name, state, public_ips, private_ips,
                    driver, image=image, extra=extra)

    def _to_nodeimage(self, template, driver, repo):
        """
        Generates the :class:`NodeImage` class.
        """
        identifier = template.findtext('id')
        name = template.findtext('name')
        url = get_href(template, 'edit')
        extra = {'repo': repo, 'url': url}
        return NodeImage(identifier, name, driver, extra)

    def _get_locations(self, location=None):
        """
        Returns the locations as a generator.

        If ``location`` is given, yield only that one; otherwise yield
        every cached location.
        """
        if location is not None:
            yield location
        else:
            for loc in self.list_locations():
                yield loc

    def _get_enterprise_id(self):
        """
        Returns the identifier of the logged user's enterprise.
        """
        return self.connection.cache['enterprise'].findtext('id')

    def _define_create_node_location(self, **kwargs):
        """
        Search for a location where to create the node.

        Based on the 'create_node' **kwargs argument, decide in which
        location the node will be created.
        """
        # First, get image location
        if 'image' not in kwargs:
            error = "'image' parameter is mandatory"
            raise LibcloudError(error, self)

        image = kwargs['image']

        # Get the location argument
        location = None
        if 'location' in kwargs:
            location = kwargs['location']
            if location not in self.list_locations():
                raise LibcloudError('Location does not exist')

        # Check if the image is compatible with any of the locations or
        # the input location
        loc = None
        target_loc = None
        for candidate_loc in self._get_locations(location):
            link_vdc = self.connection.cache['locations'][candidate_loc]
            e_vdc = self.connection.request(link_vdc).object
            for img in self.list_images(candidate_loc):
                if img.id == image.id:
                    loc = e_vdc
                    target_loc = candidate_loc
                    break

        if loc is None:
            error = 'The image can not be used in any location'
            raise LibcloudError(error, self)

        return loc, target_loc

    def _define_create_node_group(self, xml_loc, loc, **kwargs):
        """
        Search for a group where to create the node.

        If we can not find any group, create it into argument 'location'.
        """
        if 'group_name' not in kwargs:
            group_name = NodeGroup.DEFAULT_GROUP_NAME
        else:
            group_name = kwargs['group_name']

        # We search if the group is already defined in the location
        groups_link = get_href(xml_loc, 'virtualappliances')
        vapps_element = self.connection.request(groups_link).object
        for vapp in vapps_element.findall('virtualAppliance'):
            if vapp.findtext('name') == group_name:
                uri_vapp = get_href(vapp, 'edit')
                return NodeGroup(self, vapp.findtext('name'), uri=uri_vapp)

        # Target group not found: create it. Since it is an extension of
        # the basic 'libcloud' functionality, we try to be as flexible as
        # possible.
        return self.ex_create_group(group_name, loc)

    def _define_create_node_node(self, group, **kwargs):
        """
        Defines the node before creating it.

        In Abiquo, you first need to 'register' or 'define' the node in
        the API before creating it in the target hypervisor.
        """
        vm = ET.Element('virtualmachinewithnode')
        if 'name' in kwargs:
            vmname = ET.SubElement(vm, 'nodeName')
            vmname.text = kwargs['name']
        attrib = {'type': 'application/vnd.abiquo/virtualmachinetemplate+xml',
                  'rel': 'virtualmachinetemplate',
                  'href': kwargs['image'].extra['url']}
        ET.SubElement(vm, 'link', attrib=attrib)
        headers = {'Content-type': self.NODE_MIME_TYPE}

        if 'size' in kwargs:
            # Override the 'NodeSize' data
            ram = ET.SubElement(vm, 'ram')
            ram.text = str(kwargs['size'].ram)
            hd = ET.SubElement(vm, 'hdInBytes')
            hd.text = str(int(kwargs['size'].disk) * self.GIGABYTE)

        # Create the virtual machine
        nodes_link = group.uri + '/virtualmachines'
        vm = self.connection.request(nodes_link, data=tostring(vm),
                                     headers=headers, method='POST').object
        edit_vm = get_href(vm, 'edit')
        headers = {'Accept': self.NODE_MIME_TYPE}

        return self.connection.request(edit_vm, headers=headers).object
|
||||
|
||||
|
||||
class NodeGroup(object):
    """
    Group of virtual machines that can be managed together

    All :class:`Node`s in Abiquo must be defined inside a Virtual Appliance.
    We offer a way to handle virtual appliances (called NodeGroup to
    maintain some kind of name conventions here) inside the
    :class:`AbiquoNodeDriver` without breaking compatibility of the rest of
    libcloud API.

    If the user does not want to handle groups, all the virtual machines
    will be created inside a group named 'libcloud'
    """
    DEFAULT_GROUP_NAME = 'libcloud'

    def __init__(self, driver, name=DEFAULT_GROUP_NAME, nodes=None, uri=''):
        """
        Initialize a new group object.

        :param driver: the :class:`AbiquoNodeDriver` managing this group
        :param name:   group (virtual appliance) name
        :param nodes:  optional ``list`` of :class:`Node` in the group
        :param uri:    'edit' link of the virtual appliance
        """
        self.driver = driver
        self.name = name
        # BUGFIX: the default used to be a shared mutable list (``nodes=[]``),
        # so every group created without an explicit node list aliased the
        # same object; mutating one group's nodes leaked into all others.
        self.nodes = nodes if nodes is not None else []
        self.uri = uri

    def __repr__(self):
        return (('<NodeGroup: name=%s, nodes=[%s] >')
                % (self.name, ",".join(map(str, self.nodes))))

    def destroy(self):
        """
        Destroys the group delegating the execution to
        :class:`AbiquoNodeDriver`.
        """
        return self.driver.ex_destroy_group(self)
|
||||
235
awx/lib/site-packages/libcloud/compute/drivers/bluebox.py
Normal file
235
awx/lib/site-packages/libcloud/compute/drivers/bluebox.py
Normal file
@@ -0,0 +1,235 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
libcloud driver for the Blue Box Blocks API
|
||||
|
||||
This driver implements all libcloud functionality for the Blue Box Blocks API.
|
||||
|
||||
Blue Box home page http://bluebox.net
|
||||
Blue Box API documentation https://boxpanel.bluebox
|
||||
.net/public/the_vault/index.php/Blocks_API
|
||||
"""
|
||||
|
||||
import copy
|
||||
import base64
|
||||
|
||||
from libcloud.utils.py3 import urlencode
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.types import NodeState, InvalidCredsError
|
||||
from libcloud.compute.base import Node, NodeDriver
|
||||
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
|
||||
from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
|
||||
|
||||
# Current end point for Blue Box API.
BLUEBOX_API_HOST = "boxpanel.bluebox.net"

# The API doesn't currently expose all of the required values for libcloud,
# so we simply list what's available right now, along with all of the
# various attributes that are needed by libcloud.
BLUEBOX_INSTANCE_TYPES = {
    '1gb': {
        'id': '94fd37a7-2606-47f7-84d5-9000deda52ae',
        'name': 'Block 1GB Virtual Server',
        'ram': 1024,
        'disk': 20,
        'cpu': 0.5,
    },
    '2gb': {
        'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092',
        'name': 'Block 2GB Virtual Server',
        'ram': 2048,
        'disk': 25,
        'cpu': 1,
    },
    '4gb': {
        'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58',
        'name': 'Block 4GB Virtual Server',
        'ram': 4096,
        'disk': 50,
        'cpu': 2,
    },
    '8gb': {
        'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251',
        'name': 'Block 8GB Virtual Server',
        'ram': 8192,
        'disk': 100,
        'cpu': 4,
    },
}

# MB of RAM allotted per CPU; used to derive CPU count from a size's RAM.
RAM_PER_CPU = 2048
|
||||
|
||||
# Map Bluebox API block statuses onto libcloud NodeState values.
NODE_STATE_MAP = {
    'queued': NodeState.PENDING,
    'building': NodeState.PENDING,
    'running': NodeState.RUNNING,
    'error': NodeState.TERMINATED,
    'unknown': NodeState.UNKNOWN,
}
|
||||
|
||||
|
||||
class BlueboxResponse(JsonResponse):
    """JSON response wrapper for the Bluebox API."""

    def parse_error(self):
        """Raise :class:`InvalidCredsError` on HTTP 401; for any other
        error status return the raw response body as the error payload.
        """
        if int(self.status) != 401:
            return self.body
        if self.body:
            raise InvalidCredsError(self.body)
        raise InvalidCredsError(str(self.status) + ': ' + self.error)
|
||||
|
||||
|
||||
class BlueboxNodeSize(NodeSize):
    """NodeSize variant that additionally carries the CPU allocation
    ('cpu') of a Bluebox Block.
    """

    def __init__(self, id, name, cpu, ram, disk, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.price = price
        self.driver = driver

    def __repr__(self):
        template = ('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s, disk=%s, '
                    'price=%s, driver=%s ...>')
        fields = (self.id, self.name, self.cpu, self.ram, self.disk,
                  self.price, self.driver.name)
        return template % fields
|
||||
|
||||
|
||||
class BlueboxConnection(ConnectionUserAndKey):
    """
    Connection to the Bluebox Blocks API, authenticated with HTTP Basic
    credentials built from the customer id and API key.
    """

    host = BLUEBOX_API_HOST
    secure = True
    responseCls = BlueboxResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        # Basic auth token: base64("user_id:key")
        credentials = '%s:%s' % (self.user_id, self.key)
        token = base64.b64encode(b(credentials))
        headers['Authorization'] = 'Basic %s' % (token)
        return headers
|
||||
|
||||
|
||||
class BlueboxNodeDriver(NodeDriver):
    """
    Bluebox Blocks node driver
    """

    connectionCls = BlueboxConnection
    type = Provider.BLUEBOX
    api_name = 'bluebox'
    name = 'Bluebox Blocks'
    website = 'http://bluebox.net'
    features = {'create_node': ['ssh_key', 'password']}

    def list_nodes(self):
        """Return all Blocks in the account as :class:`Node` objects."""
        result = self.connection.request('/api/blocks.json')
        return [self._to_node(i) for i in result.object]

    def list_sizes(self, location=None):
        """Return the statically defined Block sizes with pricing attached.

        :param location: ignored (Bluebox sizes are location-independent)
        :rtype: ``list`` of :class:`BlueboxNodeSize`
        """
        sizes = []
        for key, values in list(BLUEBOX_INSTANCE_TYPES.items()):
            # Copy so the module-level table is never mutated.
            attributes = copy.deepcopy(values)
            attributes.update({'price': self._get_size_price(size_id=key)})
            sizes.append(BlueboxNodeSize(driver=self.connection.driver,
                                         **attributes))

        return sizes

    def list_images(self, location=None):
        """Return the available Block templates as :class:`NodeImage`."""
        result = self.connection.request('/api/block_templates.json')
        # FIX: was a loop doing images.extend([self._to_image(image)]);
        # a comprehension is equivalent and clearer.
        return [self._to_image(image) for image in result.object]

    def create_node(self, **kwargs):
        """Create a new Block.

        :keyword name: hostname for the new node (required)
        :keyword image: template to deploy (required)
        :keyword size: Block product size (required)
        :keyword auth: NodeAuthSSHKey or NodeAuthPassword
        :keyword ex_username: optional account username on the new node
        :raises Exception: if neither an SSH key nor a password is given
        """
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}

        # FIX: removed a duplicated `size = kwargs["size"]` assignment that
        # appeared both before and after the name/image lookups.
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        auth = self._get_and_check_auth(kwargs.get('auth'))

        data = {
            'hostname': name,
            'product': size.id,
            'template': image.id
        }

        ssh = None
        password = None

        if isinstance(auth, NodeAuthSSHKey):
            ssh = auth.pubkey
            data.update(ssh_public_key=ssh)
        elif isinstance(auth, NodeAuthPassword):
            password = auth.password
            data.update(password=password)

        if "ex_username" in kwargs:
            data.update(username=kwargs["ex_username"])

        if not ssh and not password:
            raise Exception("SSH public key or password required.")

        params = urlencode(data)
        result = self.connection.request('/api/blocks.json', headers=headers,
                                         data=params, method='POST')
        node = self._to_node(result.object)

        # Surface an auto-generated password so the caller can retrieve it.
        if getattr(auth, "generated", False):
            node.extra['password'] = auth.password

        return node

    def destroy_node(self, node):
        """Destroy the given Block. Returns ``True`` on HTTP 200."""
        url = '/api/blocks/%s.json' % (node.id)
        result = self.connection.request(url, method='DELETE')

        return result.status == 200

    def list_locations(self):
        """Bluebox exposes a single location (Seattle, US)."""
        return [NodeLocation(0, "Blue Box Seattle US", 'US', self)]

    def reboot_node(self, node):
        """Reboot the given Block. Returns ``True`` on HTTP 200."""
        url = '/api/blocks/%s/reboot.json' % (node.id)
        result = self.connection.request(url, method="PUT")
        return result.status == 200

    def _to_node(self, vm):
        """Convert an API block dict into a :class:`Node`."""
        # FIX: the old lookup NODE_STATE_MAP[vm.get('status',
        # NodeState.UNKNOWN)] raised KeyError whenever 'status' was missing
        # or held an unmapped value; fall back to UNKNOWN instead.
        state = NODE_STATE_MAP.get(vm.get('status'), NodeState.UNKNOWN)
        n = Node(id=vm['id'],
                 name=vm['hostname'],
                 state=state,
                 public_ips=[ip['address'] for ip in vm['ips']],
                 private_ips=[],
                 extra={'storage': vm['storage'], 'cpu': vm['cpu']},
                 driver=self.connection.driver)
        return n

    def _to_image(self, image):
        """Convert an API template dict into a :class:`NodeImage`."""
        image = NodeImage(id=image['id'],
                          name=image['description'],
                          driver=self.connection.driver)
        return image
|
||||
306
awx/lib/site-packages/libcloud/compute/drivers/brightbox.py
Normal file
306
awx/lib/site-packages/libcloud/compute/drivers/brightbox.py
Normal file
@@ -0,0 +1,306 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Brightbox Driver
|
||||
"""
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.brightbox import BrightboxConnection
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
from libcloud.compute.base import NodeDriver
|
||||
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
|
||||
|
||||
import base64
|
||||
|
||||
|
||||
# Brightbox API version; used to build every request path below.
API_VERSION = '1.0'
|
||||
|
||||
|
||||
def _extract(d, keys):
|
||||
return dict((k, d[k]) for k in keys if k in d and d[k] is not None)
|
||||
|
||||
|
||||
class BrightboxNodeDriver(NodeDriver):
    """
    Brightbox node driver
    """

    connectionCls = BrightboxConnection

    type = Provider.BRIGHTBOX
    name = 'Brightbox'
    website = 'http://www.brightbox.co.uk/'

    # Maps Brightbox server status strings onto libcloud NodeState values.
    NODE_STATE_MAP = {'creating': NodeState.PENDING,
                      'active': NodeState.RUNNING,
                      'inactive': NodeState.UNKNOWN,
                      'deleting': NodeState.UNKNOWN,
                      'deleted': NodeState.TERMINATED,
                      'failed': NodeState.UNKNOWN,
                      'unavailable': NodeState.UNKNOWN}

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 api_version=API_VERSION, **kwargs):
        super(BrightboxNodeDriver, self).__init__(key=key, secret=secret,
                                                  secure=secure,
                                                  host=host, port=port,
                                                  api_version=api_version,
                                                  **kwargs)

    def _to_node(self, data):
        """Convert an API server dict into a :class:`Node`."""
        extra_data = _extract(data, ['fqdn', 'user_data', 'status',
                                     'interfaces', 'snapshots',
                                     'server_groups', 'hostname',
                                     'started_at', 'created_at',
                                     'deleted_at'])
        extra_data['zone'] = self._to_location(data['zone'])

        # IPv6 addresses ride on the interfaces; IPv4 interface addresses
        # are private, and the public ones come from the cloud IPs.
        ipv6_addresses = [interface['ipv6_address'] for interface
                          in data['interfaces'] if 'ipv6_address' in interface]

        private_ips = [interface['ipv4_address']
                       for interface in data['interfaces']
                       if 'ipv4_address' in interface]

        public_ips = [cloud_ip['public_ip'] for cloud_ip in data['cloud_ips']]
        public_ips += ipv6_addresses

        return Node(
            id=data['id'],
            name=data['name'],
            state=self.NODE_STATE_MAP[data['status']],
            private_ips=private_ips,
            public_ips=public_ips,
            driver=self.connection.driver,
            size=self._to_size(data['server_type']),
            image=self._to_image(data['image']),
            extra=extra_data
        )

    def _to_image(self, data):
        """Convert an API image dict into a :class:`NodeImage`."""
        extra_data = _extract(data, ['arch', 'compatibility_mode',
                                     'created_at', 'description',
                                     'disk_size', 'min_ram', 'official',
                                     'owner', 'public', 'source',
                                     'source_type', 'status', 'username',
                                     'virtual_size', 'licence_name'])

        # Images can reference a parent image; convert it recursively.
        if data.get('ancestor', None):
            extra_data['ancestor'] = self._to_image(data['ancestor'])

        return NodeImage(
            id=data['id'],
            name=data['name'],
            driver=self,
            extra=extra_data
        )

    def _to_size(self, data):
        """Convert an API server_type dict into a :class:`NodeSize`."""
        return NodeSize(
            id=data['id'],
            name=data['name'],
            ram=data['ram'],
            disk=data['disk_size'],
            bandwidth=0,
            price=0,
            driver=self
        )

    def _to_location(self, data):
        """Convert an API zone dict into a :class:`NodeLocation`.

        Returns ``None`` if no zone data is present.
        """
        if data:
            return NodeLocation(
                id=data['id'],
                name=data['handle'],
                country='GB',
                driver=self
            )
        else:
            return None

    def _post(self, path, data=None):
        """Issue a JSON POST request against *path*."""
        # FIX: the default was a shared mutable dict (data={}).
        if data is None:
            data = {}
        headers = {'Content-Type': 'application/json'}
        return self.connection.request(path, data=data, headers=headers,
                                       method='POST')

    def _put(self, path, data=None):
        """Issue a JSON PUT request against *path*."""
        # FIX: the default was a shared mutable dict (data={}).
        if data is None:
            data = {}
        headers = {'Content-Type': 'application/json'}
        return self.connection.request(path, data=data, headers=headers,
                                       method='PUT')

    def create_node(self, **kwargs):
        """Create a new Brightbox node

        Reference: https://api.gb1.brightbox.com/1.0/#server_create_server

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_userdata: User data
        :type ex_userdata: ``str``

        :keyword ex_servergroup: Name or list of server group ids to
                                 add server to
        :type ex_servergroup: ``str`` or ``list`` of ``str``
        """
        data = {
            'name': kwargs['name'],
            'server_type': kwargs['size'].id,
            'image': kwargs['image'].id,
        }

        if 'ex_userdata' in kwargs:
            data['user_data'] = base64.b64encode(b(kwargs['ex_userdata'])) \
                                      .decode('ascii')

        if 'location' in kwargs:
            data['zone'] = kwargs['location'].id

        if 'ex_servergroup' in kwargs:
            # FIX: normalize into a list locally instead of mutating the
            # caller's kwargs dict in place.
            servergroups = kwargs['ex_servergroup']
            if not isinstance(servergroups, list):
                servergroups = [servergroups]
            data['server_groups'] = servergroups

        data = self._post('/%s/servers' % self.api_version, data).object
        return self._to_node(data)

    def destroy_node(self, node):
        """Destroy the given node. Returns ``True`` on HTTP 202."""
        response = self.connection.request(
            '/%s/servers/%s' % (self.api_version, node.id),
            method='DELETE')
        return response.status == httplib.ACCEPTED

    def list_nodes(self):
        """Return all servers in the account as :class:`Node` objects."""
        data = self.connection.request('/%s/servers' % self.api_version).object
        return list(map(self._to_node, data))

    def list_images(self, location=None):
        """Return all images as :class:`NodeImage` objects."""
        data = self.connection.request('/%s/images' % self.api_version).object
        return list(map(self._to_image, data))

    def list_sizes(self):
        """Return all server types as :class:`NodeSize` objects."""
        data = self.connection.request('/%s/server_types' % self.api_version) \
                   .object
        return list(map(self._to_size, data))

    def list_locations(self):
        """Return all zones as :class:`NodeLocation` objects."""
        data = self.connection.request('/%s/zones' % self.api_version).object
        return list(map(self._to_location, data))

    def ex_list_cloud_ips(self):
        """
        List Cloud IPs

        @note: This is an API extension for use on Brightbox

        :rtype: ``list`` of ``dict``
        """
        return self.connection.request('/%s/cloud_ips' % self.api_version) \
                   .object

    def ex_create_cloud_ip(self, reverse_dns=None):
        """
        Requests a new cloud IP address for the account

        @note: This is an API extension for use on Brightbox

        :param reverse_dns: Reverse DNS hostname
        :type reverse_dns: ``str``

        :rtype: ``dict``
        """
        params = {}

        if reverse_dns:
            params['reverse_dns'] = reverse_dns

        return self._post('/%s/cloud_ips' % self.api_version, params).object

    def ex_update_cloud_ip(self, cloud_ip_id, reverse_dns):
        """
        Update some details of the cloud IP address

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :param reverse_dns: Reverse DNS hostname
        :type reverse_dns: ``str``

        :rtype: ``dict``
        """
        response = self._put('/%s/cloud_ips/%s' % (self.api_version,
                                                   cloud_ip_id),
                             {'reverse_dns': reverse_dns})
        return response.status == httplib.OK

    def ex_map_cloud_ip(self, cloud_ip_id, interface_id):
        """
        Maps (or points) a cloud IP address at a server's interface
        or a load balancer to allow them to respond to public requests

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :param interface_id: The Interface ID or LoadBalancer ID to
                             which this Cloud IP should be mapped to
        :type interface_id: ``str``

        :return: True if the mapping was successful.
        :rtype: ``bool``
        """
        response = self._post('/%s/cloud_ips/%s/map' % (self.api_version,
                                                        cloud_ip_id),
                              {'destination': interface_id})
        return response.status == httplib.ACCEPTED

    def ex_unmap_cloud_ip(self, cloud_ip_id):
        """
        Unmaps a cloud IP address from its current destination making
        it available to remap. This remains in the account's pool
        of addresses

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :return: True if the unmap was successful.
        :rtype: ``bool``
        """
        response = self._post('/%s/cloud_ips/%s/unmap' % (self.api_version,
                                                          cloud_ip_id))
        return response.status == httplib.ACCEPTED

    def ex_destroy_cloud_ip(self, cloud_ip_id):
        """
        Release the cloud IP address from the account's ownership

        @note: This is an API extension for use on Brightbox

        :param cloud_ip_id: The id of the cloud ip.
        :type cloud_ip_id: ``str``

        :return: True if the unmap was successful.
        :rtype: ``bool``
        """
        response = self.connection.request(
            '/%s/cloud_ips/%s' % (self.api_version,
                                  cloud_ip_id),
            method='DELETE')
        return response.status == httplib.OK
|
||||
431
awx/lib/site-packages/libcloud/compute/drivers/cloudframes.py
Normal file
431
awx/lib/site-packages/libcloud/compute/drivers/cloudframes.py
Normal file
@@ -0,0 +1,431 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
CloudFrames Driver
|
||||
|
||||
"""
|
||||
|
||||
# (name, ram, disk, bandwidth, price, vcpus)
# Static size catalogue for the CloudFrames driver; each entry is expanded
# by CloudFramesNodeDriver.list_sizes(), with the list index used as the
# size id.
SIZES = [
    ('512mb_1core_10gb', 512, 10, 512, 0.025, 1),
    ('1024mb_1core_20gb', 1024, 20, 512, 0.05, 1),
    ('2048mb_2core_50gb', 2048, 50, 1024, 0.10, 2),
    ('4096mb_2core_100gb', 4096, 100, 2048, 0.20, 2),
    ('8192mb_4core_200gb', 8192, 200, 2048, 0.40, 4),
    ('16384mb_4core_400gb', 16384, 400, 4096, 0.80, 4),
]
|
||||
|
||||
import base64
|
||||
import random
|
||||
|
||||
from libcloud.utils.py3 import urlparse, b
|
||||
from libcloud.common.base import ConnectionKey
|
||||
from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
|
||||
from libcloud.common.types import ProviderError
|
||||
from libcloud.compute.base import NodeImage, NodeSize, Node, NodeLocation
|
||||
from libcloud.compute.base import NodeDriver
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
|
||||
|
||||
class CloudFramesException(ProviderError):
    """Provider-specific error raised for CloudFrames cloudapi failures."""
    pass
|
||||
|
||||
|
||||
class CloudFramesComponent(object):
    """
    Represents a node in the cloudapi path.

    Attribute access produces a callable proxy, so
    ``connection.machine.start(guid)`` issues the XML-RPC call
    ``cloud_api_machine.start``.
    """

    def __init__(self, cloudFramesConnection, name):
        self.cloudFramesConnection = cloudFramesConnection
        self.name = name

    def __getattr__(self, key):
        return self.method(key)

    def method(self, methodname):
        """Return a callable proxying *methodname* over the connection.

        The proxy appends the cloudapi boilerplate arguments (jobguid and
        executionparams). Passing the keyword ``'async'`` truthy makes the
        call non-blocking and returns the job guid instead of the result.
        """
        def foo(*args, **kwargs):
            # FIX: the local variable was named `async`, which is a
            # reserved keyword from Python 3.7 on (SyntaxError). The
            # kwargs key 'async' is kept for API compatibility.
            run_async = kwargs.get('async', False)
            args = list(args)
            args.append('')  # jobguid
            args.append({'wait': False} if run_async else {})  # executionparams
            response = self.cloudFramesConnection.request(
                'cloud_api_%s.%s' % (self.name, methodname), *args)
            if not response.success():
                response.parse_error()
            if run_async:
                return response.parse_body()['jobguid']
            else:
                return response.parse_body()['result']
        return foo
|
||||
|
||||
|
||||
class CloudFramesNodeSize(NodeSize):
    """NodeSize extended with a ``vcpus`` attribute for CloudFrames."""

    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 vcpus=None):
        super(CloudFramesNodeSize, self).__init__(
            id, name, ram, disk, bandwidth, price, driver)
        # Number of virtual CPUs for this size (None if unspecified).
        self.vcpus = vcpus
|
||||
|
||||
|
||||
class CloudFramesNode(Node):
    """Node subclass adding snapshot convenience wrappers that delegate
    to the driver's ``ex_*`` snapshot extension methods.
    """

    def list_snapshots(self):
        """Return the snapshots of this node."""
        return self.driver.ex_list_snapshots(self)

    def snapshot(self, label='', description=''):
        """Create and return a new snapshot of this node."""
        return self.driver.ex_snapshot_node(self, label, description)

    def rollback(self, snapshot):
        """Roll this node back to the given snapshot."""
        return self.driver.ex_rollback_node(self, snapshot)
|
||||
|
||||
|
||||
class CloudFramesSnapshot(object):
    """A point-in-time snapshot of a CloudFrames machine."""

    def __init__(self, id, timestamp, label, description, driver):
        self.id = id
        self.driver = driver
        self.timestamp = timestamp
        self.label = label
        self.description = description

    def destroy(self):
        """Delete this snapshot through the owning driver.

        NOTE(review): ex_destroy_snapshot on the driver appears to take
        (node, snapshot); this call passes only the snapshot — confirm
        against CloudFramesNodeDriver.ex_destroy_snapshot.
        """
        self.driver.ex_destroy_snapshot(self)
|
||||
|
||||
|
||||
class CloudFramesConnection(XMLRPCConnection, ConnectionKey):
    """
    Cloudapi connection class
    """

    # FIX: was misspelled 'repsonseCls', leaving the attribute dead; the
    # connection machinery reads 'responseCls'.
    responseCls = XMLRPCResponse
    base_url = None

    def __init__(self, key=None, secret=None, secure=True,
                 host=None, port=None, url=None, timeout=None):
        """
        :param key: The username to connect with to the cloudapi
        :type key: ``str``

        :param secret: The password to connect with to the cloudapi
        :type secret: ``str``

        :param secure: Should always be false at the moment
        :type secure: ``bool``

        :param host: The hostname of the cloudapi
        :type host: ``str``

        :param port: The port on which to connect to the cloudapi
        :type port: ``int``

        :param url: Url to the cloudapi (can replace all above)
        :type url: ``str``
        """

        super(CloudFramesConnection, self).__init__(key=key, secure=secure,
                                                    host=host, port=port,
                                                    url=url, timeout=timeout)
        # Precompute the HTTP Basic auth token from username:password.
        self._auth = base64.b64encode(
            b('%s:%s' % (key, secret))).decode('utf-8')
        self.endpoint = url

    def __getattr__(self, key):
        # Attribute access yields a cloudapi component proxy, so e.g.
        # conn.machine.start(...) calls 'cloud_api_machine.start'.
        return CloudFramesComponent(self, key)

    def add_default_headers(self, headers):
        headers['Authorization'] = 'Basic %s' % self._auth
        return headers
|
||||
|
||||
|
||||
class CloudFramesNodeDriver(NodeDriver):
    """
    CloudFrames node driver
    """

    connectionCls = CloudFramesConnection

    name = 'CloudFrames'
    api_name = 'cloudframes'
    website = 'http://www.cloudframes.net/'
    type = Provider.CLOUDFRAMES

    # Maps CloudFrames machine status strings onto libcloud NodeState.
    NODE_STATE_MAP = {
        'CONFIGURED': NodeState.PENDING,
        'CREATED': NodeState.PENDING,
        'DELETING': NodeState.PENDING,
        'HALTED': NodeState.TERMINATED,
        'IMAGEONLY': NodeState.UNKNOWN,
        'ISCSIEXPOSED': NodeState.PENDING,
        'MOVING': NodeState.PENDING,
        'OVERLOADED': NodeState.UNKNOWN,
        'PAUSED': NodeState.TERMINATED,
        'RUNNING': NodeState.RUNNING,
        'STARTING': NodeState.PENDING,
        'STOPPING': NodeState.PENDING,
        'SYNCING': NodeState.PENDING,
        'TODELETE': NodeState.PENDING,
    }

    # subclassed internal methods
    def __init__(self, key=None, secret=None, secure=True,
                 host=None, port=None, url=None, **kwargs):
        # A full URL overrides host/port/credentials: credentials may be
        # embedded as user:pass@host, and the scheme decides 'secure'.
        if not port:
            port = 443 if secure else 80
        if url:
            if not url.endswith('/'):
                url += '/'
            scheme, netloc, _, _, _, _ = urlparse.urlparse(url)
            secure = (scheme == 'https')
            if '@' in netloc:
                auth, hostport = netloc.rsplit('@', 1)
                if ':' in auth:
                    key, secret = auth.split(':', 1)
                else:
                    key = auth
            else:
                hostport = netloc
            if ':' in hostport:
                host, port = hostport.split(':')
            else:
                # No explicit port in the URL: inject the default one.
                host = hostport
                hostport = '%s:%s' % (host, port)
                url = url.replace(netloc, hostport)
        else:
            url = '%s://%s:%s/appserver/xmlrpc/' % (
                'https' if secure else 'http', host, port)

        if secure:
            raise NotImplementedError(
                'The cloudapi only supports unsecure connections')

        if key is None or secret is None:
            raise NotImplementedError(
                'Unauthenticated support to the cloudapi is not supported')

        # connection url
        self._url = url

        # cached attributes
        self.__cloudspaceguid = None
        self.__languid = None
        self.__locations = []

        super(CloudFramesNodeDriver, self).__init__(
            key, secret, secure, host, port, **kwargs)

    def _ex_connection_class_kwargs(self):
        # Pass the resolved URL through to CloudFramesConnection.
        return {'url': self._url}

    # internal methods
    @property
    def _cloudspaceguid(self):
        # Lazily resolved and cached guid of the 'cloud' cloudspace.
        if not self.__cloudspaceguid:
            self.__cloudspaceguid = self.connection.cloudspace.find(
                '', '', 'cloud', '')[0]
        return self.__cloudspaceguid

    @property
    def _languid(self):
        # Lazily resolved and cached guid of the public virtual LAN.
        if not self.__languid:
            self.__languid = self.connection.lan.find(
                '', '', 'public_virtual', '', '', '', '', '', '', '', '', '',
                '', '', '', '', '')[0]
        return self.__languid

    def _get_machine_data(self, guid):
        """
        Looks up some basic data related to the given machine guid.
        """
        try:
            d = self.connection.machine.list('', '', '', guid, '')[0]
        except IndexError:
            raise CloudFramesException('VM no longer exists', 404, self)
        # The list call doesn't supply these fields; default them so the
        # _to_node/_to_image converters can rely on their presence.
        d['public_ips'] = []
        d['private_ips'] = []
        d['size'] = None
        d['image'] = None
        return d

    def _machine_find(self, template=False, machinetype=None,
                      machinerole=None):
        # the cloudframes xmlrpc api requires you to pass all args and kwargs
        # as positional arguments, you can't use keywords arguments
        if not machinetype:
            # No type given: collect both server and desktop guids.
            guids = []
            for machinetype in ['VIRTUALSERVER', 'VIRTUALDESKTOP']:
                guids += self.connection.machine.find(
                    '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
                    '', '', machinetype, template, '', '', '', '', '', '', '',
                    '', '', '', '', '', '', '')
        else:
            guids = self.connection.machine.find(
                '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
                '', '', machinetype, '', '', '', '', '', '', '', '',
                machinerole, '', '', '', '', '', '')
        return guids

    def _to_image(self, image_dict):
        """Convert a machine-data dict into a :class:`NodeImage`."""
        return NodeImage(id=image_dict['guid'],
                         name=image_dict['name'],
                         driver=self.connection.driver)

    def _to_size(self, id, name, ram, disk, bandwidth, price, vcpus):
        """Build a :class:`CloudFramesNodeSize` from a SIZES row."""
        return CloudFramesNodeSize(
            id, name, ram, disk, bandwidth, price, self, vcpus)

    def _to_location(self, location_dict):
        """Convert a compute-node machine dict into a NodeLocation."""
        return NodeLocation(id=location_dict['guid'],
                            name=location_dict['name'],
                            country=None,
                            driver=self)

    def _to_node(self, node_dict):
        """Convert a machine-data dict into a :class:`CloudFramesNode`."""
        # only return nodes which can be worked with
        # (ignore cloudframes internal autotests and deleted nodes)
        if node_dict['status'] == 'CONFIGURED':
            return None
        return CloudFramesNode(id=node_dict['guid'],
                               name=node_dict['name'],
                               state=self.NODE_STATE_MAP.get(
                                   node_dict['status'], NodeState.UNKNOWN),
                               public_ips=node_dict['public_ips'],
                               private_ips=node_dict['private_ips'],
                               driver=self.connection.driver,
                               size=node_dict['size'],
                               image=node_dict['image'],
                               extra={})

    def _to_snapshot(self, snapshot_dict):
        """Convert an API snapshot dict into a CloudFramesSnapshot."""
        return CloudFramesSnapshot(id=snapshot_dict['guid'],
                                   timestamp=snapshot_dict['timestamp'],
                                   label=snapshot_dict['backuplabel'],
                                   description=snapshot_dict['description'],
                                   driver=self)

    # subclassed public methods, and provider specific public methods
    def list_images(self, location=None):
        """Return template machines as :class:`NodeImage` objects."""
        image_ids = self._machine_find(template=True)
        image_list = []
        for image_id in image_ids:
            image_list.append(self._to_image(self._get_machine_data(image_id)))
        return image_list

    def list_sizes(self, location=None):
        """Return the static SIZES catalogue; the list index is the id."""
        sizes = []
        for id in range(len(SIZES)):
            sizes.append(self._to_size(id, *SIZES[id]))
        return sizes

    def list_locations(self, ex_use_cached=True):
        """Return compute nodes as locations, cached unless told otherwise."""
        if not self.__locations or not ex_use_cached:
            self.__locations = []
            for location_id in self._machine_find(machinetype='PHYSICAL',
                                                  machinerole='COMPUTENODE'):
                self.__locations.append(
                    self._to_location(self._get_machine_data(location_id)))
        return self.__locations

    def list_nodes(self):
        """Return all usable (non-CONFIGURED) machines as nodes."""
        node_ids = self._machine_find()
        node_list = []
        for node_id in node_ids:
            node = self._to_node(self._get_machine_data(node_id))
            if node:
                node_list.append(node)
        return node_list

    def create_node(self, **kwargs):
        """
        Creates a new node, by cloning the template provided.

        If no location object is passed, a random location will be used.


        :param image: The template to be cloned (required)
        :type image: ``list`` of :class:`NodeImage`

        :param name: The name for the new node (required)
        :type name: ``str``

        :param size: The size of the new node (required)
        :type size: ``list`` of :class:`NodeSize`

        :param location: The location to create the new node
        :type location: ``list`` of :class:`NodeLocation`

        :param default_gateway: The default gateway to be used
        :type default_gateway: ``str``

        :param extra: Additional requirements (extra disks fi.)
        :type extra: ``dict``


        :returns: ``list`` of :class:`Node` -- The newly created Node object

        :raises: CloudFramesException
        """

        additionalinfo = kwargs.get('extra', {})
        additionalinfo.update({
            'memory': kwargs['size'].ram,
            'cpu': kwargs['size'].vcpus,
        })
        guid = self.connection.machine.createFromTemplate(
            self._cloudspaceguid, kwargs['image'].id, kwargs['name'],
            [{'languid': self._languid}], kwargs['name'],
            kwargs.get('location', random.choice(self.list_locations())).id,
            kwargs.get('default_gateway', ''), None, additionalinfo)
        if not self.connection.machine.start(guid):
            raise CloudFramesException(
                'failed to start machine after creation', 500, self)
        return self._to_node(self._get_machine_data(guid))

    def destroy_node(self, node):
        """Delete the machine; returns the API's boolean result."""
        return self.connection.machine.delete(node.id, False)

    def reboot_node(self, node, ex_clean=True):
        """Reboot the machine (cleanly by default)."""
        return self.connection.machine.reboot(node.id, ex_clean)

    def ex_snapshot_node(self, node, label='', description=''):
        """Snapshot the node and return the matching snapshot object."""
        guid = self.connection.machine.snapshot(
            node.id, label, description, False, False, 'PAUSED')
        for snapshot in self.ex_list_snapshots(node):
            if snapshot.id == guid:
                return snapshot
        else:
            # for/else: runs only when no snapshot matched the new guid.
            raise CloudFramesException('Snapshot creation failed', 500, self)

    def ex_rollback_node(self, node, snapshot):
        """Roll the node back to *snapshot*, stopping it first if running."""
        if not node.state == NodeState.TERMINATED:
            self.connection.machine.stop(node.id, False, 930)
        success = self.connection.machine.rollback(node.id, snapshot.id)
        self.connection.machine.start(node.id)
        return success

    def ex_list_snapshots(self, node):
        """Return all snapshots of *node* as CloudFramesSnapshot objects."""
        return [self._to_snapshot(snapshot_dict) for snapshot_dict in
                self.connection.machine.listSnapshots(node.id, False, '', '')]

    def ex_destroy_snapshot(self, node, snapshot):
        """Delete the snapshot.

        NOTE(review): the *node* parameter is unused, and
        CloudFramesSnapshot.destroy() calls this with a single argument —
        confirm the intended signature.
        """
        return self.connection.machine.delete(snapshot.id, False)
|
||||
|
||||
|
||||
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
|
||||
2093
awx/lib/site-packages/libcloud/compute/drivers/cloudsigma.py
Normal file
2093
awx/lib/site-packages/libcloud/compute/drivers/cloudsigma.py
Normal file
File diff suppressed because it is too large
Load Diff
2208
awx/lib/site-packages/libcloud/compute/drivers/cloudstack.py
Normal file
2208
awx/lib/site-packages/libcloud/compute/drivers/cloudstack.py
Normal file
File diff suppressed because it is too large
Load Diff
224
awx/lib/site-packages/libcloud/compute/drivers/digitalocean.py
Normal file
224
awx/lib/site-packages/libcloud/compute/drivers/digitalocean.py
Normal file
@@ -0,0 +1,224 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Digital Ocean Driver
|
||||
"""
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
|
||||
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
|
||||
from libcloud.compute.base import NodeDriver
|
||||
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
|
||||
|
||||
|
||||
class DigitalOceanResponse(JsonResponse):
    """Response class for the DigitalOcean driver."""

    def parse_error(self):
        """
        Translate an error response into an exception or an error value.

        Invalid credentials raise :class:`InvalidCredsError`; any other
        error is returned as a string (or the parsed body itself).
        """
        # DigitalOcean signals bad credentials with a 302 to /api/error.
        if self.status == httplib.FOUND and '/api/error' in self.body:
            raise InvalidCredsError(self.body)

        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.parse_body()['message'])

        parsed = self.parse_body()
        if 'error_message' not in parsed:
            return parsed
        return '%s (code: %s)' % (parsed['error_message'], self.status)
||||
class SSHKey(object):
    """Container for a single DigitalOcean SSH key."""

    def __init__(self, id, name, pub_key):
        # ``id`` mirrors the API field name (it intentionally shadows the
        # builtin).
        self.id, self.name, self.pub_key = id, name, pub_key

    def __repr__(self):
        details = (self.id, self.name, self.pub_key)
        return '<SSHKey: id=%s, name=%s, pub_key=%s>' % details
||||
class DigitalOceanConnection(ConnectionUserAndKey):
    """
    Connection class for the DigitalOcean driver.
    """

    host = 'api.digitalocean.com'
    responseCls = DigitalOceanResponse

    def add_default_params(self, params):
        """
        Add parameters that are necessary for every request.

        DigitalOcean (v1) authenticates via query parameters, so each
        request carries ``client_id`` and ``api_key``.
        """
        params.update({
            'client_id': self.user_id,
            'api_key': self.key,
        })
        return params
||||
class DigitalOceanNodeDriver(NodeDriver):
    """
    DigitalOceanNode node driver.
    """

    connectionCls = DigitalOceanConnection

    type = Provider.DIGITAL_OCEAN
    name = 'Digital Ocean'
    website = 'https://www.digitalocean.com'

    # NOTE(review): mapping 'off' to REBOOTING (rather than a stopped state)
    # looks odd -- preserved as-is; confirm against the API semantics.
    NODE_STATE_MAP = {'new': NodeState.PENDING,
                      'off': NodeState.REBOOTING,
                      'active': NodeState.RUNNING}

    def list_nodes(self):
        # "Droplet" is DigitalOcean's name for a node/VM.
        data = self.connection.request('/droplets').object['droplets']
        return list(map(self._to_node, data))

    def list_locations(self):
        data = self.connection.request('/regions').object['regions']
        return list(map(self._to_location, data))

    def list_images(self):
        data = self.connection.request('/images').object['images']
        return list(map(self._to_image, data))

    def list_sizes(self):
        data = self.connection.request('/sizes').object['sizes']
        return list(map(self._to_size, data))

    def create_node(self, name, size, image, location, ex_ssh_key_ids=None,
                    **kwargs):
        """
        Create a node.

        :keyword ex_ssh_key_ids: A list of ssh key ids which will be added
                                 to the server. (optional)
        :type ex_ssh_key_ids: ``list`` of ``str``

        :return: The newly created node.
        :rtype: :class:`Node`
        """
        params = {'name': name, 'size_id': size.id, 'image_id': image.id,
                  'region_id': location.id}

        if ex_ssh_key_ids:
            # The API expects a single comma-separated string of key ids.
            params['ssh_key_ids'] = ','.join(ex_ssh_key_ids)

        data = self.connection.request('/droplets/new', params=params).object
        return self._to_node(data=data['droplet'])

    def reboot_node(self, node):
        res = self.connection.request('/droplets/%s/reboot/' % (node.id))
        return res.status == httplib.OK

    def destroy_node(self, node):
        # scrub_data=1 asks DigitalOcean to wipe the disk on destroy.
        params = {'scrub_data': '1'}
        res = self.connection.request('/droplets/%s/destroy/' % (node.id),
                                      params=params)
        return res.status == httplib.OK

    def ex_rename_node(self, node, name):
        params = {'name': name}
        res = self.connection.request('/droplets/%s/rename/' % (node.id),
                                      params=params)
        return res.status == httplib.OK

    def ex_list_ssh_keys(self):
        """
        List all the available SSH keys.

        :return: Available SSH keys.
        :rtype: ``list`` of :class:`SSHKey`
        """
        data = self.connection.request('/ssh_keys').object['ssh_keys']
        return list(map(self._to_ssh_key, data))

    def ex_create_ssh_key(self, name, ssh_key_pub):
        """
        Create a new SSH key.

        :param name: Key name (required)
        :type name: ``str``

        :param ssh_key_pub: Valid public key string (required)
        :type ssh_key_pub: ``str``
        """
        params = {'name': name, 'ssh_pub_key': ssh_key_pub}
        data = self.connection.request('/ssh_keys/new/', method='GET',
                                       params=params).object
        assert 'ssh_key' in data
        return self._to_ssh_key(data=data['ssh_key'])

    def ex_destroy_ssh_key(self, key_id):
        """
        Delete an existing SSH key.

        :param key_id: SSH key id (required)
        :type key_id: ``str``
        """
        res = self.connection.request('/ssh_keys/%s/destroy/' % (key_id))
        return res.status == httplib.OK

    def _to_node(self, data):
        # Convert an API droplet dict into a Node.
        extra_keys = ['backups_active', 'region_id']
        if 'status' in data:
            state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
        else:
            state = NodeState.UNKNOWN

        if 'ip_address' in data and data['ip_address'] is not None:
            public_ips = [data['ip_address']]
        else:
            public_ips = []

        extra = {}
        for key in extra_keys:
            if key in data:
                extra[key] = data[key]

        # NOTE(review): private_ips=None (not []) is passed through here;
        # confirm Node tolerates None before changing it.
        node = Node(id=data['id'], name=data['name'], state=state,
                    public_ips=public_ips, private_ips=None, extra=extra,
                    driver=self)
        return node

    def _to_image(self, data):
        extra = {'distribution': data['distribution']}
        return NodeImage(id=data['id'], name=data['name'], extra=extra,
                         driver=self)

    def _to_location(self, data):
        return NodeLocation(id=data['id'], name=data['name'], country=None,
                            driver=self)

    def _to_size(self, data):
        # Size names encode RAM, e.g. "512MB" or "2GB"; parse to megabytes.
        ram = data['name'].lower()

        if 'mb' in ram:
            ram = int(ram.replace('mb', ''))
        elif 'gb' in ram:
            ram = int(ram.replace('gb', '')) * 1024

        return NodeSize(id=data['id'], name=data['name'], ram=ram, disk=0,
                        bandwidth=0, price=0, driver=self)

    def _to_ssh_key(self, data):
        return SSHKey(id=data['id'], name=data['name'],
                      pub_key=data.get('ssh_pub_key', None))
||||
242
awx/lib/site-packages/libcloud/compute/drivers/dreamhost.py
Normal file
242
awx/lib/site-packages/libcloud/compute/drivers/dreamhost.py
Normal file
@@ -0,0 +1,242 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
DreamHost Driver
|
||||
"""
|
||||
|
||||
import copy
|
||||
|
||||
from libcloud.common.base import ConnectionKey, JsonResponse
|
||||
from libcloud.common.types import InvalidCredsError
|
||||
from libcloud.compute.base import Node, NodeDriver, NodeSize
|
||||
from libcloud.compute.base import NodeImage
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
|
||||
# DreamHost Private Servers can be resized on the fly, but Libcloud doesn't
# currently support extensions to its interface, so we'll put some basic sizes
# in for node creation.
#
# RAM values are in megabytes; DreamHost PS has no fixed per-size disk or
# bandwidth, hence the None values.

DH_PS_SIZES = {
    'minimum': {
        'id': 'minimum',
        'name': 'Minimum DH PS size',
        'ram': 300,
        'disk': None,
        'bandwidth': None
    },
    'maximum': {
        'id': 'maximum',
        'name': 'Maximum DH PS size',
        'ram': 4000,
        'disk': None,
        'bandwidth': None
    },
    'default': {
        'id': 'default',
        'name': 'Default DH PS size',
        'ram': 2300,
        'disk': None,
        'bandwidth': None
    },
    'low': {
        'id': 'low',
        'name': 'DH PS with 1GB RAM',
        'ram': 1000,
        'disk': None,
        'bandwidth': None
    },
    'high': {
        'id': 'high',
        'name': 'DH PS with 3GB RAM',
        'ram': 3000,
        'disk': None,
        'bandwidth': None
    },
}
||||
|
||||
class DreamhostAPIException(Exception):
    """Raised when the DreamHost API reports an application-level error."""

    def __repr__(self):
        return "<DreamhostException '%s'>" % (self.args[0])

    def __str__(self):
        message = self.args[0]
        return message
|
||||
class DreamhostResponse(JsonResponse):
    """
    Response class for DreamHost PS
    """

    def parse_body(self):
        # Unwrap the DreamHost envelope: {'result': ..., 'data': ...}.
        resp = super(DreamhostResponse, self).parse_body()
        if resp['result'] != 'success':
            # _api_parse_error always raises itself, so the surrounding
            # Exception(...) wrapper is effectively unreachable -- preserved
            # as-is.
            raise Exception(self._api_parse_error(resp))
        return resp['data']

    def parse_error(self):
        # NOTE(review): raises a bare Exception with no message; callers get
        # only the type, not a reason.
        raise Exception

    def _api_parse_error(self, response):
        # Despite the name, this method raises rather than returning the
        # error description.
        if 'data' in response:
            if response['data'] == 'invalid_api_key':
                raise InvalidCredsError(
                    "Oops! You've entered an invalid API key")
            else:
                raise DreamhostAPIException(response['data'])
        else:
            raise DreamhostAPIException("Unknown problem: %s" % (self.body))
|
||||
class DreamhostConnection(ConnectionKey):
    """
    Connection class to connect to DreamHost's API servers
    """

    host = 'api.dreamhost.com'
    responseCls = DreamhostResponse
    format = 'json'

    def add_default_params(self, params):
        """
        Add key and format parameters to the request. Eventually should add
        unique_id to prevent re-execution of a single request.
        """
        params.update({'key': self.key, 'format': self.format})
        # params['unique_id'] = generate_unique_id()
        return params
|
||||
class DreamhostNodeDriver(NodeDriver):
    """
    Node Driver for DreamHost PS
    """
    type = Provider.DREAMHOST
    api_name = 'dreamhost'
    name = "Dreamhost"
    website = 'http://dreamhost.com/'
    connectionCls = DreamhostConnection

    _sizes = DH_PS_SIZES

    def create_node(self, **kwargs):
        """Create a new Dreamhost node

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_movedata: Copy all your existing users to this new PS
        :type ex_movedata: ``str``
        """
        size = kwargs['size'].ram
        params = {
            'cmd': 'dreamhost_ps-add_ps',
            # NOTE(review): the docstring advertises ex_movedata but the code
            # reads 'movedata' -- confirm which key callers actually pass.
            'movedata': kwargs.get('movedata', 'no'),
            'type': kwargs['image'].name,
            'size': size
        }
        data = self.connection.request('/', params).object
        return Node(
            id=data['added_web'],
            name=data['added_web'],
            state=NodeState.PENDING,
            public_ips=[],
            private_ips=[],
            driver=self.connection.driver,
            extra={
                'type': kwargs['image'].name
            }
        )

    def destroy_node(self, node):
        params = {
            'cmd': 'dreamhost_ps-remove_ps',
            'ps': node.id
        }
        try:
            return self.connection.request('/', params).success()
        except DreamhostAPIException:
            # Best-effort: API-level failures are reported as False.
            return False

    def reboot_node(self, node):
        params = {
            'cmd': 'dreamhost_ps-reboot',
            'ps': node.id
        }
        try:
            return self.connection.request('/', params).success()
        except DreamhostAPIException:
            return False

    def list_nodes(self, **kwargs):
        data = self.connection.request(
            '/', {'cmd': 'dreamhost_ps-list_ps'}).object
        return [self._to_node(n) for n in data]

    def list_images(self, **kwargs):
        data = self.connection.request(
            '/', {'cmd': 'dreamhost_ps-list_images'}).object
        images = []
        for img in data:
            images.append(NodeImage(
                id=img['image'],
                name=img['image'],
                driver=self.connection.driver
            ))
        return images

    def list_sizes(self, **kwargs):
        sizes = []
        for key, values in self._sizes.items():
            # Deep-copy so adding the price never mutates the shared
            # DH_PS_SIZES module constant.
            attributes = copy.deepcopy(values)
            attributes.update({'price': self._get_size_price(size_id=key)})
            sizes.append(NodeSize(driver=self.connection.driver, **attributes))

        return sizes

    def list_locations(self, **kwargs):
        raise NotImplementedError(
            'You cannot select a location for '
            'DreamHost Private Servers at this time.')

    def _resize_node(self, node, size):
        # DreamHost PS only accepts sizes between 300 and 4000 MB
        # (see DH_PS_SIZES minimum/maximum).
        if (size < 300 or size > 4000):
            return False

        params = {
            'cmd': 'dreamhost_ps-set_size',
            'ps': node.id,
            'size': size
        }
        try:
            return self.connection.request('/', params).success()
        except DreamhostAPIException:
            return False

    def _to_node(self, data):
        """
        Convert the data from a DreamhostResponse object into a Node
        """
        return Node(
            id=data['ps'],
            name=data['ps'],
            state=NodeState.UNKNOWN,
            public_ips=[data['ip']],
            private_ips=[],
            driver=self.connection.driver,
            extra={
                'current_size': data['memory_mb'],
                'account_id': data['account_id'],
                'type': data['type']})
||||
349
awx/lib/site-packages/libcloud/compute/drivers/dummy.py
Normal file
349
awx/lib/site-packages/libcloud/compute/drivers/dummy.py
Normal file
@@ -0,0 +1,349 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Dummy Driver
|
||||
|
||||
@note: This driver is out of date
|
||||
"""
|
||||
import uuid
|
||||
import socket
|
||||
import struct
|
||||
|
||||
from libcloud.common.base import ConnectionKey
|
||||
from libcloud.compute.base import NodeImage, NodeSize, Node
|
||||
from libcloud.compute.base import NodeDriver, NodeLocation
|
||||
from libcloud.compute.base import KeyPair
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
|
||||
|
||||
class DummyConnection(ConnectionKey):
    """
    Dummy connection class
    """

    def connect(self, host=None, port=None):
        # Nothing to connect to -- this driver is entirely in-memory.
        pass
|
||||
class DummyNodeDriver(NodeDriver):
    """
    Dummy node driver

    This is a fake driver which appears to always create or destroy
    nodes successfully.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> node=driver.create_node()
    >>> node.public_ips[0]
    '127.0.0.3'
    >>> node.name
    'dummy-3'

    If the credentials you give convert to an integer then the next
    node to be created will be one higher.

    Each time you create a node you will get a different IP address.

    >>> driver = DummyNodeDriver(22)
    >>> node=driver.create_node()
    >>> node.name
    'dummy-23'

    """

    name = "Dummy Node Provider"
    website = 'http://example.com'
    type = Provider.DUMMY

    def __init__(self, creds):
        """
        :param creds: Credentials
        :type creds: ``str``

        :rtype: ``None``
        """
        self.creds = creds
        try:
            num = int(creds)
        except ValueError:
            num = None
        # Numeric creds pre-populate that many nodes (127.0.0.1, .2, ...);
        # zero or non-numeric creds fall back to two fixed default nodes.
        if num:
            self.nl = []
            startip = _ip_to_int('127.0.0.1')
            for i in range(num):
                ip = _int_to_ip(startip + i)
                self.nl.append(
                    Node(id=i,
                         name='dummy-%d' % (i),
                         state=NodeState.RUNNING,
                         public_ips=[ip],
                         private_ips=[],
                         driver=self,
                         extra={'foo': 'bar'})
                )
        else:
            self.nl = [
                Node(id=1,
                     name='dummy-1',
                     state=NodeState.RUNNING,
                     public_ips=['127.0.0.1'],
                     private_ips=[],
                     driver=self,
                     extra={'foo': 'bar'}),
                Node(id=2,
                     name='dummy-2',
                     state=NodeState.RUNNING,
                     public_ips=['127.0.0.1'],
                     private_ips=[],
                     driver=self,
                     extra={'foo': 'bar'}),
            ]
        self.connection = DummyConnection(self.creds)

    def get_uuid(self, unique_field=None):
        """

        :param unique_field: Unique field
        :type unique_field: ``bool``
        :rtype: :class:`UUID`
        """
        # The argument is ignored; every call returns a fresh random UUID.
        return str(uuid.uuid4())

    def list_nodes(self):
        """
        List the nodes known to a particular driver;
        There are two default nodes created at the beginning

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node_list=driver.list_nodes()
        >>> sorted([node.name for node in node_list ])
        ['dummy-1', 'dummy-2']

        each item in the list returned is a node object from which you
        can carry out any node actions you wish

        >>> node_list[0].reboot()
        True

        As more nodes are added, list_nodes will return them

        >>> node=driver.create_node()
        >>> node.size.id
        's1'
        >>> node.image.id
        'i2'
        >>> sorted([n.name for n in driver.list_nodes()])
        ['dummy-1', 'dummy-2', 'dummy-3']

        @inherits: :class:`NodeDriver.list_nodes`
        """
        return self.nl

    def reboot_node(self, node):
        """
        Sets the node state to rebooting; in this dummy driver always
        returns True as if the reboot had been successful.

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node=driver.create_node()
        >>> from libcloud.compute.types import NodeState
        >>> node.state == NodeState.RUNNING
        True
        >>> node.state == NodeState.REBOOTING
        False
        >>> driver.reboot_node(node)
        True
        >>> node.state == NodeState.REBOOTING
        True

        Please note, dummy nodes never recover from the reboot.

        @inherits: :class:`NodeDriver.reboot_node`
        """

        node.state = NodeState.REBOOTING
        return True

    def destroy_node(self, node):
        """
        Sets the node state to terminated and removes it from the node list

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> from libcloud.compute.types import NodeState
        >>> node = [node for node in driver.list_nodes() if
        ...         node.name == 'dummy-1'][0]
        >>> node.state == NodeState.RUNNING
        True
        >>> driver.destroy_node(node)
        True
        >>> node.state == NodeState.RUNNING
        False
        >>> [n for n in driver.list_nodes() if n.name == 'dummy-1']
        []

        @inherits: :class:`NodeDriver.destroy_node`
        """

        node.state = NodeState.TERMINATED
        self.nl.remove(node)
        return True

    def list_images(self, location=None):
        """
        Returns a list of images as a cloud provider might have

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> sorted([image.name for image in driver.list_images()])
        ['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10']

        @inherits: :class:`NodeDriver.list_images`
        """
        return [
            NodeImage(id=1, name="Ubuntu 9.10", driver=self),
            NodeImage(id=2, name="Ubuntu 9.04", driver=self),
            NodeImage(id=3, name="Slackware 4", driver=self),
        ]

    def list_sizes(self, location=None):
        """
        Returns a list of node sizes as a cloud provider might have

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> sorted([size.ram for size in driver.list_sizes()])
        [128, 512, 4096, 8192]

        @inherits: :class:`NodeDriver.list_images`
        """

        return [
            NodeSize(id=1,
                     name="Small",
                     ram=128,
                     disk=4,
                     bandwidth=500,
                     price=4,
                     driver=self),
            NodeSize(id=2,
                     name="Medium",
                     ram=512,
                     disk=16,
                     bandwidth=1500,
                     price=8,
                     driver=self),
            NodeSize(id=3,
                     name="Big",
                     ram=4096,
                     disk=32,
                     bandwidth=2500,
                     price=32,
                     driver=self),
            NodeSize(id=4,
                     name="XXL Big",
                     ram=4096 * 2,
                     disk=32 * 4,
                     bandwidth=2500 * 3,
                     price=32 * 2,
                     driver=self),
        ]

    def list_locations(self):
        """
        Returns a list of locations of nodes

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> sorted([loc.name + " in " + loc.country for loc in
        ...         driver.list_locations()])
        ['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"]

        @inherits: :class:`NodeDriver.list_locations`
        """
        return [
            NodeLocation(id=1,
                         name="Paul's Room",
                         country='US',
                         driver=self),
            NodeLocation(id=2,
                         name="London Loft",
                         country='GB',
                         driver=self),
            NodeLocation(id=3,
                         name="Island Datacenter",
                         country='FJ',
                         driver=self),
        ]

    def create_node(self, **kwargs):
        """
        Creates a dummy node; the node id is equal to the number of
        nodes in the node list

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> sorted([node.name for node in driver.list_nodes()])
        ['dummy-1', 'dummy-2']
        >>> nodeA = driver.create_node()
        >>> sorted([node.name for node in driver.list_nodes()])
        ['dummy-1', 'dummy-2', 'dummy-3']
        >>> driver.create_node().name
        'dummy-4'
        >>> driver.destroy_node(nodeA)
        True
        >>> sorted([node.name for node in driver.list_nodes()])
        ['dummy-1', 'dummy-2', 'dummy-4']

        @inherits: :class:`NodeDriver.create_node`
        """
        # The new id/name/IP derive from the current list length, so ids can
        # repeat after destroy_node (see the doctest above).
        l = len(self.nl) + 1
        n = Node(id=l,
                 name='dummy-%d' % l,
                 state=NodeState.RUNNING,
                 public_ips=['127.0.0.%d' % l],
                 private_ips=[],
                 driver=self,
                 size=NodeSize(id='s1', name='foo', ram=2048,
                               disk=160, bandwidth=None, price=0.0,
                               driver=self),
                 image=NodeImage(id='i2', name='image', driver=self),
                 extra={'foo': 'bar'})
        self.nl.append(n)
        return n

    def import_key_pair_from_string(self, name, key_material):
        # Fake import: echoes the supplied material back with placeholder
        # fingerprint/private-key values.
        key_pair = KeyPair(name=name,
                           public_key=key_material,
                           fingerprint='fingerprint',
                           private_key='private_key',
                           driver=self)
        return key_pair
||||
|
||||
def _ip_to_int(ip):
|
||||
return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0])
|
||||
|
||||
|
||||
def _int_to_ip(ip):
|
||||
return socket.inet_ntoa(struct.pack('I', socket.ntohl(ip)))
|
||||
|
||||
if __name__ == "__main__":
    # Run this module's embedded doctests when executed directly.
    import doctest

    doctest.testmod()
||||
5770
awx/lib/site-packages/libcloud/compute/drivers/ec2.py
Normal file
5770
awx/lib/site-packages/libcloud/compute/drivers/ec2.py
Normal file
File diff suppressed because it is too large
Load Diff
385
awx/lib/site-packages/libcloud/compute/drivers/ecp.py
Normal file
385
awx/lib/site-packages/libcloud/compute/drivers/ecp.py
Normal file
@@ -0,0 +1,385 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Enomaly ECP driver
|
||||
"""
|
||||
import time
|
||||
import base64
|
||||
import os
|
||||
import socket
|
||||
import binascii
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
# JSON is included in the standard library starting with Python 2.6. For 2.5
|
||||
# and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.common.base import Response, ConnectionUserAndKey
|
||||
from libcloud.compute.base import NodeDriver, NodeSize, NodeLocation
|
||||
from libcloud.compute.base import NodeImage, Node
|
||||
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
|
||||
from libcloud.utils.networking import is_private_subnet
|
||||
|
||||
# Defaults
|
||||
API_HOST = ''
|
||||
API_PORT = (80, 443)
|
||||
|
||||
|
||||
class ECPResponse(Response):
    """Response class for the Enomaly ECP driver."""

    # NOTE(review): self.error is only assigned inside success(); calling
    # parse_error() before success() raises AttributeError. Preserved as-is.

    def success(self):
        # ECP wraps errors in a 200/201 JSON body with an 'errno' field.
        if self.status == httplib.OK or self.status == httplib.CREATED:
            try:
                j_body = json.loads(self.body)
            except ValueError:
                self.error = "JSON response cannot be decoded."
                return False
            if j_body['errno'] == 0:
                return True
            else:
                self.error = "ECP error: %s" % j_body['message']
                return False
        elif self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        else:
            self.error = "HTTP Error Code: %s" % self.status
            return False

    def parse_error(self):
        # Returns the message recorded by the preceding success() call.
        return self.error

    # Interpret the json responses - no error checking required
    def parse_body(self):
        return json.loads(self.body)

    def getheaders(self):
        return self.headers
|
||||
class ECPConnection(ConnectionUserAndKey):
    """
    Connection class for the Enomaly ECP driver
    """

    responseCls = ECPResponse
    host = API_HOST
    port = API_PORT

    def add_default_headers(self, headers):
        """
        Attach HTTP Basic auth credentials to every request.

        :param headers: Headers dict to mutate.
        :return: The same dict with ``Authorization`` set.
        """
        username = self.user_id
        password = self.key
        # base64.b64encode instead of the deprecated base64.encodestring
        # (removed in Python 3.9): encodestring returned bytes on Python 3,
        # which would render as "Basic b'...'" when %-formatted into the
        # header. Decode explicitly so the header value is a str.
        base64string = base64.b64encode(
            b('%s:%s' % (username, password))).decode('utf-8')
        authheader = "Basic %s" % base64string
        headers['Authorization'] = authheader

        return headers

    def _encode_multipart_formdata(self, fields):
        """
        Encode ``fields`` (a dict of name -> value) as a multipart/form-data
        body. Based on Wade Leftwich's function:
        http://code.activestate.com/recipes/146306/

        :return: ``(headers_dict, body_string)`` tuple.
        """
        # use a random boundary that does not appear in the fields
        # ('' is a substring of everything, so the loop always runs once)
        boundary = ''
        while boundary in ''.join(fields):
            boundary = binascii.hexlify(os.urandom(16)).decode('utf-8')
        L = []
        for i in fields:
            L.append('--' + boundary)
            L.append('Content-Disposition: form-data; name="%s"' % i)
            L.append('')
            L.append(fields[i])
        L.append('--' + boundary + '--')
        L.append('')
        body = '\r\n'.join(L)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        header = {'Content-Type': content_type}
        return header, body
||||
|
||||
class ECPNodeDriver(NodeDriver):
|
||||
"""
|
||||
Enomaly ECP node driver
|
||||
"""
|
||||
|
||||
name = "Enomaly Elastic Computing Platform"
|
||||
website = 'http://www.enomaly.com/'
|
||||
type = Provider.ECP
|
||||
connectionCls = ECPConnection
|
||||
|
||||
def list_nodes(self):
    """
    Returns a list of all running Nodes

    :rtype: ``list`` of :class:`Node`
    """

    # Make the call
    res = self.connection.request('/rest/hosting/vm/list').parse_body()

    # Put together a list of node objects
    nodes = []
    for vm in res['vms']:
        # _to_node returns None for VMs that are not in the 'running' state.
        node = self._to_node(vm)
        if node is not None:
            nodes.append(node)

    # And return it
    return nodes
||||
def _to_node(self, vm):
    """
    Turns a (json) dictionary into a Node object.
    This returns only running VMs.
    """

    # Check state
    if not vm['state'] == "running":
        return None

    # IPs -- drop the loopback interface up front.
    iplist = [interface['ip'] for interface in vm['interfaces'] if
              interface['ip'] != '127.0.0.1']

    public_ips = []
    private_ips = []
    for ip in iplist:
        try:
            # inet_aton doubles as a validity filter; non-IPv4 entries
            # are skipped entirely.
            socket.inet_aton(ip)
        except socket.error:
            # not a valid ip
            continue
        if is_private_subnet(ip):
            private_ips.append(ip)
        else:
            public_ips.append(ip)

    # Create the node object
    n = Node(
        id=vm['uuid'],
        name=vm['name'],
        state=NodeState.RUNNING,
        public_ips=public_ips,
        private_ips=private_ips,
        driver=self,
    )

    return n
|
||||
def reboot_node(self, node):
    """
    Shuts down a VM and then starts it again.

    @inherits: :class:`NodeDriver.reboot_node`
    """

    # Turn the VM off
    # Black magic to make the POST requests work
    d = self.connection._encode_multipart_formdata({'action': 'stop'})
    self.connection.request(
        '/rest/hosting/vm/%s' % node.id,
        method='POST',
        headers=d[0],
        data=d[1]
    ).parse_body()

    node.state = NodeState.REBOOTING
    # Wait for it to turn off and then continue (to turn it on again)
    # NOTE(review): polls every 5s with no timeout; this loops forever if
    # the VM never reaches the 'off' state.
    while node.state == NodeState.REBOOTING:
        # Check if it's off.
        response = self.connection.request(
            '/rest/hosting/vm/%s' % node.id
        ).parse_body()
        if response['vm']['state'] == 'off':
            # TERMINATED is used only as a loop-exit sentinel here; the
            # state is overwritten with RUNNING below.
            node.state = NodeState.TERMINATED
        else:
            time.sleep(5)

    # Turn the VM back on.
    # Black magic to make the POST requests work
    d = self.connection._encode_multipart_formdata({'action': 'start'})
    self.connection.request(
        '/rest/hosting/vm/%s' % node.id,
        method='POST',
        headers=d[0],
        data=d[1]
    ).parse_body()

    node.state = NodeState.RUNNING
    return True
|
||||
def destroy_node(self, node):
    """
    Shuts down and deletes a VM.

    @inherits: :class:`NodeDriver.destroy_node`
    """

    # Shut down first
    # Black magic to make the POST requests work
    d = self.connection._encode_multipart_formdata({'action': 'stop'})
    self.connection.request(
        '/rest/hosting/vm/%s' % node.id,
        method='POST',
        headers=d[0],
        data=d[1]
    ).parse_body()

    # Ensure there was no applicationl level error
    node.state = NodeState.PENDING
    # Wait for the VM to turn off before continuing
    # NOTE(review): polls every 5s with no timeout; this loops forever if
    # the VM never reaches the 'off' state.
    while node.state == NodeState.PENDING:
        # Check if it's off.
        response = self.connection.request(
            '/rest/hosting/vm/%s' % node.id
        ).parse_body()
        if response['vm']['state'] == 'off':
            node.state = NodeState.TERMINATED
        else:
            time.sleep(5)

    # Delete the VM
    # Black magic to make the POST requests work
    d = self.connection._encode_multipart_formdata({'action': 'delete'})
    self.connection.request(
        '/rest/hosting/vm/%s' % (node.id),
        method='POST',
        headers=d[0],
        data=d[1]
    ).parse_body()

    return True
|
||||
def list_images(self, location=None):
|
||||
"""
|
||||
Returns a list of all package templates aka appiances aka images.
|
||||
|
||||
@inherits: :class:`NodeDriver.list_images`
|
||||
"""
|
||||
|
||||
# Make the call
|
||||
response = self.connection.request(
|
||||
'/rest/hosting/ptemplate/list').parse_body()
|
||||
|
||||
# Turn the response into an array of NodeImage objects
|
||||
images = []
|
||||
for ptemplate in response['packages']:
|
||||
images.append(NodeImage(
|
||||
id=ptemplate['uuid'],
|
||||
name='%s: %s' % (ptemplate['name'], ptemplate['description']),
|
||||
driver=self,)
|
||||
)
|
||||
|
||||
return images
|
||||
|
||||
def list_sizes(self, location=None):
|
||||
"""
|
||||
Returns a list of all hardware templates
|
||||
|
||||
@inherits: :class:`NodeDriver.list_sizes`
|
||||
"""
|
||||
|
||||
# Make the call
|
||||
response = self.connection.request(
|
||||
'/rest/hosting/htemplate/list').parse_body()
|
||||
|
||||
# Turn the response into an array of NodeSize objects
|
||||
sizes = []
|
||||
for htemplate in response['templates']:
|
||||
sizes.append(NodeSize(
|
||||
id=htemplate['uuid'],
|
||||
name=htemplate['name'],
|
||||
ram=htemplate['memory'],
|
||||
disk=0, # Disk is independent of hardware template.
|
||||
bandwidth=0, # There is no way to keep track of bandwidth.
|
||||
price=0, # The billing system is external.
|
||||
driver=self,)
|
||||
)
|
||||
|
||||
return sizes
|
||||
|
||||
def list_locations(self):
|
||||
"""
|
||||
This feature does not exist in ECP. Returns hard coded dummy location.
|
||||
|
||||
:rtype: ``list`` of :class:`NodeLocation`
|
||||
"""
|
||||
return [NodeLocation(id=1,
|
||||
name="Cloud",
|
||||
country='',
|
||||
driver=self),
|
||||
]
|
||||
|
||||
def create_node(self, **kwargs):
|
||||
"""
|
||||
Creates a virtual machine.
|
||||
|
||||
:keyword name: String with a name for this new node (required)
|
||||
:type name: ``str``
|
||||
|
||||
:keyword size: The size of resources allocated to this node .
|
||||
(required)
|
||||
:type size: :class:`NodeSize`
|
||||
|
||||
:keyword image: OS Image to boot on node. (required)
|
||||
:type image: :class:`NodeImage`
|
||||
|
||||
:rtype: :class:`Node`
|
||||
"""
|
||||
|
||||
# Find out what network to put the VM on.
|
||||
res = self.connection.request(
|
||||
'/rest/hosting/network/list').parse_body()
|
||||
|
||||
# Use the first / default network because there is no way to specific
|
||||
# which one
|
||||
network = res['networks'][0]['uuid']
|
||||
|
||||
# Prepare to make the VM
|
||||
data = {
|
||||
'name': str(kwargs['name']),
|
||||
'package': str(kwargs['image'].id),
|
||||
'hardware': str(kwargs['size'].id),
|
||||
'network_uuid': str(network),
|
||||
'disk': ''
|
||||
}
|
||||
|
||||
# Black magic to make the POST requests work
|
||||
d = self.connection._encode_multipart_formdata(data)
|
||||
response = self.connection.request(
|
||||
'/rest/hosting/vm/',
|
||||
method='PUT',
|
||||
headers=d[0],
|
||||
data=d[1]
|
||||
).parse_body()
|
||||
|
||||
# Create a node object and return it.
|
||||
n = Node(
|
||||
id=response['machine_id'],
|
||||
name=data['name'],
|
||||
state=NodeState.PENDING,
|
||||
public_ips=[],
|
||||
private_ips=[],
|
||||
driver=self,
|
||||
)
|
||||
|
||||
return n
|
||||
236
awx/lib/site-packages/libcloud/compute/drivers/elastichosts.py
Normal file
236
awx/lib/site-packages/libcloud/compute/drivers/elastichosts.py
Normal file
@@ -0,0 +1,236 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
ElasticHosts Driver
|
||||
"""
|
||||
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver
|
||||
|
||||
|
||||
# API end-points
#
# Maps a short region identifier to the human readable location name,
# the country, and the API host that serves that region.
API_ENDPOINTS = {
    'lon-p': {
        'name': 'London Peer 1',
        'country': 'United Kingdom',
        'host': 'api-lon-p.elastichosts.com'
    },
    'lon-b': {
        'name': 'London BlueSquare',
        'country': 'United Kingdom',
        'host': 'api-lon-b.elastichosts.com'
    },
    'sat-p': {
        'name': 'San Antonio Peer 1',
        'country': 'United States',
        'host': 'api-sat-p.elastichosts.com'
    },
    'lax-p': {
        'name': 'Los Angeles Peer 1',
        'country': 'United States',
        'host': 'api-lax-p.elastichosts.com'
    },
    'sjc-c': {
        'name': 'San Jose (Silicon Valley)',
        'country': 'United States',
        'host': 'api-sjc-c.elastichosts.com'
    },
    'tor-p': {
        'name': 'Toronto Peer 1',
        'country': 'Canada',
        'host': 'api-tor-p.elastichosts.com'
    },
    'syd-y': {
        'name': 'Sydney',
        'country': 'Australia',
        # NOTE(review): the region key is 'syd-y' but the host says
        # 'syd-v' -- verify which identifier the provider actually uses.
        'host': 'api-syd-v.elastichosts.com'
    },
    'cn-1': {
        'name': 'Hong Kong',
        'country': 'China',
        'host': 'api-hkg-e.elastichosts.com'
    }
}

# Default API end-point for the base connection class.
DEFAULT_REGION = 'sat-p'

# Retrieved from http://www.elastichosts.com/cloud-hosting/api
#
# Catalogue of the provider supplied pre-installed drive images, keyed
# by drive uuid.  'supports_deployment' marks images on which SSH based
# deployment (deploy_node) can be used.
STANDARD_DRIVES = {
    '38df0986-4d85-4b76-b502-3878ffc80161': {
        'uuid': '38df0986-4d85-4b76-b502-3878ffc80161',
        'description': 'CentOS Linux 5.5',
        'size_gunzipped': '3GB',
        'supports_deployment': True,
    },
    '980cf63c-f21e-4382-997b-6541d5809629': {
        'uuid': '980cf63c-f21e-4382-997b-6541d5809629',
        'description': 'Debian Linux 5.0',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    'aee5589a-88c3-43ef-bb0a-9cab6e64192d': {
        'uuid': 'aee5589a-88c3-43ef-bb0a-9cab6e64192d',
        'description': 'Ubuntu Linux 10.04',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    '62f512cd-82c7-498e-88d8-a09ac2ef20e7': {
        'uuid': '62f512cd-82c7-498e-88d8-a09ac2ef20e7',
        'description': 'Ubuntu Linux 12.04',
        'size_gunzipped': '1GB',
        'supports_deployment': True,
    },
    'b9d0eb72-d273-43f1-98e3-0d4b87d372c0': {
        'uuid': 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0',
        'description': 'Windows Web Server 2008',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '30824e97-05a4-410c-946e-2ba5a92b07cb': {
        'uuid': '30824e97-05a4-410c-946e-2ba5a92b07cb',
        'description': 'Windows Web Server 2008 R2',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '9ecf810e-6ad1-40ef-b360-d606f0444671': {
        'uuid': '9ecf810e-6ad1-40ef-b360-d606f0444671',
        'description': 'Windows Web Server 2008 R2 + SQL Server',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '10a88d1c-6575-46e3-8d2c-7744065ea530': {
        'uuid': '10a88d1c-6575-46e3-8d2c-7744065ea530',
        'description': 'Windows Server 2008 Standard R2',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
    '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47': {
        'uuid': '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47',
        'description': 'Windows Server 2008 Standard R2 + SQL Server',
        'size_gunzipped': '13GB',
        'supports_deployment': False,
    },
}
|
||||
|
||||
|
||||
class ElasticHostsException(Exception):
    """Raised for application level errors returned by ElasticHosts."""

    def __str__(self):
        # The first positional argument carries the error message.
        return self.args[0]

    def __repr__(self):
        return "<ElasticHostsException '{0}'>".format(self.args[0])
|
||||
|
||||
|
||||
class ElasticHostsNodeDriver(ElasticStackBaseNodeDriver):
    """
    Node Driver class for ElasticHosts
    """
    type = Provider.ELASTICHOSTS
    api_name = 'elastichosts'
    name = 'ElasticHosts'
    website = 'http://www.elastichosts.com/'
    features = {"create_node": ["generates_password"]}
    _standard_drives = STANDARD_DRIVES

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        # Region-pinned subclasses hard-code their region via _region,
        # which always wins over the constructor argument.
        region = getattr(self, '_region', region)

        if region not in API_ENDPOINTS:
            raise ValueError('Invalid region: %s' % (region))

        # Remember whether the caller supplied an explicit host so that
        # _ex_connection_class_kwargs() knows not to override it.
        self._host_argument_set = host is not None

        super(ElasticHostsNodeDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port,
            region=region, **kwargs)

    def _ex_connection_class_kwargs(self):
        """
        Return the host value based on the user supplied region.
        """
        if self._host_argument_set:
            return {}
        return {'host': API_ENDPOINTS[self.region]['host']}
|
||||
|
||||
|
||||
class ElasticHostsUK1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the London Peer 1 end-point
    """
    name = 'ElasticHosts (lon-p)'
    # Pinned region; overrides the ``region`` constructor argument.
    _region = 'lon-p'
|
||||
|
||||
|
||||
class ElasticHostsUK2NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the London Bluesquare end-point
    """
    name = 'ElasticHosts (lon-b)'
    # Pinned region; overrides the ``region`` constructor argument.
    _region = 'lon-b'
|
||||
|
||||
|
||||
class ElasticHostsUS1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the San Antonio Peer 1 end-point
    """
    name = 'ElasticHosts (sat-p)'
    # Pinned region; overrides the ``region`` constructor argument.
    _region = 'sat-p'
|
||||
|
||||
|
||||
class ElasticHostsUS2NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Los Angeles Peer 1 end-point
    """
    name = 'ElasticHosts (lax-p)'
    # Pinned region; overrides the ``region`` constructor argument.
    _region = 'lax-p'
|
||||
|
||||
|
||||
class ElasticHostsUS3NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the San Jose (Silicon Valley) end-point
    """
    name = 'ElasticHosts (sjc-c)'
    # Pinned region; overrides the ``region`` constructor argument.
    _region = 'sjc-c'
|
||||
|
||||
|
||||
class ElasticHostsCA1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Toronto Peer 1 end-point
    """
    name = 'ElasticHosts (tor-p)'
    # Pinned region; overrides the ``region`` constructor argument.
    _region = 'tor-p'
|
||||
|
||||
|
||||
class ElasticHostsAU1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Sydney end-point
    """
    name = 'ElasticHosts (syd-y)'
    # Pinned region; matches the 'syd-y' key in API_ENDPOINTS.
    _region = 'syd-y'
|
||||
|
||||
|
||||
class ElasticHostsCN1NodeDriver(ElasticHostsNodeDriver):
    """
    ElasticHosts node driver for the Hong Kong end-point
    """
    name = 'ElasticHosts (cn-1)'
    # Pinned region; overrides the ``region`` constructor argument.
    _region = 'cn-1'
|
||||
488
awx/lib/site-packages/libcloud/compute/drivers/elasticstack.py
Normal file
488
awx/lib/site-packages/libcloud/compute/drivers/elasticstack.py
Normal file
@@ -0,0 +1,488 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Base driver for the providers based on the ElasticStack platform -
|
||||
http://www.elasticstack.com.
|
||||
"""
|
||||
|
||||
import re
|
||||
import time
|
||||
import base64
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
|
||||
from libcloud.common.types import InvalidCredsError
|
||||
from libcloud.compute.types import NodeState
|
||||
from libcloud.compute.base import NodeDriver, NodeSize, Node
|
||||
from libcloud.compute.base import NodeImage
|
||||
from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment
|
||||
from libcloud.compute.deployment import MultiStepDeployment
|
||||
|
||||
|
||||
# Maps an ElasticStack server status string to a libcloud NodeState.
# Any status not listed here is reported as NodeState.UNKNOWN (see
# ElasticStackBaseNodeDriver._to_node).
NODE_STATE_MAP = {
    'active': NodeState.RUNNING,
    'dead': NodeState.TERMINATED,
    'dumped': NodeState.TERMINATED,
}

# Default timeout (in seconds) for the drive imaging process
IMAGING_TIMEOUT = 10 * 60

# ElasticStack doesn't specify special instance types, so I just specified
# some plans based on the other provider offerings.
#
# Basically for CPU any value between 500Mhz and 20000Mhz should work,
# 256MB to 8192MB for ram and 1GB to 2TB for disk.
INSTANCE_TYPES = {
    'small': {
        'id': 'small',
        'name': 'Small instance',
        'cpu': 2000,
        'memory': 1700,
        'disk': 160,
        'bandwidth': None,
    },
    'medium': {
        'id': 'medium',
        'name': 'Medium instance',
        'cpu': 3000,
        'memory': 4096,
        'disk': 500,
        'bandwidth': None,
    },
    'large': {
        'id': 'large',
        'name': 'Large instance',
        'cpu': 4000,
        'memory': 7680,
        'disk': 850,
        'bandwidth': None,
    },
    'extra-large': {
        'id': 'extra-large',
        'name': 'Extra Large instance',
        'cpu': 8000,
        'memory': 8192,
        'disk': 1690,
        'bandwidth': None,
    },
    'high-cpu-medium': {
        'id': 'high-cpu-medium',
        'name': 'High-CPU Medium instance',
        'cpu': 5000,
        'memory': 1700,
        'disk': 350,
        'bandwidth': None,
    },
    'high-cpu-extra-large': {
        'id': 'high-cpu-extra-large',
        'name': 'High-CPU Extra Large instance',
        'cpu': 20000,
        'memory': 7168,
        'disk': 1690,
        'bandwidth': None,
    },
}
|
||||
|
||||
|
||||
class ElasticStackException(Exception):
    """Raised for application level errors returned by ElasticStack."""

    def __str__(self):
        # The first positional argument carries the error message.
        return self.args[0]

    def __repr__(self):
        return "<ElasticStackException '{0}'>".format(self.args[0])
|
||||
|
||||
|
||||
class ElasticStackResponse(JsonResponse):
    """JSON response class that maps API errors to libcloud exceptions."""

    def success(self):
        # A 401 always means bad credentials, regardless of the body.
        if self.status == 401:
            raise InvalidCredsError()
        return 200 <= self.status <= 299

    def parse_error(self):
        # The API reports errors via the X-Elastic-Error header; include
        # the body as additional context.
        error_header = self.headers.get('x-elastic-error', '')
        return 'X-Elastic-Error: %s (%s)' % (error_header, self.body.strip())
|
||||
|
||||
|
||||
class ElasticStackNodeSize(NodeSize):
    # Extends NodeSize with a ``cpu`` attribute (MHz), which ElasticStack
    # sizes carry in addition to the standard ram/disk/bandwidth/price.
    def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver):
        # Attributes are assigned directly rather than via the NodeSize
        # constructor; cpu is the ElasticStack specific extra field.
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver

    def __repr__(self):
        return (('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s '
                 'disk=%s bandwidth=%s price=%s driver=%s ...>')
                % (self.id, self.name, self.cpu, self.ram,
                   self.disk, self.bandwidth, self.price, self.driver.name))
|
||||
|
||||
|
||||
class ElasticStackBaseConnection(ConnectionUserAndKey):
    """
    Base connection class for the ElasticStack driver
    """

    host = None
    responseCls = ElasticStackResponse

    def add_default_headers(self, headers):
        # The API speaks JSON and authenticates with HTTP Basic auth
        # built from the user id and the API key.
        credentials = '%s:%s' % (self.user_id, self.key)
        token = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        headers['Authorization'] = 'Basic %s' % (token)
        return headers
|
||||
|
||||
|
||||
class ElasticStackBaseNodeDriver(NodeDriver):
    """
    Base node driver shared by the ElasticStack powered providers.

    Subclasses supply a connection class pointing at the provider's
    endpoint and a ``_standard_drives`` mapping (drive uuid -> metadata
    about the provider's pre-installed system images).
    """
    website = 'http://www.elasticstack.com'
    connectionCls = ElasticStackBaseConnection
    features = {"create_node": ["generates_password"]}

    def reboot_node(self, node):
        # Reboots the node via a hardware reset; True on HTTP 204.
        response = self.connection.request(
            action='/servers/%s/reset' % (node.id),
            method='POST'
        )
        return response.status == 204

    def destroy_node(self, node):
        # Kills the server immediately (no graceful shutdown);
        # True on HTTP 204.
        response = self.connection.request(
            action='/servers/%s/destroy' % (node.id),
            method='POST'
        )
        return response.status == 204

    def list_images(self, location=None):
        # Returns a list of available pre-installed system drive images,
        # taken from the static per-provider catalogue rather than the API.
        images = []
        for key, value in self._standard_drives.items():
            image = NodeImage(
                id=value['uuid'],
                name=value['description'],
                driver=self.connection.driver,
                extra={
                    'size_gunzipped': value['size_gunzipped']
                }
            )
            images.append(image)

        return images

    def list_sizes(self, location=None):
        # Sizes come from the static INSTANCE_TYPES table; prices are
        # looked up through the standard libcloud pricing mechanism.
        sizes = []
        for key, value in INSTANCE_TYPES.items():
            size = ElasticStackNodeSize(
                id=value['id'],
                name=value['name'], cpu=value['cpu'], ram=value['memory'],
                disk=value['disk'], bandwidth=value['bandwidth'],
                price=self._get_size_price(size_id=value['id']),
                driver=self.connection.driver
            )
            sizes.append(size)

        return sizes

    def list_nodes(self):
        # Returns a list of active (running) nodes
        response = self.connection.request(action='/servers/info').object

        nodes = []
        for data in response:
            node = self._to_node(data)
            nodes.append(node)

        return nodes

    def create_node(self, **kwargs):
        """Creates an ElasticStack instance

        @inherits: :class:`NodeDriver.create_node`

        :keyword name: String with a name for this new node (required)
        :type name: ``str``

        :keyword smp: Number of virtual processors or None to calculate
                      based on the cpu speed
        :type smp: ``int``

        :keyword nic_model: e1000, rtl8139 or virtio
                            (if not specified, e1000 is used)
        :type nic_model: ``str``

        :keyword vnc_password: If set, the same password is also used for
                               SSH access with user toor,
                               otherwise VNC access is disabled and
                               no SSH login is possible.
        :type vnc_password: ``str``
        """
        size = kwargs['size']
        image = kwargs['image']
        smp = kwargs.get('smp', 'auto')
        nic_model = kwargs.get('nic_model', 'e1000')
        vnc_password = ssh_password = kwargs.get('vnc_password', None)

        if nic_model not in ('e1000', 'rtl8139', 'virtio'):
            raise ElasticStackException('Invalid NIC model specified')

        # check that drive size is not smaller than pre installed image size

        # First we create a drive with the specified size
        drive_data = {}
        drive_data.update({'name': kwargs['name'],
                           'size': '%sG' % (kwargs['size'].disk)})

        response = self.connection.request(action='/drives/create',
                                           data=json.dumps(drive_data),
                                           method='POST').object

        if not response:
            raise ElasticStackException('Drive creation failed')

        drive_uuid = response['drive']

        # Then we image the selected pre-installed system drive onto it
        response = self.connection.request(
            action='/drives/%s/image/%s/gunzip' % (drive_uuid, image.id),
            method='POST'
        )

        if response.status not in (200, 204):
            raise ElasticStackException('Drive imaging failed')

        # We wait until the drive is imaged and then boot up the node
        # (in most cases, the imaging process shouldn't take longer
        # than a few minutes).  The 'imaging' key disappears from the
        # drive info once the process has finished.
        response = self.connection.request(
            action='/drives/%s/info' % (drive_uuid)
        ).object

        imaging_start = time.time()
        while 'imaging' in response:
            response = self.connection.request(
                action='/drives/%s/info' % (drive_uuid)
            ).object

            elapsed_time = time.time() - imaging_start
            if ('imaging' in response and elapsed_time >= IMAGING_TIMEOUT):
                raise ElasticStackException('Drive imaging timed out')

            time.sleep(1)

        # Build the server description: boot from the freshly imaged
        # drive on the first IDE channel, DHCP on the first NIC.
        node_data = {}
        node_data.update({'name': kwargs['name'],
                          'cpu': size.cpu,
                          'mem': size.ram,
                          'ide:0:0': drive_uuid,
                          'boot': 'ide:0:0',
                          'smp': smp})
        node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'})

        if vnc_password:
            node_data.update({'vnc': 'auto', 'vnc:password': vnc_password})

        response = self.connection.request(
            action='/servers/create', data=json.dumps(node_data),
            method='POST'
        ).object

        # The API may return either a single server dict or a list of
        # them; mirror that shape in the return value.
        if isinstance(response, list):
            nodes = [self._to_node(node, ssh_password) for node in response]
        else:
            nodes = self._to_node(response, ssh_password)

        return nodes

    # Extension methods
    def ex_set_node_configuration(self, node, **kwargs):
        """
        Changes the configuration of the running server

        :param node: Node which should be used
        :type node: :class:`Node`

        :param kwargs: keyword arguments
        :type kwargs: ``dict``

        :rtype: ``bool``
        """
        # Whitelist of configuration keys accepted by the API, expressed
        # as regular expressions.
        valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$',
                      '^boot$', '^nic:0:model$', '^nic:0:dhcp',
                      '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$',
                      '^vnc:ip$', '^vnc:password$', '^vnc:tls',
                      '^ide:[0-1]:[0-1](:media)?$',
                      '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$')

        invalid_keys = []
        keys = list(kwargs.keys())
        for key in keys:
            matches = False
            for regex in valid_keys:
                if re.match(regex, key):
                    matches = True
                    break
            if not matches:
                invalid_keys.append(key)

        if invalid_keys:
            raise ElasticStackException(
                'Invalid configuration key specified: %s'
                % (',' .join(invalid_keys))
            )

        response = self.connection.request(
            action='/servers/%s/set' % (node.id), data=json.dumps(kwargs),
            method='POST'
        )

        return (response.status == httplib.OK and response.body != '')

    def deploy_node(self, **kwargs):
        """
        Create a new node, and start deployment.

        @inherits: :class:`NodeDriver.deploy_node`

        :keyword enable_root: If true, root password will be set to
                              vnc_password (this will enable SSH access)
                              and default 'toor' account will be deleted.
        :type enable_root: ``bool``
        """
        image = kwargs['image']
        vnc_password = kwargs.get('vnc_password', None)
        enable_root = kwargs.get('enable_root', False)

        if not vnc_password:
            raise ValueError('You need to provide vnc_password argument '
                             'if you want to use deployment')

        # NOTE(review): ``image`` is a NodeImage object while the
        # _standard_drives keys are uuid strings, so this membership test
        # likely never matches -- probably should be ``image.id``; confirm.
        if (image in self._standard_drives and
                not self._standard_drives[image]['supports_deployment']):
            raise ValueError('Image %s does not support deployment'
                             % (image.id))

        if enable_root:
            # Shell snippet that sets the root password to vnc_password,
            # removes the default 'toor' account and scrubs history.
            script = ("unset HISTFILE;"
                      "echo root:%s | chpasswd;"
                      "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;"
                      "history -c") % vnc_password
            root_enable_script = ScriptDeployment(script=script,
                                                  delete=True)
            deploy = kwargs.get('deploy', None)
            if deploy:
                # Chain the root-enable step after any user supplied
                # deployment step(s).
                if (isinstance(deploy, ScriptDeployment) or
                        isinstance(deploy, SSHKeyDeployment)):
                    deployment = MultiStepDeployment([deploy,
                                                      root_enable_script])
                elif isinstance(deploy, MultiStepDeployment):
                    deployment = deploy
                    deployment.add(root_enable_script)
            else:
                deployment = root_enable_script

            kwargs['deploy'] = deployment

        if not kwargs.get('ssh_username', None):
            # 'toor' is the default SSH account on ElasticStack images.
            kwargs['ssh_username'] = 'toor'

        return super(ElasticStackBaseNodeDriver, self).deploy_node(**kwargs)

    def ex_shutdown_node(self, node):
        """
        Sends the ACPI power-down event

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/servers/%s/shutdown' % (node.id),
            method='POST'
        )
        return response.status == 204

    def ex_destroy_drive(self, drive_uuid):
        """
        Deletes a drive

        :param drive_uuid: Drive uuid which should be used
        :type drive_uuid: ``str``

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/drives/%s/destroy' % (drive_uuid),
            method='POST'
        )
        return response.status == 204

    # Helper methods
    def _to_node(self, data, ssh_password=None):
        # Convert a raw API server dict into a libcloud Node.  Statuses
        # not in NODE_STATE_MAP are reported as UNKNOWN.
        try:
            state = NODE_STATE_MAP[data['status']]
        except KeyError:
            state = NodeState.UNKNOWN

        # 'nic:0:dhcp' can be a single address or a list of them.
        if isinstance(data['nic:0:dhcp'], list):
            public_ip = data['nic:0:dhcp']
        else:
            public_ip = [data['nic:0:dhcp']]

        extra = {'cpu': data['cpu'],
                 'smp': data['smp'],
                 'mem': data['mem'],
                 'started': data['started']}

        if 'vnc:ip' in data:
            extra['vnc:ip'] = data['vnc:ip']

        if 'vnc:password' in data:
            extra['vnc:password'] = data['vnc:password']

        # Surface the boot device(s) and their drive uuids in 'extra'.
        boot_device = data['boot']

        if isinstance(boot_device, list):
            for device in boot_device:
                extra[device] = data[device]
        else:
            extra[boot_device] = data[boot_device]

        if ssh_password:
            extra.update({'password': ssh_password})

        node = Node(id=data['server'], name=data['name'], state=state,
                    public_ips=public_ip, private_ips=None,
                    driver=self.connection.driver,
                    extra=extra)

        return node
|
||||
31
awx/lib/site-packages/libcloud/compute/drivers/exoscale.py
Normal file
31
awx/lib/site-packages/libcloud/compute/drivers/exoscale.py
Normal file
@@ -0,0 +1,31 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
|
||||
|
||||
__all__ = [
|
||||
'ExoscaleNodeDriver'
|
||||
]
|
||||
|
||||
|
||||
class ExoscaleNodeDriver(CloudStackNodeDriver):
    """Driver for the Exoscale public cloud, built on CloudStack."""
    type = Provider.EXOSCALE
    name = 'Exoscale'
    website = 'https://www.exoscale.ch/'

    # API endpoint info
    host = 'api.exoscale.ch'
    path = '/compute'
|
||||
619
awx/lib/site-packages/libcloud/compute/drivers/gandi.py
Normal file
619
awx/lib/site-packages/libcloud/compute/drivers/gandi.py
Normal file
@@ -0,0 +1,619 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Gandi driver for compute
|
||||
"""
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
from libcloud.common.gandi import BaseGandiDriver, GandiException,\
|
||||
NetworkInterface, IPAddress, Disk
|
||||
from libcloud.compute.base import StorageVolume
|
||||
from libcloud.compute.types import NodeState, Provider
|
||||
from libcloud.compute.base import Node, NodeDriver
|
||||
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
|
||||
|
||||
|
||||
# Maps a Gandi VM state string to a libcloud NodeState.
NODE_STATE_MAP = {
    'running': NodeState.RUNNING,
    'halted': NodeState.TERMINATED,
    'paused': NodeState.TERMINATED,
    'locked': NodeState.TERMINATED,
    'being_created': NodeState.PENDING,
    'invalid': NodeState.UNKNOWN,
    'legally_locked': NodeState.PENDING,
    'deleted': NodeState.TERMINATED
}

# Flat hourly price (USD) applied to every instance type below.
NODE_PRICE_HOURLY_USD = 0.02

# Synthetic instance plans; Gandi bills by resource shares rather than
# fixed instance types, so these are representative bundles.
INSTANCE_TYPES = {
    'small': {
        'id': 'small',
        'name': 'Small instance',
        'cpu': 1,
        'memory': 256,
        'disk': 3,
        'bandwidth': 10240,
    },
    'medium': {
        'id': 'medium',
        'name': 'Medium instance',
        'cpu': 1,
        'memory': 1024,
        'disk': 20,
        'bandwidth': 10240,
    },
    'large': {
        'id': 'large',
        'name': 'Large instance',
        'cpu': 2,
        'memory': 2048,
        'disk': 50,
        'bandwidth': 10240,
    },
    'x-large': {
        'id': 'x-large',
        'name': 'Extra Large instance',
        'cpu': 4,
        'memory': 4096,
        'disk': 100,
        'bandwidth': 10240,
    },
}
|
||||
|
||||
|
||||
class GandiNodeDriver(BaseGandiDriver, NodeDriver):
|
||||
"""
|
||||
Gandi node driver
|
||||
|
||||
"""
|
||||
api_name = 'gandi'
|
||||
friendly_name = 'Gandi.net'
|
||||
website = 'http://www.gandi.net/'
|
||||
country = 'FR'
|
||||
type = Provider.GANDI
|
||||
# TODO : which features to enable ?
|
||||
features = {}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""
|
||||
@inherits: :class:`NodeDriver.__init__`
|
||||
"""
|
||||
super(BaseGandiDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def _resource_info(self, type, id):
|
||||
try:
|
||||
obj = self.connection.request('hosting.%s.info' % type, int(id))
|
||||
return obj.object
|
||||
except Exception:
|
||||
e = sys.exc_info()[1]
|
||||
raise GandiException(1003, e)
|
||||
return None
|
||||
|
||||
def _node_info(self, id):
|
||||
return self._resource_info('vm', id)
|
||||
|
||||
def _volume_info(self, id):
|
||||
return self._resource_info('disk', id)
|
||||
|
||||
# Generic methods for driver
|
||||
def _to_node(self, vm):
|
||||
return Node(
|
||||
id=vm['id'],
|
||||
name=vm['hostname'],
|
||||
state=NODE_STATE_MAP.get(
|
||||
vm['state'],
|
||||
NodeState.UNKNOWN
|
||||
),
|
||||
public_ips=vm.get('ips', []),
|
||||
private_ips=[],
|
||||
driver=self,
|
||||
extra={
|
||||
'ai_active': vm.get('ai_active'),
|
||||
'datacenter_id': vm.get('datacenter_id'),
|
||||
'description': vm.get('description')
|
||||
}
|
||||
)
|
||||
|
||||
def _to_nodes(self, vms):
|
||||
return [self._to_node(v) for v in vms]
|
||||
|
||||
def _to_volume(self, disk):
|
||||
extra = {'can_snapshot': disk['can_snapshot']}
|
||||
return StorageVolume(
|
||||
id=disk['id'],
|
||||
name=disk['name'],
|
||||
size=int(disk['size']),
|
||||
driver=self,
|
||||
extra=extra)
|
||||
|
||||
def _to_volumes(self, disks):
|
||||
return [self._to_volume(d) for d in disks]
|
||||
|
||||
def list_nodes(self):
|
||||
vms = self.connection.request('hosting.vm.list').object
|
||||
ips = self.connection.request('hosting.ip.list').object
|
||||
for vm in vms:
|
||||
vm['ips'] = []
|
||||
for ip in ips:
|
||||
if vm['ifaces_id'][0] == ip['iface_id']:
|
||||
ip = ip.get('ip', None)
|
||||
if ip:
|
||||
vm['ips'].append(ip)
|
||||
|
||||
nodes = self._to_nodes(vms)
|
||||
return nodes
|
||||
|
||||
def reboot_node(self, node):
|
||||
op = self.connection.request('hosting.vm.reboot', int(node.id))
|
||||
self._wait_operation(op.object['id'])
|
||||
vm = self._node_info(int(node.id))
|
||||
if vm['state'] == 'running':
|
||||
return True
|
||||
return False
|
||||
|
||||
def destroy_node(self, node):
|
||||
vm = self._node_info(node.id)
|
||||
if vm['state'] == 'running':
|
||||
# Send vm_stop and wait for accomplish
|
||||
op_stop = self.connection.request('hosting.vm.stop', int(node.id))
|
||||
if not self._wait_operation(op_stop.object['id']):
|
||||
raise GandiException(1010, 'vm.stop failed')
|
||||
# Delete
|
||||
op = self.connection.request('hosting.vm.delete', int(node.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def deploy_node(self, **kwargs):
|
||||
"""
|
||||
deploy_node is not implemented for gandi driver
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
raise NotImplementedError(
|
||||
'deploy_node not implemented for gandi driver')
|
||||
|
||||
def create_node(self, **kwargs):
|
||||
"""
|
||||
Create a new Gandi node
|
||||
|
||||
:keyword name: String with a name for this new node (required)
|
||||
:type name: ``str``
|
||||
|
||||
:keyword image: OS Image to boot on node. (required)
|
||||
:type image: :class:`NodeImage`
|
||||
|
||||
:keyword location: Which data center to create a node in. If empty,
|
||||
undefined behavior will be selected. (optional)
|
||||
:type location: :class:`NodeLocation`
|
||||
|
||||
:keyword size: The size of resources allocated to this node.
|
||||
(required)
|
||||
:type size: :class:`NodeSize`
|
||||
|
||||
:keyword login: user name to create for login on machine (required)
|
||||
:type login: ``str``
|
||||
|
||||
:keyword password: password for user that'll be created (required)
|
||||
:type password: ``str``
|
||||
|
||||
:keyword inet_family: version of ip to use, default 4 (optional)
|
||||
:type inet_family: ``int``
|
||||
|
||||
:rtype: :class:`Node`
|
||||
"""
|
||||
|
||||
if kwargs.get('login') is None or kwargs.get('password') is None:
|
||||
raise GandiException(
|
||||
1020, 'login and password must be defined for node creation')
|
||||
|
||||
location = kwargs.get('location')
|
||||
if location and isinstance(location, NodeLocation):
|
||||
dc_id = int(location.id)
|
||||
else:
|
||||
raise GandiException(
|
||||
1021, 'location must be a subclass of NodeLocation')
|
||||
|
||||
size = kwargs.get('size')
|
||||
if not size and not isinstance(size, NodeSize):
|
||||
raise GandiException(
|
||||
1022, 'size must be a subclass of NodeSize')
|
||||
|
||||
# If size name is in INSTANCE_TYPE we use new rating model
|
||||
instance = INSTANCE_TYPES.get(size.id)
|
||||
cores = instance['cpu'] if instance else int(size.id)
|
||||
|
||||
src_disk_id = int(kwargs['image'].id)
|
||||
|
||||
disk_spec = {
|
||||
'datacenter_id': dc_id,
|
||||
'name': 'disk_%s' % kwargs['name']
|
||||
}
|
||||
|
||||
vm_spec = {
|
||||
'datacenter_id': dc_id,
|
||||
'hostname': kwargs['name'],
|
||||
'login': kwargs['login'],
|
||||
'password': kwargs['password'], # TODO : use NodeAuthPassword
|
||||
'memory': int(size.ram),
|
||||
'cores': cores,
|
||||
'bandwidth': int(size.bandwidth),
|
||||
'ip_version': kwargs.get('inet_family', 4),
|
||||
}
|
||||
|
||||
# Call create_from helper api. Return 3 operations : disk_create,
|
||||
# iface_create,vm_create
|
||||
(op_disk, op_iface, op_vm) = self.connection.request(
|
||||
'hosting.vm.create_from',
|
||||
vm_spec, disk_spec, src_disk_id
|
||||
).object
|
||||
|
||||
# We wait for vm_create to finish
|
||||
if self._wait_operation(op_vm['id']):
|
||||
# after successful operation, get ip information
|
||||
# thru first interface
|
||||
node = self._node_info(op_vm['vm_id'])
|
||||
ifaces = node.get('ifaces')
|
||||
if len(ifaces) > 0:
|
||||
ips = ifaces[0].get('ips')
|
||||
if len(ips) > 0:
|
||||
node['ip'] = ips[0]['ip']
|
||||
return self._to_node(node)
|
||||
|
||||
return None
|
||||
|
||||
def _to_image(self, img):
|
||||
return NodeImage(
|
||||
id=img['disk_id'],
|
||||
name=img['label'],
|
||||
driver=self.connection.driver
|
||||
)
|
||||
|
||||
def list_images(self, location=None):
|
||||
try:
|
||||
if location:
|
||||
filtering = {'datacenter_id': int(location.id)}
|
||||
else:
|
||||
filtering = {}
|
||||
images = self.connection.request('hosting.image.list', filtering)
|
||||
return [self._to_image(i) for i in images.object]
|
||||
except Exception:
|
||||
e = sys.exc_info()[1]
|
||||
raise GandiException(1011, e)
|
||||
|
||||
def _to_size(self, id, size):
|
||||
return NodeSize(
|
||||
id=id,
|
||||
name='%s cores' % id,
|
||||
ram=size['memory'],
|
||||
disk=size['disk'],
|
||||
bandwidth=size['bandwidth'],
|
||||
price=(self._get_size_price(size_id='1') * id),
|
||||
driver=self.connection.driver,
|
||||
)
|
||||
|
||||
def _instance_type_to_size(self, instance):
|
||||
return NodeSize(
|
||||
id=instance['id'],
|
||||
name=instance['name'],
|
||||
ram=instance['memory'],
|
||||
disk=instance['disk'],
|
||||
bandwidth=instance['bandwidth'],
|
||||
price=self._get_size_price(size_id=instance['id']),
|
||||
driver=self.connection.driver,
|
||||
)
|
||||
|
||||
def list_instance_type(self, location=None):
|
||||
return [self._instance_type_to_size(instance)
|
||||
for name, instance in INSTANCE_TYPES.items()]
|
||||
|
||||
def list_sizes(self, location=None):
|
||||
account = self.connection.request('hosting.account.info').object
|
||||
if account.get('rating_enabled'):
|
||||
# This account use new rating model
|
||||
return self.list_instance_type(location)
|
||||
# Look for available shares, and return a list of share_definition
|
||||
available_res = account['resources']['available']
|
||||
|
||||
if available_res['shares'] == 0:
|
||||
return None
|
||||
else:
|
||||
share_def = account['share_definition']
|
||||
available_cores = available_res['cores']
|
||||
# 0.75 core given when creating a server
|
||||
max_core = int(available_cores + 0.75)
|
||||
shares = []
|
||||
if available_res['servers'] < 1:
|
||||
# No server quota, no way
|
||||
return shares
|
||||
for i in range(1, max_core + 1):
|
||||
share = {id: i}
|
||||
share_is_available = True
|
||||
for k in ['memory', 'disk', 'bandwidth']:
|
||||
if share_def[k] * i > available_res[k]:
|
||||
# We run out for at least one resource inside
|
||||
share_is_available = False
|
||||
else:
|
||||
share[k] = share_def[k] * i
|
||||
if share_is_available:
|
||||
nb_core = i
|
||||
shares.append(self._to_size(nb_core, share))
|
||||
return shares
|
||||
|
||||
def _to_loc(self, loc):
|
||||
return NodeLocation(
|
||||
id=loc['id'],
|
||||
name=loc['name'],
|
||||
country=loc['country'],
|
||||
driver=self
|
||||
)
|
||||
|
||||
def list_locations(self):
|
||||
res = self.connection.request('hosting.datacenter.list')
|
||||
return [self._to_loc(l) for l in res.object]
|
||||
|
||||
def list_volumes(self):
|
||||
"""
|
||||
|
||||
:rtype: ``list`` of :class:`StorageVolume`
|
||||
"""
|
||||
res = self.connection.request('hosting.disk.list', {})
|
||||
return self._to_volumes(res.object)
|
||||
|
||||
def create_volume(self, size, name, location=None, snapshot=None):
|
||||
disk_param = {
|
||||
'name': name,
|
||||
'size': int(size),
|
||||
'datacenter_id': int(location.id)
|
||||
}
|
||||
if snapshot:
|
||||
op = self.connection.request('hosting.disk.create_from',
|
||||
disk_param, int(snapshot.id))
|
||||
else:
|
||||
op = self.connection.request('hosting.disk.create', disk_param)
|
||||
if self._wait_operation(op.object['id']):
|
||||
disk = self._volume_info(op.object['disk_id'])
|
||||
return self._to_volume(disk)
|
||||
return None
|
||||
|
||||
def attach_volume(self, node, volume, device=None):
|
||||
op = self.connection.request('hosting.vm.disk_attach',
|
||||
int(node.id), int(volume.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def detach_volume(self, node, volume):
|
||||
"""
|
||||
Detaches a volume from a node.
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
:param volume: Volume to be detached
|
||||
:type volume: :class:`StorageVolume`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
op = self.connection.request('hosting.vm.disk_detach',
|
||||
int(node.id), int(volume.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def destroy_volume(self, volume):
|
||||
op = self.connection.request('hosting.disk.delete', int(volume.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _to_iface(self, iface):
|
||||
ips = []
|
||||
for ip in iface.get('ips', []):
|
||||
new_ip = IPAddress(
|
||||
ip['id'],
|
||||
NODE_STATE_MAP.get(
|
||||
ip['state'],
|
||||
NodeState.UNKNOWN
|
||||
),
|
||||
ip['ip'],
|
||||
self.connection.driver,
|
||||
version=ip.get('version'),
|
||||
extra={'reverse': ip['reverse']}
|
||||
)
|
||||
ips.append(new_ip)
|
||||
return NetworkInterface(
|
||||
iface['id'],
|
||||
NODE_STATE_MAP.get(
|
||||
iface['state'],
|
||||
NodeState.UNKNOWN
|
||||
),
|
||||
mac_address=None,
|
||||
driver=self.connection.driver,
|
||||
ips=ips,
|
||||
node_id=iface.get('vm_id'),
|
||||
extra={'bandwidth': iface['bandwidth']},
|
||||
)
|
||||
|
||||
def _to_ifaces(self, ifaces):
|
||||
return [self._to_iface(i) for i in ifaces]
|
||||
|
||||
def ex_list_interfaces(self):
|
||||
"""
|
||||
Specific method to list network interfaces
|
||||
|
||||
:rtype: ``list`` of :class:`GandiNetworkInterface`
|
||||
"""
|
||||
ifaces = self.connection.request('hosting.iface.list').object
|
||||
ips = self.connection.request('hosting.ip.list').object
|
||||
for iface in ifaces:
|
||||
iface['ips'] = list(
|
||||
filter(lambda i: i['iface_id'] == iface['id'], ips))
|
||||
return self._to_ifaces(ifaces)
|
||||
|
||||
def _to_disk(self, element):
|
||||
disk = Disk(
|
||||
id=element['id'],
|
||||
state=NODE_STATE_MAP.get(
|
||||
element['state'],
|
||||
NodeState.UNKNOWN
|
||||
),
|
||||
name=element['name'],
|
||||
driver=self.connection.driver,
|
||||
size=element['size'],
|
||||
extra={'can_snapshot': element['can_snapshot']}
|
||||
)
|
||||
return disk
|
||||
|
||||
def _to_disks(self, elements):
|
||||
return [self._to_disk(el) for el in elements]
|
||||
|
||||
def ex_list_disks(self):
|
||||
"""
|
||||
Specific method to list all disk
|
||||
|
||||
:rtype: ``list`` of :class:`GandiDisk`
|
||||
"""
|
||||
res = self.connection.request('hosting.disk.list', {})
|
||||
return self._to_disks(res.object)
|
||||
|
||||
def ex_node_attach_disk(self, node, disk):
|
||||
"""
|
||||
Specific method to attach a disk to a node
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
:param disk: Disk which should be used
|
||||
:type disk: :class:`GandiDisk`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
op = self.connection.request('hosting.vm.disk_attach',
|
||||
int(node.id), int(disk.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def ex_node_detach_disk(self, node, disk):
|
||||
"""
|
||||
Specific method to detach a disk from a node
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
:param disk: Disk which should be used
|
||||
:type disk: :class:`GandiDisk`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
op = self.connection.request('hosting.vm.disk_detach',
|
||||
int(node.id), int(disk.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def ex_node_attach_interface(self, node, iface):
|
||||
"""
|
||||
Specific method to attach an interface to a node
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
|
||||
:param iface: Network interface which should be used
|
||||
:type iface: :class:`GandiNetworkInterface`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
op = self.connection.request('hosting.vm.iface_attach',
|
||||
int(node.id), int(iface.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def ex_node_detach_interface(self, node, iface):
|
||||
"""
|
||||
Specific method to detach an interface from a node
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
|
||||
:param iface: Network interface which should be used
|
||||
:type iface: :class:`GandiNetworkInterface`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
op = self.connection.request('hosting.vm.iface_detach',
|
||||
int(node.id), int(iface.id))
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def ex_snapshot_disk(self, disk, name=None):
|
||||
"""
|
||||
Specific method to make a snapshot of a disk
|
||||
|
||||
:param disk: Disk which should be used
|
||||
:type disk: :class:`GandiDisk`
|
||||
|
||||
:param name: Name which should be used
|
||||
:type name: ``str``
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
if not disk.extra.get('can_snapshot'):
|
||||
raise GandiException(1021, 'Disk %s can\'t snapshot' % disk.id)
|
||||
if not name:
|
||||
suffix = datetime.today().strftime('%Y%m%d')
|
||||
name = 'snap_%s' % (suffix)
|
||||
op = self.connection.request(
|
||||
'hosting.disk.create_from',
|
||||
{'name': name, 'type': 'snapshot', },
|
||||
int(disk.id),
|
||||
)
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
|
||||
def ex_update_disk(self, disk, new_size=None, new_name=None):
|
||||
"""Specific method to update size or name of a disk
|
||||
WARNING: if a server is attached it'll be rebooted
|
||||
|
||||
:param disk: Disk which should be used
|
||||
:type disk: :class:`GandiDisk`
|
||||
|
||||
:param new_size: New size
|
||||
:type new_size: ``int``
|
||||
|
||||
:param new_name: New name
|
||||
:type new_name: ``str``
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
params = {}
|
||||
if new_size:
|
||||
params.update({'size': new_size})
|
||||
if new_name:
|
||||
params.update({'name': new_name})
|
||||
op = self.connection.request('hosting.disk.update',
|
||||
int(disk.id),
|
||||
params)
|
||||
if self._wait_operation(op.object['id']):
|
||||
return True
|
||||
return False
|
||||
3346
awx/lib/site-packages/libcloud/compute/drivers/gce.py
Normal file
3346
awx/lib/site-packages/libcloud/compute/drivers/gce.py
Normal file
File diff suppressed because it is too large
Load Diff
464
awx/lib/site-packages/libcloud/compute/drivers/gogrid.py
Normal file
464
awx/lib/site-packages/libcloud/compute/drivers/gogrid.py
Normal file
@@ -0,0 +1,464 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
GoGrid driver
|
||||
"""
|
||||
import time
|
||||
import hashlib
|
||||
import copy
|
||||
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.types import InvalidCredsError, LibcloudError
|
||||
from libcloud.common.gogrid import GoGridConnection, BaseGoGridDriver
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.types import NodeState
|
||||
from libcloud.compute.base import Node, NodeDriver
|
||||
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
|
||||
|
||||
STATE = {
|
||||
"Starting": NodeState.PENDING,
|
||||
"On": NodeState.RUNNING,
|
||||
"On/Saving": NodeState.RUNNING,
|
||||
"Off": NodeState.PENDING,
|
||||
"Restarting": NodeState.REBOOTING,
|
||||
"Saving": NodeState.PENDING,
|
||||
"Restoring": NodeState.PENDING,
|
||||
}
|
||||
|
||||
GOGRID_INSTANCE_TYPES = {
|
||||
'512MB': {'id': '512MB',
|
||||
'name': '512MB',
|
||||
'ram': 512,
|
||||
'disk': 30,
|
||||
'bandwidth': None},
|
||||
'1GB': {'id': '1GB',
|
||||
'name': '1GB',
|
||||
'ram': 1024,
|
||||
'disk': 60,
|
||||
'bandwidth': None},
|
||||
'2GB': {'id': '2GB',
|
||||
'name': '2GB',
|
||||
'ram': 2048,
|
||||
'disk': 120,
|
||||
'bandwidth': None},
|
||||
'4GB': {'id': '4GB',
|
||||
'name': '4GB',
|
||||
'ram': 4096,
|
||||
'disk': 240,
|
||||
'bandwidth': None},
|
||||
'8GB': {'id': '8GB',
|
||||
'name': '8GB',
|
||||
'ram': 8192,
|
||||
'disk': 480,
|
||||
'bandwidth': None},
|
||||
'16GB': {'id': '16GB',
|
||||
'name': '16GB',
|
||||
'ram': 16384,
|
||||
'disk': 960,
|
||||
'bandwidth': None},
|
||||
'24GB': {'id': '24GB',
|
||||
'name': '24GB',
|
||||
'ram': 24576,
|
||||
'disk': 960,
|
||||
'bandwidth': None},
|
||||
}
|
||||
|
||||
|
||||
class GoGridNode(Node):
|
||||
# Generating uuid based on public ip to get around missing id on
|
||||
# create_node in gogrid api
|
||||
#
|
||||
# Used public ip since it is not mutable and specified at create time,
|
||||
# so uuid of node should not change after add is completed
|
||||
def get_uuid(self):
|
||||
return hashlib.sha1(
|
||||
b("%s:%s" % (self.public_ips, self.driver.type))
|
||||
).hexdigest()
|
||||
|
||||
|
||||
class GoGridNodeDriver(BaseGoGridDriver, NodeDriver):
|
||||
"""
|
||||
GoGrid node driver
|
||||
"""
|
||||
|
||||
connectionCls = GoGridConnection
|
||||
type = Provider.GOGRID
|
||||
api_name = 'gogrid'
|
||||
name = 'GoGrid'
|
||||
website = 'http://www.gogrid.com/'
|
||||
features = {"create_node": ["generates_password"]}
|
||||
|
||||
_instance_types = GOGRID_INSTANCE_TYPES
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""
|
||||
@inherits: :class:`NodeDriver.__init__`
|
||||
"""
|
||||
super(GoGridNodeDriver, self).__init__(*args, **kwargs)
|
||||
|
||||
def _get_state(self, element):
|
||||
try:
|
||||
return STATE[element['state']['name']]
|
||||
except:
|
||||
pass
|
||||
return NodeState.UNKNOWN
|
||||
|
||||
def _get_ip(self, element):
|
||||
return element.get('ip').get('ip')
|
||||
|
||||
def _get_id(self, element):
|
||||
return element.get('id')
|
||||
|
||||
def _to_node(self, element, password=None):
|
||||
state = self._get_state(element)
|
||||
ip = self._get_ip(element)
|
||||
id = self._get_id(element)
|
||||
n = GoGridNode(id=id,
|
||||
name=element['name'],
|
||||
state=state,
|
||||
public_ips=[ip],
|
||||
private_ips=[],
|
||||
extra={'ram': element.get('ram').get('name'),
|
||||
'description': element.get('description', '')},
|
||||
driver=self.connection.driver)
|
||||
if password:
|
||||
n.extra['password'] = password
|
||||
|
||||
return n
|
||||
|
||||
def _to_image(self, element):
|
||||
n = NodeImage(id=element['id'],
|
||||
name=element['friendlyName'],
|
||||
driver=self.connection.driver)
|
||||
return n
|
||||
|
||||
def _to_images(self, object):
|
||||
return [self._to_image(el)
|
||||
for el in object['list']]
|
||||
|
||||
def _to_location(self, element):
|
||||
location = NodeLocation(id=element['id'],
|
||||
name=element['name'],
|
||||
country="US",
|
||||
driver=self.connection.driver)
|
||||
return location
|
||||
|
||||
def _to_locations(self, object):
|
||||
return [self._to_location(el)
|
||||
for el in object['list']]
|
||||
|
||||
def list_images(self, location=None):
|
||||
params = {}
|
||||
if location is not None:
|
||||
params["datacenter"] = location.id
|
||||
images = self._to_images(
|
||||
self.connection.request('/api/grid/image/list', params).object)
|
||||
return images
|
||||
|
||||
def list_nodes(self):
|
||||
"""
|
||||
@inherits: :class:`NodeDriver.list_nodes`
|
||||
:rtype: ``list`` of :class:`GoGridNode`
|
||||
"""
|
||||
passwords_map = {}
|
||||
|
||||
res = self._server_list()
|
||||
try:
|
||||
for password in self._password_list()['list']:
|
||||
try:
|
||||
passwords_map[password['server']['id']] = \
|
||||
password['password']
|
||||
except KeyError:
|
||||
pass
|
||||
except InvalidCredsError:
|
||||
# some gogrid API keys don't have permission to access the
|
||||
# password list.
|
||||
pass
|
||||
|
||||
return [self._to_node(el, passwords_map.get(el.get('id')))
|
||||
for el in res['list']]
|
||||
|
||||
def reboot_node(self, node):
|
||||
"""
|
||||
@inherits: :class:`NodeDriver.reboot_node`
|
||||
:type node: :class:`GoGridNode`
|
||||
"""
|
||||
id = node.id
|
||||
power = 'restart'
|
||||
res = self._server_power(id, power)
|
||||
if not res.success():
|
||||
raise Exception(res.parse_error())
|
||||
return True
|
||||
|
||||
def destroy_node(self, node):
|
||||
"""
|
||||
@inherits: :class:`NodeDriver.reboot_node`
|
||||
:type node: :class:`GoGridNode`
|
||||
"""
|
||||
id = node.id
|
||||
res = self._server_delete(id)
|
||||
if not res.success():
|
||||
raise Exception(res.parse_error())
|
||||
return True
|
||||
|
||||
def _server_list(self):
|
||||
return self.connection.request('/api/grid/server/list').object
|
||||
|
||||
def _password_list(self):
|
||||
return self.connection.request('/api/support/password/list').object
|
||||
|
||||
def _server_power(self, id, power):
|
||||
# power in ['start', 'stop', 'restart']
|
||||
params = {'id': id, 'power': power}
|
||||
return self.connection.request("/api/grid/server/power", params,
|
||||
method='POST')
|
||||
|
||||
def _server_delete(self, id):
|
||||
params = {'id': id}
|
||||
return self.connection.request("/api/grid/server/delete", params,
|
||||
method='POST')
|
||||
|
||||
def _get_first_ip(self, location=None):
|
||||
ips = self.ex_list_ips(public=True, assigned=False, location=location)
|
||||
try:
|
||||
return ips[0].ip
|
||||
except IndexError:
|
||||
raise LibcloudError('No public unassigned IPs left',
|
||||
GoGridNodeDriver)
|
||||
|
||||
def list_sizes(self, location=None):
|
||||
sizes = []
|
||||
for key, values in self._instance_types.items():
|
||||
attributes = copy.deepcopy(values)
|
||||
attributes.update({'price': self._get_size_price(size_id=key)})
|
||||
sizes.append(NodeSize(driver=self.connection.driver, **attributes))
|
||||
|
||||
return sizes
|
||||
|
||||
def list_locations(self):
|
||||
locations = self._to_locations(
|
||||
self.connection.request('/api/common/lookup/list',
|
||||
params={'lookup': 'ip.datacenter'}).object)
|
||||
return locations
|
||||
|
||||
def ex_create_node_nowait(self, **kwargs):
|
||||
"""Don't block until GoGrid allocates id for a node
|
||||
but return right away with id == None.
|
||||
|
||||
The existence of this method is explained by the fact
|
||||
that GoGrid assigns id to a node only few minutes after
|
||||
creation.
|
||||
|
||||
|
||||
:keyword name: String with a name for this new node (required)
|
||||
:type name: ``str``
|
||||
|
||||
:keyword size: The size of resources allocated to this node .
|
||||
(required)
|
||||
:type size: :class:`NodeSize`
|
||||
|
||||
:keyword image: OS Image to boot on node. (required)
|
||||
:type image: :class:`NodeImage`
|
||||
|
||||
:keyword ex_description: Description of a Node
|
||||
:type ex_description: ``str``
|
||||
|
||||
:keyword ex_ip: Public IP address to use for a Node. If not
|
||||
specified, first available IP address will be picked
|
||||
:type ex_ip: ``str``
|
||||
|
||||
:rtype: :class:`GoGridNode`
|
||||
"""
|
||||
name = kwargs['name']
|
||||
image = kwargs['image']
|
||||
size = kwargs['size']
|
||||
try:
|
||||
ip = kwargs['ex_ip']
|
||||
except KeyError:
|
||||
ip = self._get_first_ip(kwargs.get('location'))
|
||||
|
||||
params = {'name': name,
|
||||
'image': image.id,
|
||||
'description': kwargs.get('ex_description', ''),
|
||||
'server.ram': size.id,
|
||||
'ip': ip}
|
||||
|
||||
object = self.connection.request('/api/grid/server/add',
|
||||
params=params, method='POST').object
|
||||
node = self._to_node(object['list'][0])
|
||||
|
||||
return node
|
||||
|
||||
def create_node(self, **kwargs):
|
||||
"""Create a new GoGird node
|
||||
|
||||
@inherits: :class:`NodeDriver.create_node`
|
||||
|
||||
:keyword ex_description: Description of a Node
|
||||
:type ex_description: ``str``
|
||||
|
||||
:keyword ex_ip: Public IP address to use for a Node. If not
|
||||
specified, first available IP address will be picked
|
||||
:type ex_ip: ``str``
|
||||
|
||||
:rtype: :class:`GoGridNode`
|
||||
"""
|
||||
node = self.ex_create_node_nowait(**kwargs)
|
||||
|
||||
timeout = 60 * 20
|
||||
waittime = 0
|
||||
interval = 2 * 60
|
||||
|
||||
while node.id is None and waittime < timeout:
|
||||
nodes = self.list_nodes()
|
||||
|
||||
for i in nodes:
|
||||
if i.public_ips[0] == node.public_ips[0] and i.id is not None:
|
||||
return i
|
||||
|
||||
waittime += interval
|
||||
time.sleep(interval)
|
||||
|
||||
if id is None:
|
||||
raise Exception(
|
||||
"Wasn't able to wait for id allocation for the node %s"
|
||||
% str(node))
|
||||
|
||||
return node
|
||||
|
||||
def ex_save_image(self, node, name):
|
||||
"""Create an image for node.
|
||||
|
||||
Please refer to GoGrid documentation to get info
|
||||
how prepare a node for image creation:
|
||||
|
||||
http://wiki.gogrid.com/wiki/index.php/MyGSI
|
||||
|
||||
:keyword node: node to use as a base for image
|
||||
:type node: :class:`GoGridNode`
|
||||
|
||||
:keyword name: name for new image
|
||||
:type name: ``str``
|
||||
|
||||
:rtype: :class:`NodeImage`
|
||||
"""
|
||||
params = {'server': node.id,
|
||||
'friendlyName': name}
|
||||
object = self.connection.request('/api/grid/image/save', params=params,
|
||||
method='POST').object
|
||||
|
||||
return self._to_images(object)[0]
|
||||
|
||||
def ex_edit_node(self, **kwargs):
|
||||
"""Change attributes of a node.
|
||||
|
||||
:keyword node: node to be edited (required)
|
||||
:type node: :class:`GoGridNode`
|
||||
|
||||
:keyword size: new size of a node (required)
|
||||
:type size: :class:`NodeSize`
|
||||
|
||||
:keyword ex_description: new description of a node
|
||||
:type ex_description: ``str``
|
||||
|
||||
:rtype: :class:`Node`
|
||||
"""
|
||||
node = kwargs['node']
|
||||
size = kwargs['size']
|
||||
|
||||
params = {'id': node.id,
|
||||
'server.ram': size.id}
|
||||
|
||||
if 'ex_description' in kwargs:
|
||||
params['description'] = kwargs['ex_description']
|
||||
|
||||
object = self.connection.request('/api/grid/server/edit',
|
||||
params=params).object
|
||||
|
||||
return self._to_node(object['list'][0])
|
||||
|
||||
def ex_edit_image(self, **kwargs):
|
||||
"""Edit metadata of a server image.
|
||||
|
||||
:keyword image: image to be edited (required)
|
||||
:type image: :class:`NodeImage`
|
||||
|
||||
:keyword public: should be the image public (required)
|
||||
:type public: ``bool``
|
||||
|
||||
:keyword ex_description: description of the image (optional)
|
||||
:type ex_description: ``str``
|
||||
|
||||
:keyword name: name of the image
|
||||
:type name: ``str``
|
||||
|
||||
:rtype: :class:`NodeImage`
|
||||
"""
|
||||
|
||||
image = kwargs['image']
|
||||
public = kwargs['public']
|
||||
|
||||
params = {'id': image.id,
|
||||
'isPublic': str(public).lower()}
|
||||
|
||||
if 'ex_description' in kwargs:
|
||||
params['description'] = kwargs['ex_description']
|
||||
|
||||
if 'name' in kwargs:
|
||||
params['friendlyName'] = kwargs['name']
|
||||
|
||||
object = self.connection.request('/api/grid/image/edit',
|
||||
params=params).object
|
||||
|
||||
return self._to_image(object['list'][0])
|
||||
|
||||
def ex_list_ips(self, **kwargs):
|
||||
"""Return list of IP addresses assigned to
|
||||
the account.
|
||||
|
||||
:keyword public: set to True to list only
|
||||
public IPs or False to list only
|
||||
private IPs. Set to None or not specify
|
||||
at all not to filter by type
|
||||
:type public: ``bool``
|
||||
|
||||
:keyword assigned: set to True to list only addresses
|
||||
assigned to servers, False to list unassigned
|
||||
addresses and set to None or don't set at all
|
||||
not no filter by state
|
||||
:type assigned: ``bool``
|
||||
|
||||
:keyword location: filter IP addresses by location
|
||||
:type location: :class:`NodeLocation`
|
||||
|
||||
:rtype: ``list`` of :class:`GoGridIpAddress`
|
||||
"""
|
||||
|
||||
params = {}
|
||||
|
||||
if "public" in kwargs and kwargs["public"] is not None:
|
||||
params["ip.type"] = {True: "Public",
|
||||
False: "Private"}[kwargs["public"]]
|
||||
if "assigned" in kwargs and kwargs["assigned"] is not None:
|
||||
params["ip.state"] = {True: "Assigned",
|
||||
False: "Unassigned"}[kwargs["assigned"]]
|
||||
if "location" in kwargs and kwargs['location'] is not None:
|
||||
params['datacenter'] = kwargs['location'].id
|
||||
|
||||
ips = self._to_ips(
|
||||
self.connection.request('/api/grid/ip/list',
|
||||
params=params).object)
|
||||
return ips
|
||||
127
awx/lib/site-packages/libcloud/compute/drivers/gridspot.py
Normal file
127
awx/lib/site-packages/libcloud/compute/drivers/gridspot.py
Normal file
@@ -0,0 +1,127 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.compute.base import NodeDriver, Node
|
||||
from libcloud.compute.base import NodeState
|
||||
from libcloud.common.base import ConnectionKey, JsonResponse
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.common.types import InvalidCredsError
|
||||
|
||||
|
||||
class GridspotAPIException(Exception):
    """Raised when a Gridspot API response reports an exception."""

    def __str__(self):
        # The API's exception name is stored as the first exception arg.
        message = self.args[0]
        return message

    def __repr__(self):
        return "<GridspotAPIException '%s'>" % self.args[0]
|
||||
|
||||
|
||||
class GridspotResponse(JsonResponse):
    """
    Response class for Gridspot.

    Gridspot reports API-level failures inside an otherwise-successful
    JSON body via the ``exception_name`` field.
    """
    def parse_body(self):
        parsed = super(GridspotResponse, self).parse_body()

        error_name = (
            parsed['exception_name'] if 'exception_name' in parsed else None)
        if error_name:
            raise GridspotAPIException(error_name)

        return parsed

    def parse_error(self):
        # Gridspot 404s on invalid api key or instance_id
        raise InvalidCredsError("Invalid api key/instance_id")
|
||||
|
||||
|
||||
class GridspotConnection(ConnectionKey):
    """
    Connection to Gridspot's API servers.

    Every request is authenticated by attaching the account's API key
    as the ``api_key`` query parameter.
    """

    host = 'gridspot.com'
    responseCls = GridspotResponse

    def add_default_params(self, params):
        params.update({'api_key': self.key})
        return params
|
||||
|
||||
|
||||
class GridspotNodeDriver(NodeDriver):
    """
    Gridspot (http://www.gridspot.com/) node driver.
    """

    type = Provider.GRIDSPOT
    name = 'Gridspot'
    website = 'http://www.gridspot.com/'
    connectionCls = GridspotConnection
    NODE_STATE_MAP = {
        'Running': NodeState.RUNNING,
        'Starting': NodeState.PENDING
    }

    def list_nodes(self):
        """
        List the instances on the account.

        :rtype: ``list`` of :class:`Node`
        """
        data = self.connection.request(
            '/compute_api/v1/list_instances').object
        return [self._to_node(n) for n in data['instances']]

    def destroy_node(self, node):
        """
        Stop (destroy) an instance.

        :param node: The node to destroy.
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = {'instance_id': node.id}
        self.connection.request('/compute_api/v1/stop_instance', data).object
        return True

    def _get_node_state(self, state):
        # States the API may return but we don't know map to UNKNOWN.
        result = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN)
        return result

    def _add_int_param(self, params, data, field):
        """
        Copy ``data[field]`` into ``params`` as an ``int``.

        Missing, empty or non-numeric values are silently skipped: the
        Gridspot spec does not guarantee these fields are present.
        """
        # Use .get() - a missing key must not raise (see _to_node's note
        # about the spec being vague on which fields are always present).
        value = data.get(field)
        if value:
            try:
                params[field] = int(value)
            except (TypeError, ValueError):
                # Unparseable value - leave the field out rather than fail.
                pass

    def _to_node(self, data):
        port = None
        ip = None

        state = self._get_node_state(data['current_state'])

        # The endpoint is the literal string 'null' until an SSH endpoint
        # has been assigned; otherwise it is "ip:port".
        if data['vm_ssh_wan_ip_endpoint'] != 'null':
            parts = data['vm_ssh_wan_ip_endpoint'].split(':')
            ip = parts[0]
            port = int(parts[1])

        extra_params = {
            'winning_bid_id': data['winning_bid_id'],
            'port': port
        }

        # Spec is vague and doesn't indicate if these will always be present
        self._add_int_param(extra_params, data, 'vm_num_logical_cores')
        self._add_int_param(extra_params, data, 'vm_num_physical_cores')
        self._add_int_param(extra_params, data, 'vm_ram')
        self._add_int_param(extra_params, data, 'start_state_time')
        self._add_int_param(extra_params, data, 'ended_state_time')
        self._add_int_param(extra_params, data, 'running_state_time')

        return Node(
            id=data['instance_id'],
            name=data['instance_id'],
            state=state,
            public_ips=[ip],
            private_ips=[],
            driver=self.connection.driver,
            extra=extra_params)
|
||||
341
awx/lib/site-packages/libcloud/compute/drivers/hostvirtual.py
Normal file
341
awx/lib/site-packages/libcloud/compute/drivers/hostvirtual.py
Normal file
@@ -0,0 +1,341 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
libcloud driver for the Host Virtual Inc. (VR) API
|
||||
Home page http://www.vr.org/
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.common.hostvirtual import HostVirtualResponse
|
||||
from libcloud.common.hostvirtual import HostVirtualConnection
|
||||
from libcloud.common.hostvirtual import HostVirtualException
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.types import NodeState
|
||||
from libcloud.compute.base import Node, NodeDriver
|
||||
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
|
||||
from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword
|
||||
|
||||
# All Host Virtual API paths below are relative to the connection root.
API_ROOT = ''

# Maps Host Virtual server status strings to generic libcloud NodeStates.
NODE_STATE_MAP = {
    'BUILDING': NodeState.PENDING,
    'PENDING': NodeState.PENDING,
    'RUNNING': NodeState.RUNNING,  # server is powered up
    'STOPPING': NodeState.REBOOTING,
    'REBOOTING': NodeState.REBOOTING,
    'STARTING': NodeState.REBOOTING,
    'TERMINATED': NodeState.TERMINATED,  # server is powered down
    'STOPPED': NodeState.STOPPED
}

# Data center used by create_node() when the caller gives no location.
DEFAULT_NODE_LOCATION_ID = 4
|
||||
|
||||
|
||||
class HostVirtualComputeResponse(HostVirtualResponse):
    """Compute-API response; inherits all behavior from HostVirtualResponse."""
    pass
|
||||
|
||||
|
||||
class HostVirtualComputeConnection(HostVirtualConnection):
    """Connection to the Host Virtual compute API."""
    responseCls = HostVirtualComputeResponse
|
||||
|
||||
|
||||
class HostVirtualNodeDriver(NodeDriver):
    """
    Host Virtual Inc. (vr.org) node driver.
    """
    type = Provider.HOSTVIRTUAL
    name = 'HostVirtual'
    website = 'http://www.vr.org'
    connectionCls = HostVirtualComputeConnection
    features = {'create_node': ['ssh_key', 'password']}

    def __init__(self, key, secure=True, host=None, port=None):
        # No default location; create_node falls back to
        # DEFAULT_NODE_LOCATION_ID when none is supplied.
        self.location = None
        super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure,
                                                    host=host, port=port)

    def _to_node(self, data):
        """Build a :class:`Node` from one API server dict."""
        state = NODE_STATE_MAP[data['status']]
        public_ips = []
        private_ips = []
        extra = {}

        # Optional fields; only copied into extra when the API returns them.
        if 'plan_id' in data:
            extra['size'] = data['plan_id']
        if 'os_id' in data:
            extra['image'] = data['os_id']
        if 'location_id' in data:
            extra['location'] = data['location_id']
        if 'ip' in data:
            public_ips.append(data['ip'])

        node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, extra=extra)
        return node

    def list_locations(self):
        """List available data centers as :class:`NodeLocation` objects."""
        result = self.connection.request(API_ROOT + '/cloud/locations/').object
        locations = []
        for dc in result:
            locations.append(NodeLocation(
                dc["id"],
                dc["name"],
                # NOTE(review): assumes the name looks like "City, Country";
                # an IndexError would be raised if no comma is present -
                # TODO confirm with the API.
                dc["name"].split(',')[1].replace(" ", ""),  # country
                self))
        return locations

    def list_sizes(self, location=None):
        """List plans, optionally filtered by ``location``."""
        params = {}
        if location:
            params = {'location': location.id}
        result = self.connection.request(
            API_ROOT + '/cloud/sizes/',
            data=json.dumps(params)).object
        sizes = []
        for size in result:
            n = NodeSize(id=size['plan_id'],
                         name=size['plan'],
                         ram=size['ram'],
                         disk=size['disk'],
                         bandwidth=size['transfer'],
                         price=size['price'],
                         driver=self.connection.driver)
            sizes.append(n)
        return sizes

    def list_images(self):
        """List available OS images."""
        result = self.connection.request(API_ROOT + '/cloud/images/').object
        images = []
        for image in result:
            i = NodeImage(id=image["id"],
                          name=image["os"],
                          driver=self.connection.driver,
                          extra=image)
            # id and os are already surfaced as NodeImage attributes.
            del i.extra['id']
            del i.extra['os']
            images.append(i)
        return images

    def list_nodes(self):
        """List all servers on the account."""
        result = self.connection.request(API_ROOT + '/cloud/servers/').object
        nodes = []
        for value in result:
            node = self._to_node(value)
            nodes.append(node)
        return nodes

    def _wait_for_node(self, node_id, timeout=30, interval=5.0):
        """
        Poll ``ex_get_node`` until the node's details become available.

        :param node_id: ID of the node to wait for.
        :type node_id: ``int``

        :param timeout: Timeout (in seconds).
        :type timeout: ``int``

        :param interval: How long to wait (in seconds) between each attempt.
        :type interval: ``float``

        :rtype: :class:`Node`
        """
        # poll until we get a node
        for i in range(0, timeout, int(interval)):
            try:
                node = self.ex_get_node(node_id)
                return node
            except HostVirtualException:
                # Not available yet; back off and retry.
                time.sleep(interval)

        raise HostVirtualException(412, 'Timedout on getting node details')

    def create_node(self, **kwargs):
        """
        Create a node: buy a package, then provision a server onto it and
        wait until the node's details can be fetched.
        """
        dc = None

        size = kwargs['size']
        image = kwargs['image']

        auth = self._get_and_check_auth(kwargs.get('auth'))

        # The buy request only carries the plan; image/location are applied
        # later by ex_provision_node() via the stub node's extra data.
        params = {'plan': size.name}

        dc = DEFAULT_NODE_LOCATION_ID
        if 'location' in kwargs:
            dc = kwargs['location'].id

        # simply order a package first
        result = self.connection.request(API_ROOT + '/cloud/buy/',
                                         data=json.dumps(params),
                                         method='POST').object

        # create a stub node
        stub_node = self._to_node({
            'mbpkgid': result['id'],
            'status': 'PENDING',
            'fqdn': kwargs['name'],
            'plan_id': size.id,
            'os_id': image.id,
            'location_id': dc
        })

        # provisioning a server using the stub node
        self.ex_provision_node(node=stub_node, auth=auth)
        node = self._wait_for_node(stub_node.id)

        # Surface an auto-generated password so the caller can retrieve it.
        if getattr(auth, 'generated', False):
            node.extra['password'] = auth.password

        return node

    def reboot_node(self, node):
        """Soft-reboot (force=0) the node; True when the API acknowledges."""
        params = {'force': 0, 'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/reboot',
            data=json.dumps(params),
            method='POST').object

        return bool(result)

    def destroy_node(self, node):
        """Cancel the package backing the node."""
        params = {
            'mbpkgid': node.id,
            # 'reason': 'Submitted through Libcloud API'
        }

        result = self.connection.request(
            API_ROOT + '/cloud/cancel', data=json.dumps(params),
            method='POST').object

        return bool(result)

    def ex_get_node(self, node_id):
        """
        Get a single node.

        :param node_id: id of the node that we need the node object for
        :type node_id: ``str``

        :rtype: :class:`Node`
        """

        params = {'mbpkgid': node_id}
        result = self.connection.request(
            API_ROOT + '/cloud/server', params=params).object
        node = self._to_node(result)
        return node

    def ex_stop_node(self, node):
        """
        Stop a node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {'force': 0, 'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/shutdown',
            data=json.dumps(params),
            method='POST').object

        return bool(result)

    def ex_start_node(self, node):
        """
        Start a node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        params = {'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/start',
            data=json.dumps(params),
            method='POST').object

        return bool(result)

    def ex_provision_node(self, **kwargs):
        """
        Provision a server on a VR package and get it booted

        :keyword node: node which should be used
        :type node: :class:`Node`

        :keyword image: The distribution to deploy on your server (mandatory)
        :type image: :class:`NodeImage`

        :keyword auth: an SSH key or root password (mandatory)
        :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :keyword location: which datacenter to create the server in
        :type location: :class:`NodeLocation`

        :return: Node representing the newly built server
        :rtype: :class:`Node`
        """

        node = kwargs['node']

        # Fall back to the image recorded on the node (set by create_node).
        if 'image' in kwargs:
            image = kwargs['image']
        else:
            image = node.extra['image']

        params = {
            'mbpkgid': node.id,
            'image': image,
            'fqdn': node.name,
            'location': node.extra['location'],
        }

        auth = kwargs['auth']

        ssh_key = None
        password = None
        if isinstance(auth, NodeAuthSSHKey):
            ssh_key = auth.pubkey
            params['ssh_key'] = ssh_key
        elif isinstance(auth, NodeAuthPassword):
            password = auth.password
            params['password'] = password

        if not ssh_key and not password:
            raise HostVirtualException(500, "Need SSH key or Root password")

        result = self.connection.request(API_ROOT + '/cloud/server/build',
                                         data=json.dumps(params),
                                         method='POST').object
        return bool(result)

    def ex_delete_node(self, node):
        """
        Delete a node.

        :param node: Node which should be used
        :type node: :class:`Node`

        :rtype: ``bool``
        """

        params = {'mbpkgid': node.id}
        result = self.connection.request(
            API_ROOT + '/cloud/server/delete', data=json.dumps(params),
            method='POST').object

        return bool(result)
|
||||
99
awx/lib/site-packages/libcloud/compute/drivers/hpcloud.py
Normal file
99
awx/lib/site-packages/libcloud/compute/drivers/hpcloud.py
Normal file
@@ -0,0 +1,99 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
HP Public cloud driver which is esentially just a small wrapper around
|
||||
OpenStack driver.
|
||||
"""
|
||||
|
||||
from libcloud.compute.types import Provider, LibcloudError
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver
|
||||
|
||||
|
||||
__all__ = [
    'HPCloudNodeDriver'
]

# Keystone service-catalog lookup arguments for each supported HP region.
ENDPOINT_ARGS_MAP = {
    'region-a.geo-1': {
        'service_type': 'compute',
        'name': 'Compute',
        'region': 'region-a.geo-1'
    },
    'region-b.geo-1': {
        'service_type': 'compute',
        'name': 'Compute',
        'region': 'region-b.geo-1'
    },
}

# %s is filled with the region name to form the Keystone token URL.
AUTH_URL_TEMPLATE = 'https://%s.identity.hpcloudsvc.com:35357/v2.0/tokens'
|
||||
|
||||
|
||||
class HPCloudConnection(OpenStack_1_1_Connection):
    """Keystone password-authenticated connection for HP Public Cloud."""

    _auth_version = '2.0_password'

    def __init__(self, *args, **kwargs):
        # Strip out driver-specific kwargs before delegating upward.
        self.region = kwargs.pop('region', None)
        self.get_endpoint_args = kwargs.pop('get_endpoint_args', None)
        super(HPCloudConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self):
        """Resolve the compute endpoint's public URL via the catalog."""
        if not self.get_endpoint_args:
            raise LibcloudError(
                'HPCloudConnection must have get_endpoint_args set')

        if '2.0_password' not in self._auth_version:
            raise LibcloudError(
                'Auth version "%s" not supported' % (self._auth_version))

        endpoint = self.service_catalog.get_endpoint(**self.get_endpoint_args)
        public_url = endpoint.get('publicURL', None)

        if not public_url:
            raise LibcloudError('Could not find specified endpoint')

        return public_url
|
||||
|
||||
|
||||
class HPCloudNodeDriver(OpenStack_1_1_NodeDriver):
    """HP Public Cloud (Helion) driver - a thin OpenStack 1.1 wrapper."""

    name = 'HP Public Cloud (Helion)'
    website = 'http://www.hpcloud.com/'
    connectionCls = HPCloudConnection
    type = Provider.HPCLOUD

    def __init__(self, key, secret, tenant_name, secure=True,
                 host=None, port=None, region='region-b.geo-1', **kwargs):
        """
        Note: tenant_name argument is required for HP cloud.
        """
        self.tenant_name = tenant_name
        super(HPCloudNodeDriver, self).__init__(key=key, secret=secret,
                                                secure=secure, host=host,
                                                port=port,
                                                region=region,
                                                **kwargs)

    def _ex_connection_class_kwargs(self):
        """Extra kwargs used when instantiating HPCloudConnection."""
        conn_kwargs = self.openstack_connection_kwargs()
        conn_kwargs.update({
            'region': self.region,
            'get_endpoint_args': ENDPOINT_ARGS_MAP[self.region],
            'ex_force_auth_url': AUTH_URL_TEMPLATE % (self.region),
            'ex_tenant_name': self.tenant_name,
        })
        return conn_kwargs
|
||||
753
awx/lib/site-packages/libcloud/compute/drivers/ibm_sce.py
Normal file
753
awx/lib/site-packages/libcloud/compute/drivers/ibm_sce.py
Normal file
@@ -0,0 +1,753 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Driver for IBM SmartCloud Enterprise
|
||||
|
||||
Formerly known as:
|
||||
- IBM Developer Cloud
|
||||
- IBM Smart Business Development and Test on the IBM Cloud
|
||||
- IBM SmartBusiness Cloud
|
||||
"""
|
||||
|
||||
import base64
|
||||
import time
|
||||
|
||||
from libcloud.utils.py3 import urlencode
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
|
||||
from libcloud.common.types import InvalidCredsError, LibcloudError
|
||||
from libcloud.compute.types import NodeState, Provider
|
||||
from libcloud.compute.base import NodeDriver, Node, NodeImage, \
|
||||
NodeSize, NodeLocation, NodeAuthSSHKey, StorageVolume
|
||||
|
||||
# API endpoint host for IBM SmartCloud Enterprise.
HOST = 'www-147.ibm.com'
# Versioned base path prepended to every REST call.
REST_BASE = '/computecloud/enterprise/api/rest/20100331'
|
||||
|
||||
|
||||
class IBMResponse(XmlResponse):
    """Response wrapper for the IBM SmartCloud Enterprise XML API."""

    def success(self):
        """A request succeeded iff the HTTP status is 200."""
        return int(self.status) == 200

    def parse_error(self):
        """Raise InvalidCredsError on 401; otherwise return the raw body."""
        if int(self.status) != 401:
            return self.body
        if self.body:
            raise InvalidCredsError(self.body)
        raise InvalidCredsError(str(self.status) + ': ' + self.error)
|
||||
|
||||
|
||||
class IBMConnection(ConnectionUserAndKey):
    """
    Connection class for the IBM SmartCloud Enterprise driver.

    Authenticates every request with HTTP Basic auth built from the
    account's user id and API key.
    """

    host = HOST
    responseCls = IBMResponse

    def add_default_headers(self, headers):
        credentials = '%s:%s' % (self.user_id, self.key)
        token = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Accept'] = 'text/xml'
        headers['Authorization'] = ('Basic %s' % (token))
        headers.setdefault('Content-Type', 'text/xml')
        return headers

    def encode_data(self, data):
        # The SCE API expects form-encoded request bodies.
        return urlencode(data)
|
||||
|
||||
|
||||
class IBMNodeLocation(NodeLocation):
    """
    A libcloud NodeLocation extended with SCE-specific extra attributes.
    """
    def __init__(self, id, name, country, driver, extra=None):
        # The API may hand us a numeric id; normalize to a string.
        self.id = str(id)
        self.name = name
        self.country = country
        self.driver = driver
        # Avoid sharing a mutable default between instances.
        self.extra = extra or {}

    def __repr__(self):
        details = (self.id, self.name, self.country, self.driver.name,
                   self.extra)
        return ('<IBMNodeLocation: id=%s, name=%s, country=%s, '
                'driver=%s, extra=%s>' % details)
|
||||
|
||||
|
||||
class VolumeState(object):
    """
    The SCE specific states for a storage volume.

    Values are the numeric state codes (as strings) returned by the API.
    """
    NEW = '0'
    CREATING = '1'
    DELETING = '2'
    DELETED = '3'
    DETACHED = '4'
    ATTACHED = '5'
    FAILED = '6'
    DELETE_PENDING = '7'
    BEING_CLONED = '8'
    CLONING = '9'
    ATTACHING = '10'
    DETACHING = '11'
    # NOTE: misspelling of "ATTACHED" kept as-is for backward compatibility;
    # renaming would break existing callers (ATTACHED itself is state '5').
    ATTACHIED = '12'
    IMPORTING = '13'
    TRANSFER_RETRYING = '14'
|
||||
|
||||
|
||||
class VolumeOffering(object):
    """
    An SCE-specific storage volume offering.

    An offering ID is required to create a volume, and offering IDs
    differ between data centers.
    """
    def __init__(self, id, name, location, extra=None):
        self.id = id
        self.name = name
        self.location = location
        # Avoid a shared mutable default.
        self.extra = extra or {}

    def __repr__(self):
        fields = (self.id, self.location, self.name, self.extra)
        return ('<VolumeOffering: id=%s, location=%s, name=%s, extra=%s>' %
                fields)
|
||||
|
||||
|
||||
class Address(object):
    """
    A reserved IP address that can be attached to an instance.

    Properties: id, ip, state, options(location, type, created_time, state,
                hostname, instance_ids, vlan, owner, mode, offering_id)
    """
    def __init__(self, id, ip, state, options):
        self.id = id
        self.ip = ip
        self.state = state
        self.options = options

    def __repr__(self):
        fields = (self.id, self.ip, self.state, self.options)
        return ('<Address: id=%s, ip=%s, state=%s, options=%s>' % fields)
|
||||
|
||||
|
||||
class IBMNodeDriver(NodeDriver):
    """
    Node driver for IBM SmartCloud Enterprise
    """
    connectionCls = IBMConnection
    type = Provider.IBM
    name = "IBM SmartCloud Enterprise"
    website = 'http://ibm.com/services/us/en/cloud-enterprise/'

    # Maps SCE numeric instance states to generic libcloud NodeState values.
    NODE_STATE_MAP = {
        0: NodeState.PENDING,      # New
        1: NodeState.PENDING,      # Provisioning
        2: NodeState.TERMINATED,   # Failed
        3: NodeState.TERMINATED,   # Removed
        4: NodeState.TERMINATED,   # Rejected
        5: NodeState.RUNNING,      # Active
        6: NodeState.UNKNOWN,      # Unknown
        7: NodeState.PENDING,      # Deprovisioning
        8: NodeState.REBOOTING,    # Restarting
        9: NodeState.PENDING,      # Starting
        10: NodeState.PENDING,     # Stopping
        11: NodeState.TERMINATED,  # Stopped
        12: NodeState.PENDING,     # Deprovision Pending
        13: NodeState.PENDING,     # Restart Pending
        14: NodeState.PENDING,     # Attaching
        15: NodeState.PENDING,     # Detaching
    }
|
||||
|
||||
def create_node(self, **kwargs):
|
||||
"""
|
||||
Creates a node in the IBM SmartCloud Enterprise.
|
||||
|
||||
See :class:`NodeDriver.create_node` for more keyword args.
|
||||
|
||||
@inherits: :class:`NodeDriver.create_node`
|
||||
|
||||
:keyword auth: Name of the pubkey to use. When constructing
|
||||
:class:`NodeAuthSSHKey` instance, 'pubkey' argument must be the
|
||||
name of the public key to use. You chose this name when creating
|
||||
a new public key on the IBM server.
|
||||
:type auth: :class:`NodeAuthSSHKey`
|
||||
|
||||
:keyword ex_configurationData: Image-specific configuration
|
||||
parameters. Configuration parameters are defined in the parameters
|
||||
.xml file. The URL to this file is defined in the NodeImage at
|
||||
extra[parametersURL].
|
||||
Note: This argument must be specified when launching a Windows
|
||||
instance. It must contain 'UserName' and 'Password' keys.
|
||||
:type ex_configurationData: ``dict``
|
||||
"""
|
||||
|
||||
# Compose headers for message body
|
||||
data = {}
|
||||
data.update({'name': kwargs['name']})
|
||||
data.update({'imageID': kwargs['image'].id})
|
||||
data.update({'instanceType': kwargs['size'].id})
|
||||
if 'location' in kwargs:
|
||||
data.update({'location': kwargs['location'].id})
|
||||
else:
|
||||
data.update({'location': '1'})
|
||||
if 'auth' in kwargs and isinstance(kwargs['auth'], NodeAuthSSHKey):
|
||||
data.update({'publicKey': kwargs['auth'].pubkey})
|
||||
if 'ex_configurationData' in kwargs:
|
||||
configurationData = kwargs['ex_configurationData']
|
||||
if configurationData:
|
||||
for key in configurationData.keys():
|
||||
data.update({key: configurationData.get(key)})
|
||||
|
||||
# Send request!
|
||||
resp = self.connection.request(
|
||||
action=REST_BASE + '/instances',
|
||||
headers={'Content-Type': 'application/x-www-form-urlencoded'},
|
||||
method='POST',
|
||||
data=data).object
|
||||
return self._to_nodes(resp)[0]
|
||||
|
||||
def create_volume(self, size, name, location, **kwargs):
|
||||
"""
|
||||
Create a new block storage volume (virtual disk)
|
||||
|
||||
:param size: Size of volume in gigabytes (required).
|
||||
Find out the possible sizes from the
|
||||
offerings/storage REST interface
|
||||
:type size: ``int``
|
||||
|
||||
:keyword name: Name of the volume to be created (required)
|
||||
:type name: ``str``
|
||||
|
||||
:keyword location: Which data center to create a volume in. If
|
||||
empty, it will fail for IBM SmartCloud Enterprise
|
||||
(required)
|
||||
:type location: :class:`NodeLocation`
|
||||
|
||||
:keyword snapshot: Not supported for IBM SmartCloud Enterprise
|
||||
:type snapshot: ``str``
|
||||
|
||||
:keyword kwargs.format: Either RAW or EXT3 for IBM SmartCloud
|
||||
Enterprise (optional)
|
||||
:type kwargs.format: ``str``
|
||||
|
||||
:keyword kwargs.offering_id: The storage offering ID for IBM
|
||||
SmartCloud Enterprise
|
||||
Find this from the REST interface
|
||||
storage/offerings. (optional)
|
||||
:type kwargs.offering_id: ``str``
|
||||
|
||||
:keyword kwargs.source_disk_id: If cloning a volume, the storage
|
||||
disk to make a copy from (optional)
|
||||
:type kwargs.source_disk_id: ``str``
|
||||
|
||||
:keyword kwargs.storage_area_id: The id of the storage availability
|
||||
area to create the volume in
|
||||
(optional)
|
||||
:type kwargs.storage_area_id: ``str``
|
||||
|
||||
:keyword kwargs.target_location_id: If cloning a volume, the
|
||||
storage disk to make a copy
|
||||
from (optional)
|
||||
:type kwargs.target_location_id: ``str``
|
||||
|
||||
:return: The newly created :class:`StorageVolume`.
|
||||
:rtype: :class:`StorageVolume`
|
||||
"""
|
||||
data = {}
|
||||
data.update({'name': name})
|
||||
data.update({'size': size})
|
||||
data.update({'location': location})
|
||||
if (('format' in kwargs) and (kwargs['format'] is not None)):
|
||||
data.update({'format': kwargs['format']})
|
||||
if (('offering_id' in kwargs) and (kwargs['offering_id'] is not None)):
|
||||
data.update({'offeringID': kwargs['offering_id']})
|
||||
if (('storage_area_id' in kwargs) and
|
||||
(kwargs['storage_area_id'] is not None)):
|
||||
data.update({'storageAreaID': kwargs['storage_area_id']})
|
||||
if 'source_disk_id' in kwargs:
|
||||
data.update({'sourceDiskID': kwargs['source_disk_id']})
|
||||
data.update({'type': 'clone'})
|
||||
if 'target_location_id' in kwargs:
|
||||
data.update({'targetLocationID': kwargs['target_location_id']})
|
||||
resp = self.connection.request(
|
||||
action=REST_BASE + '/storage',
|
||||
headers={'Content-Type': 'application/x-www-form-urlencoded'},
|
||||
method='POST',
|
||||
data=data).object
|
||||
return self._to_volumes(resp)[0]
|
||||
|
||||
def create_image(self, name, description=None, **kwargs):
|
||||
"""
|
||||
Create a new node image from an existing volume or image.
|
||||
|
||||
:param name: Name of the image to be created (required)
|
||||
:type name: ``str``
|
||||
|
||||
:param description: Description of the image to be created
|
||||
:type description: ``str``
|
||||
|
||||
:keyword image_id: The ID of the source image if cloning the image
|
||||
:type image_id: ``str``
|
||||
|
||||
:keyword volume_id: The ID of the storage volume if
|
||||
importing the image
|
||||
:type volume_id: ``str``
|
||||
|
||||
:return: The newly created :class:`NodeImage`.
|
||||
:rtype: :class:`NodeImage`
|
||||
"""
|
||||
data = {}
|
||||
data.update({'name': name})
|
||||
if description is not None:
|
||||
data.update({'description': description})
|
||||
if (('image_id' in kwargs) and (kwargs['image_id'] is not None)):
|
||||
data.update({'imageId': kwargs['image_id']})
|
||||
if (('volume_id' in kwargs) and (kwargs['volume_id'] is not None)):
|
||||
data.update({'volumeId': kwargs['volume_id']})
|
||||
resp = self.connection.request(
|
||||
action=REST_BASE + '/offerings/image',
|
||||
headers={'Content-Type': 'application/x-www-form-urlencoded'},
|
||||
method='POST',
|
||||
data=data).object
|
||||
return self._to_images(resp)[0]
|
||||
|
||||
def destroy_node(self, node):
|
||||
url = REST_BASE + '/instances/%s' % (node.id)
|
||||
status = int(self.connection.request(action=url,
|
||||
method='DELETE').status)
|
||||
return status == httplib.OK
|
||||
|
||||
def destroy_volume(self, volume):
|
||||
"""
|
||||
Destroys a storage volume.
|
||||
|
||||
:param volume: Volume to be destroyed
|
||||
:type volume: :class:`StorageVolume`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
url = REST_BASE + '/storage/%s' % (volume.id)
|
||||
status = int(self.connection.request(action=url,
|
||||
method='DELETE').status)
|
||||
return status == httplib.OK
|
||||
|
||||
def ex_destroy_image(self, image):
|
||||
"""
|
||||
Destroys an image.
|
||||
|
||||
:param image: Image to be destroyed
|
||||
:type image: :class:`NodeImage`
|
||||
|
||||
:return: ``bool``
|
||||
"""
|
||||
|
||||
url = REST_BASE + '/offerings/image/%s' % (image.id)
|
||||
status = int(self.connection.request(action=url,
|
||||
method='DELETE').status)
|
||||
return status == 200
|
||||
|
||||
def attach_volume(self, node, volume):
    """
    Attaches volume to node.

    :param node: Node to attach volume to
    :type node: :class:`Node`

    :param volume: Volume to attach
    :type volume: :class:`StorageVolume`

    :rtype: ``bool``
    """
    url = REST_BASE + '/instances/%s' % (node.id)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    # Attach is modeled as a PUT on the instance with type=attach.
    data = {'storageID': volume.id, 'type': 'attach'}
    resp = self.connection.request(action=url,
                                   method='PUT',
                                   headers=headers,
                                   data=data)
    # httplib.OK instead of a bare 200 for consistency with destroy_node.
    return int(resp.status) == httplib.OK
|
||||
|
||||
def detach_volume(self, node, volume):
    """
    Detaches a volume from a node.

    :param node: Node which should be used
    :type node: :class:`Node`

    :param volume: Volume to be detached
    :type volume: :class:`StorageVolume`

    :rtype: ``bool``
    """
    url = REST_BASE + '/instances/%s' % (node.id)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    # Detach is modeled as a PUT on the instance with type=detach.
    data = {'storageID': volume.id, 'type': 'detach'}
    resp = self.connection.request(action=url,
                                   method='PUT',
                                   headers=headers,
                                   data=data)
    # httplib.OK instead of a bare 200 for consistency with destroy_node.
    return int(resp.status) == httplib.OK
|
||||
|
||||
def reboot_node(self, node):
    """
    Reboot the given instance by setting its state to 'restart'.

    :param node: Node to reboot
    :type node: :class:`Node`

    :rtype: ``bool``
    """
    url = REST_BASE + '/instances/%s' % (node.id)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    data = {'state': 'restart'}

    resp = self.connection.request(action=url,
                                   method='PUT',
                                   headers=headers,
                                   data=data)
    # httplib.OK instead of a bare 200 for consistency with destroy_node.
    return int(resp.status) == httplib.OK
|
||||
|
||||
def list_nodes(self):
    """
    List the instances visible to this account.

    :rtype: ``list`` of :class:`Node`
    """
    response = self.connection.request(REST_BASE + '/instances')
    return self._to_nodes(response.object)
|
||||
|
||||
def list_images(self, location=None):
    """
    List the image offerings.

    :rtype: ``list`` of :class:`NodeImage`
    """
    response = self.connection.request(REST_BASE + '/offerings/image')
    return self._to_images(response.object)
|
||||
|
||||
def list_volumes(self):
    """
    List storage volumes.

    :rtype: ``list`` of :class:`StorageVolume`
    """
    response = self.connection.request(REST_BASE + '/storage')
    return self._to_volumes(response.object)
|
||||
|
||||
def list_sizes(self, location=None):
    """
    Returns a generic list of sizes. See list_images() for a list of
    supported sizes for specific images. In particular, you need to have
    a size that matches the architecture (32-bit vs 64-bit) of the virtual
    machine image operating system.

    @inherits: :class:`NodeDriver.list_sizes`
    """
    # Fixed (id, label) catalog; SCE does not expose ram/disk/bandwidth
    # or price through this driver, so those fields stay None.
    catalog = [
        ('BRZ32.1/2048/60*175', 'Bronze 32 bit'),
        ('BRZ64.2/4096/60*500*350', 'Bronze 64 bit'),
        ('COP32.1/2048/60', 'Copper 32 bit'),
        ('COP64.2/4096/60', 'Copper 64 bit'),
        ('SLV32.2/4096/60*350', 'Silver 32 bit'),
        ('SLV64.4/8192/60*500*500', 'Silver 64 bit'),
        ('GLD32.4/4096/60*350', 'Gold 32 bit'),
        ('GLD64.8/16384/60*500*500', 'Gold 64 bit'),
        ('PLT64.16/16384/60*500*500*500*500', 'Platinum 64 bit'),
    ]
    return [NodeSize(size_id, label, None, None, None, None,
                     self.connection.driver)
            for (size_id, label) in catalog]
|
||||
|
||||
def list_locations(self):
    """
    List the SCE data center locations.

    :rtype: ``list`` of :class:`IBMNodeLocation`
    """
    response = self.connection.request(REST_BASE + '/locations')
    return self._to_locations(response.object)
|
||||
|
||||
def ex_list_storage_offerings(self):
    """
    List the storage center offerings

    :rtype: ``list`` of :class:`VolumeOffering`
    """
    response = self.connection.request(REST_BASE + '/offerings/storage')
    return self._to_volume_offerings(response.object)
|
||||
|
||||
def ex_allocate_address(self, location_id, offering_id, vlan_id=None):
    """
    Allocate a new reserved IP address

    :param location_id: Target data center
    :type location_id: ``str``

    :param offering_id: Offering ID for address to create
    :type offering_id: ``str``

    :param vlan_id: ID of target VLAN (optional)
    :type vlan_id: ``str``

    :return: The newly allocated address
    :rtype: :class:`Address`
    """
    payload = {'location': location_id, 'offeringID': offering_id}
    if vlan_id is not None:
        payload['vlanID'] = vlan_id
    response = self.connection.request(
        action=REST_BASE + '/addresses',
        method='POST',
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        data=payload).object
    return self._to_addresses(response)[0]
|
||||
|
||||
def ex_list_addresses(self, resource_id=None):
    """
    List the reserved IP addresses

    :param resource_id: If this is supplied only a single address will
                        be returned (optional)
    :type resource_id: ``str``

    :rtype: ``list`` of :class:`Address`
    """
    action = REST_BASE + '/addresses'
    if resource_id:
        action = '%s/%s' % (action, resource_id)
    return self._to_addresses(self.connection.request(action).object)
|
||||
|
||||
def ex_copy_to(self, image, volume):
    """
    Copies a node image to a storage volume

    :param image: source image to copy
    :type image: :class:`NodeImage`

    :param volume: Target storage volume to copy to
    :type volume: :class:`StorageVolume`

    :return: The success of the operation
    :rtype: ``bool``
    """
    url = REST_BASE + '/storage/%s' % (volume.id)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    data = {'imageId': image.id}
    resp = self.connection.request(action=url,
                                   method='PUT',
                                   headers=headers,
                                   data=data)
    # httplib.OK instead of a bare 200 for consistency with destroy_node.
    return int(resp.status) == httplib.OK
|
||||
|
||||
def ex_delete_address(self, resource_id):
    """
    Delete a reserved IP address

    :param resource_id: The address to delete (required)
    :type resource_id: ``str``

    :rtype: ``bool``
    """
    url = REST_BASE + '/addresses/' + resource_id
    status = int(self.connection.request(action=url,
                                         method='DELETE').status)
    # httplib.OK instead of a bare 200 for consistency with destroy_node.
    return status == httplib.OK
|
||||
|
||||
def ex_wait_storage_state(self, volume, state=VolumeState.DETACHED,
                          wait_period=60, timeout=1200):
    """
    Block until storage volume state changes to the given value

    :param volume: Storage volume.
    :type volume: :class:`StorageVolume`

    :param state: The target state to wait for
    :type state: ``int``

    :param wait_period: How many seconds to sleep between each poll
                        (default is 60)
    :type wait_period: ``int``

    :param timeout: How many seconds to wait before timing out
                    (default is 1200)
    :type timeout: ``int``

    :return: The volume in the requested state
    :rtype: :class:`StorageVolume`
    """
    start = time.time()
    end = start + timeout

    while time.time() < end:
        volumes = self.list_volumes()
        # Re-fetch the volume each iteration; match on uuid.
        volumes = list([v for v in volumes if v.uuid == volume.uuid])

        if (len(volumes) == 1 and volumes[0].extra['state'] == state):
            return volumes[0]
        else:
            time.sleep(wait_period)
            continue

    # Deadline exceeded without reaching the requested state.
    raise LibcloudError(value='Timed out after %d seconds' % (timeout),
                        driver=self)
|
||||
|
||||
def _to_nodes(self, object):
    # One Node per <Instance> element in the response document.
    instances = object.findall('Instance')
    return [self._to_node(elem) for elem in instances]
|
||||
|
||||
def _to_node(self, instance):
    # Convert an SCE <Instance> element to a libcloud Node.
    # The response carries at most a single <IP> text value.
    ip = instance.findtext('IP')
    public_ips = [ip] if ip else []

    return Node(
        id=instance.findtext('ID'),
        name=instance.findtext('Name'),
        state=self.NODE_STATE_MAP[int(instance.findtext('Status'))],
        public_ips=public_ips,
        private_ips=[],
        driver=self.connection.driver
    )
|
||||
|
||||
def _to_images(self, object):
    # Converts the SCE /offerings/image response document into a list of
    # NodeImage objects, one per <Image> element.
    image_elements = object.findall('Image')
    return [self._to_image(elem) for elem in image_elements]
|
||||
|
||||
def _to_image(self, image):
    # Converts an SCE <Image> element to a NodeImage.  All SCE-specific
    # fields are preserved under ``extra`` for callers that need them.
    extra = {}
    for field, tag in [('parametersURL', 'Manifest'),
                       ('location', 'Location'),
                       ('state', 'State'),
                       ('owner', 'Owner'),
                       ('visibility', 'Visibility'),
                       ('platform', 'Platform'),
                       ('description', 'Description'),
                       ('documentation', 'Documentation')]:
        extra[field] = image.findtext(tag)
    # Keep both the raw <SupportedInstanceTypes> elements and their
    # parsed NodeSize counterparts available.
    extra['instanceTypes'] = image.findall('SupportedInstanceTypes')
    extra['node_sizes'] = self._to_node_sizes(
        image.find('SupportedInstanceTypes'))
    return NodeImage(id=image.findtext('ID'),
                     name=image.findtext('Name'),
                     driver=self.connection.driver,
                     extra=extra)
|
||||
|
||||
def _to_locations(self, object):
    # One IBMNodeLocation per <Location> element in the response.
    location_elements = object.findall('Location')
    return [self._to_location(elem) for elem in location_elements]
|
||||
|
||||
def _to_location(self, location):
    # Converts an SCE Location object to a Libcloud NodeLocation object
    name_text = location.findtext('Name')
    description = location.findtext('Description')
    state = location.findtext('State')
    # Location names carry "<name>, <country>"; split them apart.
    (nameVal, separator, countryVal) = name_text.partition(',')
    capabiltyElements = location.findall('Capabilities/Capability')
    capabilities = {}
    for elem in capabiltyElements:
        capabilityID = elem.attrib['id']
        entryElements = elem.findall('Entry')
        entries = []
        for entryElem in entryElements:
            key = entryElem.attrib['key']
            # Fix: values belong to the current <Entry>, not the
            # enclosing <Capability>.  The original searched ``elem``,
            # which attached the same capability-level value list to
            # every entry regardless of its key.
            valueElements = entryElem.findall('Value')
            values = []
            for valueElem in valueElements:
                values.append(valueElem.text)
            entry = {'key': key, 'values': values}
            entries.append(entry)
        capabilities[capabilityID] = entries
    extra = {'description': description, 'state': state,
             'capabilities': capabilities}
    return IBMNodeLocation(id=location.findtext('ID'),
                           name=nameVal,
                           country=countryVal.strip(),
                           driver=self.connection.driver,
                           extra=extra)
|
||||
|
||||
def _to_node_sizes(self, object):
    # Converts an SCE SupportedInstanceTypes element into a list of
    # libcloud NodeSize objects, one per <InstanceType>.
    return [self._to_node_size(elem)
            for elem in object.findall('InstanceType')]
|
||||
|
||||
def _to_node_size(self, object):
    # Converts an SCE InstanceType to a libcloud NodeSize.  SCE does not
    # expose ram/disk/bandwidth here, so those stay None.
    size_id = object.findtext('ID')
    label = object.findtext('Label')
    price = object.findtext('Price/Rate')
    return NodeSize(size_id, label, None, None, None, price,
                    self.connection.driver)
|
||||
|
||||
def _to_volumes(self, object):
    # One StorageVolume per <Volume> element in the response.
    volume_elements = object.findall('Volume')
    return [self._to_volume(elem) for elem in volume_elements]
|
||||
|
||||
def _to_volume(self, object):
    # Converts an SCE Volume to a libcloud StorageVolume.
    # NOTE(review): 'instanceID' is read from a lowercase tag while all
    # sibling tags are capitalized -- confirm against the SCE schema.
    extra = {}
    for field, tag in [('state', 'State'),
                       ('location', 'Location'),
                       ('instanceID', 'instanceID'),
                       ('owner', 'Owner'),
                       ('format', 'Format'),
                       ('createdTime', 'CreatedTime'),
                       ('storageAreaID', 'StorageArea/ID')]:
        extra[field] = object.findtext(tag)
    return StorageVolume(object.findtext('ID'),
                         object.findtext('Name'),
                         object.findtext('Size'),
                         self.connection.driver,
                         extra)
|
||||
|
||||
def _to_volume_offerings(self, object):
    # One VolumeOffering per <Offerings> element in the response.
    offering_elements = object.findall('Offerings')
    return [self._to_volume_offering(elem) for elem in offering_elements]
|
||||
|
||||
def _to_volume_offering(self, object):
    # Converts an SCE DescribeVolumeOfferingsResponse/Offerings XML
    # object to an SCE VolumeOffering.
    extra = {
        'label': object.findtext('Label'),
        'supported_sizes': object.findtext('SupportedSizes'),
        'formats': object.findall('SupportedFormats/Format/ID'),
        'price': object.findall('Price'),
    }
    return VolumeOffering(object.findtext('ID'),
                          object.findtext('Name'),
                          object.findtext('Location'),
                          extra)
|
||||
|
||||
def _to_addresses(self, object):
    # Converts an SCE DescribeAddressesResponse XML object to a list of
    # Address objects, one per <Address> element.
    address_elements = object.findall('Address')
    return [self._to_address(elem) for elem in address_elements]
|
||||
|
||||
def _to_address(self, object):
    # Converts an SCE DescribeAddressesResponse/Address XML object to
    # an Address object
    # NOTE(review): several of these mappings look like copy/paste
    # leftovers and should be confirmed against the SCE XML schema:
    # 'type' is read from <Label>, 'created_time' from <SupportedSizes>,
    # and 'owner' from a lowercase <owner> tag while the rest are
    # capitalized.  Left unchanged pending verification.
    extra = {'location': object.findtext('Location'),
             'type': object.findtext('Label'),
             'created_time': object.findtext('SupportedSizes'),
             'hostname': object.findtext('Hostname'),
             'instance_ids': object.findtext('InstanceID'),
             'vlan': object.findtext('VLAN'),
             'owner': object.findtext('owner'),
             'mode': object.findtext('Mode'),
             'offering_id': object.findtext('OfferingID')}
    return Address(object.findtext('ID'),
                   object.findtext('IP'),
                   object.findtext('State'),
                   extra)
|
||||
31
awx/lib/site-packages/libcloud/compute/drivers/ikoula.py
Normal file
31
awx/lib/site-packages/libcloud/compute/drivers/ikoula.py
Normal file
@@ -0,0 +1,31 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
|
||||
|
||||
__all__ = [
|
||||
'IkoulaNodeDriver'
|
||||
]
|
||||
|
||||
|
||||
class IkoulaNodeDriver(CloudStackNodeDriver):
    """
    Node driver for the Ikoula CloudStack-based cloud.

    All behavior is inherited from :class:`CloudStackNodeDriver`; this
    subclass only pins the provider identity and the API endpoint.
    """
    type = Provider.IKOULA
    name = 'Ikoula'
    website = 'http://express.ikoula.co.uk/cloudstack'

    # API endpoint info
    host = 'cloudstack.ikoula.com'
    path = '/client/api'
||||
222
awx/lib/site-packages/libcloud/compute/drivers/joyent.py
Normal file
222
awx/lib/site-packages/libcloud/compute/drivers/joyent.py
Normal file
@@ -0,0 +1,222 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Joyent Cloud (http://www.joyentcloud.com) driver.
|
||||
"""
|
||||
|
||||
import base64
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except:
|
||||
import json
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.types import LibcloudError
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
|
||||
from libcloud.compute.types import NodeState, InvalidCredsError
|
||||
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize
|
||||
from libcloud.utils.networking import is_private_subnet
|
||||
|
||||
# Each region's API endpoint host is <region> + this suffix
# (see JoyentNodeDriver.__init__).
API_HOST_SUFFIX = '.api.joyentcloud.com'
API_VERSION = '~6.5'


# Map of Joyent machine state strings to libcloud NodeState values.
# NOTE(review): 'stopping'/'stopped' map to TERMINATED -- presumably
# because this libcloud version has no dedicated STOPPED state; confirm.
NODE_STATE_MAP = {
    'provisioning': NodeState.PENDING,
    'running': NodeState.RUNNING,
    'stopping': NodeState.TERMINATED,
    'stopped': NodeState.TERMINATED,
    'deleted': NodeState.TERMINATED
}

# Regions accepted by JoyentNodeDriver.__init__.
VALID_REGIONS = ['us-east-1', 'us-west-1', 'us-sw-1', 'eu-ams-1']
DEFAULT_REGION = 'us-east-1'
|
||||
|
||||
|
||||
class JoyentResponse(JsonResponse):
    """
    Response class for the Joyent API.
    """

    valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                            httplib.NO_CONTENT]

    def parse_error(self):
        # A 401 means the supplied credentials were rejected.
        if self.status == httplib.UNAUTHORIZED:
            body = self.parse_body()
            raise InvalidCredsError(body['code'] + ': ' + body['message'])
        return self.body

    def success(self):
        # Anything outside the explicit whitelist counts as a failure.
        return self.status in self.valid_response_codes
|
||||
|
||||
|
||||
class JoyentConnection(ConnectionUserAndKey):
    """
    Connection class for the Joyent API (HTTP basic authentication).
    """

    responseCls = JoyentResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        # The API speaks JSON and pins its version via a custom header.
        credentials = '%s:%s' % (self.user_id, self.key)
        encoded = base64.b64encode(b(credentials)).decode('utf-8')
        headers.update({
            'Accept': 'application/json',
            'Content-Type': 'application/json; charset=UTF-8',
            'X-Api-Version': API_VERSION,
            'Authorization': 'Basic %s' % (encoded),
        })
        return headers
|
||||
|
||||
|
||||
class JoyentNodeDriver(NodeDriver):
    """
    Joyent node driver class.

    Talks to the Joyent CloudAPI '/my/...' endpoints through
    :class:`JoyentConnection`.
    """

    type = Provider.JOYENT
    name = 'Joyent'
    website = 'http://www.joyentcloud.com'
    connectionCls = JoyentConnection
    # The root password is surfaced via node.extra['password'] in _to_node.
    features = {'create_node': ['generates_password']}

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """
        :param region: Joyent region to connect to; must be one of
                       ``VALID_REGIONS`` (default ``DEFAULT_REGION``).
        :type region: ``str``
        """
        # Location is here for backward compatibility reasons.
        # NOTE(review): 'location' is read but not popped, so it is also
        # forwarded to the base constructor inside **kwargs -- confirm
        # the base class tolerates the extra keyword.
        if 'location' in kwargs:
            region = kwargs['location']

        if region not in VALID_REGIONS:
            msg = 'Invalid region: "%s". Valid region: %s'
            raise LibcloudError(msg % (region,
                                ', '.join(VALID_REGIONS)), driver=self)

        super(JoyentNodeDriver, self).__init__(key=key, secret=secret,
                                               secure=secure, host=host,
                                               port=port, region=region,
                                               **kwargs)
        # Each region has its own API endpoint host.
        self.connection.host = region + API_HOST_SUFFIX

    def list_images(self):
        """
        List available machine images ('datasets' in Joyent terms).

        :rtype: ``list`` of :class:`NodeImage`
        """
        result = self.connection.request('/my/datasets').object

        images = []
        for value in result:
            extra = {'type': value['type'], 'urn': value['urn'],
                     'os': value['os'], 'default': value['default']}
            image = NodeImage(id=value['id'], name=value['name'],
                              driver=self.connection.driver, extra=extra)
            images.append(image)

        return images

    def list_sizes(self):
        """
        List available sizes ('packages' in Joyent terms).

        :rtype: ``list`` of :class:`NodeSize`
        """
        result = self.connection.request('/my/packages').object

        sizes = []
        for value in result:
            # The API does not expose bandwidth or price here.
            size = NodeSize(id=value['name'], name=value['name'],
                            ram=value['memory'], disk=value['disk'],
                            bandwidth=None, price=0.0,
                            driver=self.connection.driver)
            sizes.append(size)

        return sizes

    def list_nodes(self):
        """
        List machines in this account.

        :rtype: ``list`` of :class:`Node`
        """
        result = self.connection.request('/my/machines').object

        nodes = []
        for value in result:
            node = self._to_node(value)
            nodes.append(node)

        return nodes

    def reboot_node(self, node):
        """
        Reboot a machine; the API acknowledges with 202 Accepted.

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'reboot'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def destroy_node(self, node):
        """
        Delete a machine; the API acknowledges with 204 No Content.

        :rtype: ``bool``
        """
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         method='DELETE')
        return result.status == httplib.NO_CONTENT

    def create_node(self, **kwargs):
        """
        Provision a new machine.

        :keyword name: Name for the new machine (required).
        :keyword size: Package (:class:`NodeSize`) to use (required).
        :keyword image: Dataset (:class:`NodeImage`) to use (required).

        :rtype: :class:`Node`
        """
        name = kwargs['name']
        size = kwargs['size']
        image = kwargs['image']

        data = json.dumps({'name': name, 'package': size.id,
                           'dataset': image.id})
        result = self.connection.request('/my/machines', data=data,
                                         method='POST')
        return self._to_node(result.object)

    def ex_stop_node(self, node):
        """
        Stop node

        :param node: The node to be stopped
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'stop'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def ex_start_node(self, node):
        """
        Start node

        :param node: The node to be started
        :type node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'start'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def _to_node(self, data):
        # Convert a CloudAPI machine dict into a libcloud Node.
        state = NODE_STATE_MAP[data['state']]
        public_ips = []
        private_ips = []
        extra = {}

        # Split the flat IP list by private-subnet membership.
        for ip in data['ips']:
            if is_private_subnet(ip):
                private_ips.append(ip)
            else:
                public_ips.append(ip)

        # Root password, when present, backs the 'generates_password'
        # feature advertised above.
        if 'credentials' in data['metadata']:
            extra['password'] = data['metadata']['credentials']['root']

        node = Node(id=data['id'], name=data['name'], state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, extra=extra)
        return node
|
||||
87
awx/lib/site-packages/libcloud/compute/drivers/kili.py
Normal file
87
awx/lib/site-packages/libcloud/compute/drivers/kili.py
Normal file
@@ -0,0 +1,87 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Kili Public Cloud driver which is essentially just a small wrapper around the
OpenStack driver.
|
||||
"""
|
||||
|
||||
from libcloud.compute.types import Provider, LibcloudError
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver
|
||||
|
||||
__all__ = [
|
||||
'KiliCloudNodeDriver'
|
||||
]
|
||||
|
||||
ENDPOINT_ARGS = {
|
||||
'service_type': 'compute',
|
||||
'name': 'nova',
|
||||
'region': 'RegionOne'
|
||||
}
|
||||
|
||||
AUTH_URL = 'https://api.kili.io/keystone/v2.0/tokens'
|
||||
|
||||
|
||||
class KiliCloudConnection(OpenStack_1_1_Connection):
    """
    Connection class for Kili Public Cloud (Keystone password auth).
    """

    _auth_version = '2.0_password'

    def __init__(self, *args, **kwargs):
        # Pulled out before delegating: the OpenStack base class does not
        # know about these keyword arguments.
        self.region = kwargs.pop('region', None)
        self.get_endpoint_args = kwargs.pop('get_endpoint_args', None)
        super(KiliCloudConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self):
        # Resolve the compute endpoint's public URL from the Keystone
        # service catalog.
        if not self.get_endpoint_args:
            raise LibcloudError(
                'KiliCloudConnection must have get_endpoint_args set')

        if '2.0_password' not in self._auth_version:
            raise LibcloudError(
                'Auth version "%s" not supported' % (self._auth_version))

        endpoint = self.service_catalog.get_endpoint(
            **self.get_endpoint_args)
        public_url = endpoint.get('publicURL', None)

        if not public_url:
            raise LibcloudError('Could not find specified endpoint')

        return public_url
|
||||
|
||||
|
||||
class KiliCloudNodeDriver(OpenStack_1_1_NodeDriver):
    """
    Node driver for Kili Public Cloud (OpenStack based).
    """
    name = 'Kili Public Cloud'
    website = 'http://kili.io/'
    connectionCls = KiliCloudConnection
    # NOTE(review): reuses the HP Cloud provider constant instead of a
    # Kili-specific one -- looks like a copy/paste leftover.  Confirm
    # before changing, since provider lookup tables may key off it.
    type = Provider.HPCLOUD

    def __init__(self, key, secret, tenant_name, secure=True,
                 host=None, port=None, **kwargs):
        """
        Note: tenant_name argument is required for Kili cloud.
        """
        # Stored so _ex_connection_class_kwargs can forward it to
        # Keystone as ex_tenant_name.
        self.tenant_name = tenant_name
        super(KiliCloudNodeDriver, self).__init__(key=key, secret=secret,
                                                  secure=secure, host=host,
                                                  port=port,
                                                  **kwargs)

    def _ex_connection_class_kwargs(self):
        # Inject the Kili-specific endpoint/auth settings into the
        # connection constructor arguments.
        kwargs = self.openstack_connection_kwargs()
        kwargs['get_endpoint_args'] = ENDPOINT_ARGS
        kwargs['ex_force_auth_url'] = AUTH_URL
        kwargs['ex_tenant_name'] = self.tenant_name

        return kwargs
|
||||
103
awx/lib/site-packages/libcloud/compute/drivers/ktucloud.py
Normal file
103
awx/lib/site-packages/libcloud/compute/drivers/ktucloud.py
Normal file
@@ -0,0 +1,103 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.base import Node, NodeImage, NodeSize
|
||||
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
|
||||
|
||||
|
||||
class KTUCloudNodeDriver(CloudStackNodeDriver):
    """Driver for the KT ucloud Compute platform."""

    # Sentinel id reported for product types without a disk offering.
    EMPTY_DISKOFFERINGID = '0'
    type = Provider.KTUCLOUD
    name = 'KTUCloud'
    website = 'https://ucloudbiz.olleh.com/'

    def list_images(self, location=None):
        """
        List images (product types) available on KT ucloud.

        :param location: Location filter.
            NOTE(review): the original code built a
            ``templatefilter``/``zoneid`` argument dict here but never
            passed it to the request, so this parameter had no effect.
            The dead locals are removed; confirm against the KT ucloud
            API whether listAvailableProductTypes accepts a zone filter
            before wiring it up.
        :type location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`NodeImage`
        """
        imgs = self._sync_request(command='listAvailableProductTypes',
                                  method='GET')
        images = []

        for img in imgs['producttypes']:
            images.append(
                NodeImage(
                    img['serviceofferingid'],
                    img['serviceofferingdesc'],
                    self,
                    {'hypervisor': '',
                     'format': '',
                     'os': img['templatedesc'],
                     'templateid': img['templateid'],
                     'zoneid': img['zoneid']}
                )
            )

        return images

    def list_sizes(self, location=None):
        """
        List sizes (disk offerings) available on KT ucloud.

        :rtype: ``list`` of :class:`NodeSize`
        """
        szs = self._sync_request('listAvailableProductTypes')
        sizes = []
        for sz in szs['producttypes']:
            diskofferingid = sz.get('diskofferingid',
                                    self.EMPTY_DISKOFFERINGID)
            sizes.append(NodeSize(
                diskofferingid,
                sz['diskofferingdesc'],
                0, 0, 0, 0, self)
            )
        return sizes

    def create_node(self, name, size, image, location=None, **kwargs):
        """
        Create a new node (virtual machine).

        :keyword usageplantype: Billing plan; defaults to 'hourly' when
                                not supplied.
        :type usageplantype: ``str``

        :rtype: :class:`Node`
        """
        params = {'displayname': name,
                  'serviceofferingid': image.id,
                  'templateid': str(image.extra['templateid']),
                  'zoneid': str(image.extra['zoneid'])}

        usageplantype = kwargs.pop('usageplantype', None)
        if usageplantype is None:
            params['usageplantype'] = 'hourly'
        else:
            params['usageplantype'] = usageplantype

        # Only pass a disk offering when the size actually carries one.
        if size.id != self.EMPTY_DISKOFFERINGID:
            params['diskofferingid'] = size.id

        result = self._async_request(
            command='deployVirtualMachine',
            params=params,
            method='GET')

        node = result['virtualmachine']

        return Node(
            id=node['id'],
            name=node['displayname'],
            state=self.NODE_STATE_MAP[node['state']],
            public_ips=[],
            private_ips=[],
            driver=self,
            extra={
                'zoneid': image.extra['zoneid'],
                'ip_addresses': [],
                'forwarding_rules': [],
            }
        )
|
||||
335
awx/lib/site-packages/libcloud/compute/drivers/libvirt_driver.py
Normal file
335
awx/lib/site-packages/libcloud/compute/drivers/libvirt_driver.py
Normal file
@@ -0,0 +1,335 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import with_statement
|
||||
|
||||
import re
|
||||
import os
|
||||
import time
|
||||
import platform
|
||||
import subprocess
|
||||
import mimetypes
|
||||
|
||||
from os.path import join as pjoin
|
||||
from collections import defaultdict
|
||||
|
||||
try:
|
||||
from lxml import etree as ET
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as ET
|
||||
|
||||
from libcloud.compute.base import NodeDriver, Node
|
||||
from libcloud.compute.base import NodeState
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.utils.networking import is_public_subnet
|
||||
|
||||
try:
|
||||
import libvirt
|
||||
have_libvirt = True
|
||||
except ImportError:
|
||||
have_libvirt = False
|
||||
|
||||
|
||||
class LibvirtNodeDriver(NodeDriver):
    """
    Libvirt (http://libvirt.org/) node driver.

    To enable debug mode, set LIBVIR_DEBUG environment variable.
    """

    type = Provider.LIBVIRT
    name = 'Libvirt'
    website = 'http://libvirt.org/'

    # Map of libvirt virDomainState integer values to libcloud NodeState
    # constants.
    NODE_STATE_MAP = {
        0: NodeState.TERMINATED,  # no state
        1: NodeState.RUNNING,  # domain is running
        2: NodeState.PENDING,  # domain is blocked on resource
        3: NodeState.TERMINATED,  # domain is paused by user
        4: NodeState.TERMINATED,  # domain is being shut down
        5: NodeState.TERMINATED,  # domain is shut off
        6: NodeState.UNKNOWN,  # domain is crashed
        7: NodeState.UNKNOWN,  # domain is suspended by guest power management
    }

    def __init__(self, uri):
        """
        :param  uri: Hypervisor URI (e.g. vbox:///session, qemu:///system,
                     etc.).
        :type   uri: ``str``
        """
        if not have_libvirt:
            raise RuntimeError('Libvirt driver requires \'libvirt\' Python ' +
                               'package')

        self._uri = uri
        self.connection = libvirt.open(uri)

    def list_nodes(self):
        """
        Return a Node object for every domain known to the hypervisor.

        :rtype: ``list`` of :class:`Node`
        """
        domains = self.connection.listAllDomains()
        nodes = self._to_nodes(domains=domains)
        return nodes

    def reboot_node(self, node):
        domain = self._get_domain_for_node(node=node)
        return domain.reboot(flags=0) == 0

    def destroy_node(self, node):
        domain = self._get_domain_for_node(node=node)
        return domain.destroy() == 0

    def ex_start_node(self, node):
        """
        Start a stopped node.

        :param  node: Node which should be used
        :type   node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.create() == 0

    def ex_shutdown_node(self, node):
        """
        Shutdown a running node.

        Note: Usually this will result in sending an ACPI event to the node.

        :param  node: Node which should be used
        :type   node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.shutdown() == 0

    def ex_suspend_node(self, node):
        """
        Suspend a running node.

        :param  node: Node which should be used
        :type   node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.suspend() == 0

    def ex_resume_node(self, node):
        """
        Resume a suspended node.

        :param  node: Node which should be used
        :type   node: :class:`Node`

        :rtype: ``bool``
        """
        domain = self._get_domain_for_node(node=node)
        return domain.resume() == 0

    def ex_take_node_screenshot(self, node, directory, screen=0):
        """
        Take a screenshot of a monitoring of a running instance.

        :param node: Node to take the screenshot of.
        :type node: :class:`libcloud.compute.base.Node`

        :param directory: Path where the screenshot will be saved.
        :type directory: ``str``

        :param screen: ID of the monitor to take the screenshot of.
        :type screen: ``int``

        :return: Full path where the screenshot has been saved.
        :rtype: ``str``
        """
        if not os.path.exists(directory) or not os.path.isdir(directory):
            raise ValueError('Invalid value for directory argument')

        domain = self._get_domain_for_node(node=node)
        stream = self.connection.newStream()
        # Bug fix: pass the user-provided ``screen`` argument through instead
        # of the previously hard-coded ``screen=0`` which ignored it.
        mime_type = domain.screenshot(stream=stream, screen=screen)
        extensions = mimetypes.guess_all_extensions(type=mime_type)

        if extensions:
            extension = extensions[0]
        else:
            # Fall back to .png when the mime type is unrecognized.
            extension = '.png'

        name = 'screenshot-%s%s' % (int(time.time()), extension)
        file_path = pjoin(directory, name)

        with open(file_path, 'wb') as fp:
            def write(stream, buf, opaque):
                fp.write(buf)

            stream.recvAll(write, None)

        try:
            stream.finish()
        except Exception:
            # Finish is not supported by all backends
            pass

        return file_path

    def ex_get_hypervisor_hostname(self):
        """
        Return a system hostname on which the hypervisor is running.
        """
        hostname = self.connection.getHostname()
        return hostname

    def ex_get_hypervisor_sysinfo(self):
        """
        Retrieve hypervisor system information.

        :rtype: ``dict``
        """
        xml = self.connection.getSysinfo()
        etree = ET.XML(xml)

        attributes = ['bios', 'system', 'processor', 'memory_device']

        sysinfo = {}
        for attribute in attributes:
            element = etree.find(attribute)
            entries = self._get_entries(element=element)
            sysinfo[attribute] = entries

        return sysinfo

    def _to_nodes(self, domains):
        """
        Convert a list of libvirt domain objects into Node objects.
        """
        nodes = [self._to_node(domain=domain) for domain in domains]
        return nodes

    def _to_node(self, domain):
        """
        Convert a single libvirt domain object into a Node object.
        """
        state, max_mem, memory, vcpu_count, used_cpu_time = domain.info()
        state = self.NODE_STATE_MAP.get(state, NodeState.UNKNOWN)

        public_ips, private_ips = [], []

        ip_addresses = self._get_ip_addresses_for_domain(domain)

        for ip_address in ip_addresses:
            if is_public_subnet(ip_address):
                public_ips.append(ip_address)
            else:
                private_ips.append(ip_address)

        extra = {'uuid': domain.UUIDString(), 'os_type': domain.OSType(),
                 'types': self.connection.getType(),
                 'used_memory': memory / 1024, 'vcpu_count': vcpu_count,
                 'used_cpu_time': used_cpu_time}

        node = Node(id=domain.ID(), name=domain.name(), state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self, extra=extra)
        node._uuid = domain.UUIDString()  # we want to use a custom UUID
        return node

    def _get_ip_addresses_for_domain(self, domain):
        """
        Retrieve IP addresses for the provided domain.

        Note: This functionality is currently only supported on Linux and
        only works if this code is run on the same machine as the VMs run
        on.

        :return: IP addresses for the provided domain.
        :rtype: ``list``
        """
        result = []

        if platform.system() != 'Linux':
            # Only Linux is supported atm
            return result

        mac_addresses = self._get_mac_addresses_for_domain(domain=domain)

        # Look the MAC addresses up in the local ARP table.
        cmd = ['arp', '-an']
        child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        stdout, _ = child.communicate()
        arp_table = self._parse_arp_table(arp_output=stdout)

        for mac_address in mac_addresses:
            if mac_address in arp_table:
                ip_addresses = arp_table[mac_address]
                result.extend(ip_addresses)

        return result

    def _get_mac_addresses_for_domain(self, domain):
        """
        Parses network interface MAC addresses from the provided domain.
        """
        xml = domain.XMLDesc()
        etree = ET.XML(xml)
        elems = etree.findall("devices/interface[@type='network']/mac")

        result = []
        for elem in elems:
            mac_address = elem.get('address')
            result.append(mac_address)

        return result

    def _get_domain_for_node(self, node):
        """
        Return libvirt domain object for the provided node.
        """
        domain = self.connection.lookupByUUIDString(node.uuid)
        return domain

    def _get_entries(self, element):
        """
        Parse entries dictionary.

        :rtype: ``dict``
        """
        elements = element.findall('entry')

        result = {}
        for element in elements:
            name = element.get('name')
            value = element.text
            result[name] = value

        return result

    def _parse_arp_table(self, arp_output):
        """
        Parse arp command output and return a dictionary which maps mac address
        to an IP address.

        :return: Dictionary which maps mac address to IP address.
        :rtype: ``dict``
        """
        lines = arp_output.split('\n')

        arp_table = defaultdict(list)
        for line in lines:
            # Raw string to avoid invalid escape sequences in the pattern.
            match = re.match(r'.*?\((.*?)\) at (.*?)\s+', line)

            if not match:
                continue

            groups = match.groups()
            ip_address = groups[0]
            mac_address = groups[1]
            arp_table[mac_address].append(ip_address)

        return arp_table
|
||||
548
awx/lib/site-packages/libcloud/compute/drivers/linode.py
Normal file
548
awx/lib/site-packages/libcloud/compute/drivers/linode.py
Normal file
@@ -0,0 +1,548 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""libcloud driver for the Linode(R) API
|
||||
|
||||
This driver implements all libcloud functionality for the Linode API.
|
||||
Since the API is a bit more fine-grained, create_node abstracts a significant
|
||||
amount of work (and may take a while to run).
|
||||
|
||||
Linode home page http://www.linode.com/
|
||||
Linode API documentation http://www.linode.com/api/
|
||||
Alternate bindings for reference http://github.com/tjfontaine/linode-python
|
||||
|
||||
Linode(R) is a registered trademark of Linode, LLC.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
import itertools
|
||||
import binascii
|
||||
|
||||
from copy import copy
|
||||
|
||||
from libcloud.utils.py3 import PY3
|
||||
|
||||
from libcloud.common.linode import (API_ROOT, LinodeException,
|
||||
LinodeConnection, LINODE_PLAN_IDS)
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
|
||||
from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
|
||||
from libcloud.compute.base import NodeImage
|
||||
|
||||
|
||||
class LinodeNodeDriver(NodeDriver):
    """libcloud driver for the Linode API

    Rough mapping of which is which:

        list_nodes              linode.list
        reboot_node             linode.reboot
        destroy_node            linode.delete
        create_node             linode.create, linode.update,
                                linode.disk.createfromdistribution,
                                linode.disk.create, linode.config.create,
                                linode.ip.addprivate, linode.boot
        list_sizes              avail.linodeplans
        list_images             avail.distributions
        list_locations          avail.datacenters

    For more information on the Linode API, be sure to read the reference:

        http://www.linode.com/api/
    """
    type = Provider.LINODE
    name = "Linode"
    website = 'http://www.linode.com/'
    connectionCls = LinodeConnection
    _linode_plan_ids = LINODE_PLAN_IDS
    features = {'create_node': ['ssh_key', 'password']}

    def __init__(self, key):
        """Instantiate the driver with the given API key

        :param   key: the API key to use (required)
        :type    key: ``str``

        :rtype: ``None``
        """
        self.datacenter = None
        NodeDriver.__init__(self, key)

    # Converts Linode's state from DB to a NodeState constant.
    LINODE_STATES = {
        (-2): NodeState.UNKNOWN,     # Boot Failed
        (-1): NodeState.PENDING,     # Being Created
        0: NodeState.PENDING,        # Brand New
        1: NodeState.RUNNING,        # Running
        2: NodeState.TERMINATED,     # Powered Off
        3: NodeState.REBOOTING,      # Shutting Down
        4: NodeState.UNKNOWN         # Reserved
    }

    def list_nodes(self):
        """
        List all Linodes that the API key can access

        This call will return all Linodes that the API key in use has access
        to.
        If a node is in this list, rebooting will work; however, creation and
        destruction are a separate grant.

        :return: List of node objects that the API key can access
        :rtype: ``list`` of :class:`Node`
        """
        params = {"api_action": "linode.list"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        return self._to_nodes(data)

    def reboot_node(self, node):
        """
        Reboot the given Linode

        Will issue a shutdown job followed by a boot job, using the last booted
        configuration.  In most cases, this will be the only configuration.

        :param      node: the Linode to reboot
        :type       node: :class:`Node`

        :rtype: ``bool``
        """
        params = {"api_action": "linode.reboot", "LinodeID": node.id}
        self.connection.request(API_ROOT, params=params)
        return True

    def destroy_node(self, node):
        """Destroy the given Linode

        Will remove the Linode from the account and issue a prorated credit. A
        grant for removing Linodes from the account is required, otherwise this
        method will fail.

        In most cases, all disk images must be removed from a Linode before the
        Linode can be removed; however, this call explicitly skips those
        safeguards. There is no going back from this method.

        :param       node: the Linode to destroy
        :type        node: :class:`Node`

        :rtype: ``bool``
        """
        params = {"api_action": "linode.delete", "LinodeID": node.id,
                  "skipChecks": True}
        self.connection.request(API_ROOT, params=params)
        return True

    def create_node(self, **kwargs):
        """Create a new Linode, deploy a Linux distribution, and boot

        This call abstracts much of the functionality of provisioning a Linode
        and getting it booted.  A global grant to add Linodes to the account is
        required, as this call will result in a billing charge.

        Note that there is a safety valve of 5 Linodes per hour, in order to
        prevent a runaway script from ruining your day.

        :keyword name: the name to assign the Linode (mandatory)
        :type name: ``str``

        :keyword image: which distribution to deploy on the Linode (mandatory)
        :type image: :class:`NodeImage`

        :keyword size: the plan size to create (mandatory)
        :type size: :class:`NodeSize`

        :keyword auth: an SSH key or root password (mandatory)
        :type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`

        :keyword location: which datacenter to create the Linode in
        :type location: :class:`NodeLocation`

        :keyword ex_swap: size of the swap partition in MB (128)
        :type ex_swap: ``int``

        :keyword ex_rsize: size of the root partition in MB (plan size - swap).
        :type ex_rsize: ``int``

        :keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6 Stable).
        :type ex_kernel: ``str``

        :keyword ex_payment: one of 1, 12, or 24; subscription length (1)
        :type ex_payment: ``int``

        :keyword ex_comment: a small comment for the configuration (libcloud)
        :type ex_comment: ``str``

        :keyword ex_private: whether or not to request a private IP (False)
        :type ex_private: ``bool``

        :keyword lconfig: what to call the configuration (generated)
        :type lconfig: ``str``

        :keyword lroot: what to call the root image (generated)
        :type lroot: ``str``

        :keyword lswap: what to call the swap space (generated)
        :type lswap: ``str``

        :return: Node representing the newly-created Linode
        :rtype: :class:`Node`
        """
        name = kwargs["name"]
        image = kwargs["image"]
        size = kwargs["size"]
        auth = self._get_and_check_auth(kwargs["auth"])

        # Pick a location (resolves LIBCLOUD-41 in JIRA)
        if "location" in kwargs:
            chosen = kwargs["location"].id
        elif self.datacenter:
            chosen = self.datacenter
        else:
            raise LinodeException(0xFB, "Need to select a datacenter first")

        # Step 0: Parameter validation before we purchase
        # We're especially careful here so we don't fail after purchase, rather
        # than getting halfway through the process and having the API fail.

        # Plan ID
        plans = self.list_sizes()
        if size.id not in [p.id for p in plans]:
            raise LinodeException(0xFB, "Invalid plan ID -- avail.plans")

        # Payment schedule
        payment = "1" if "ex_payment" not in kwargs else \
            str(kwargs["ex_payment"])
        if payment not in ["1", "12", "24"]:
            raise LinodeException(0xFB, "Invalid subscription (1, 12, 24)")

        ssh = None
        root = None
        # SSH key and/or root password
        if isinstance(auth, NodeAuthSSHKey):
            ssh = auth.pubkey
        elif isinstance(auth, NodeAuthPassword):
            root = auth.password

        if not ssh and not root:
            raise LinodeException(0xFB, "Need SSH key or root password")
        if root is not None and len(root) < 6:
            raise LinodeException(0xFB, "Root password is too short")

        # Swap size
        try:
            swap = 128 if "ex_swap" not in kwargs else int(kwargs["ex_swap"])
        except (ValueError, TypeError):
            # Narrowed from a bare ``except:`` so unrelated errors (e.g.
            # KeyboardInterrupt) are no longer swallowed.
            raise LinodeException(0xFB, "Need an integer swap size")

        # Root partition size
        imagesize = (size.disk - swap) if "ex_rsize" not in kwargs else\
            int(kwargs["ex_rsize"])
        if (imagesize + swap) > size.disk:
            raise LinodeException(0xFB, "Total disk images are too big")

        # Distribution ID
        distros = self.list_images()
        if image.id not in [d.id for d in distros]:
            raise LinodeException(0xFB,
                                  "Invalid distro -- avail.distributions")

        # Kernel
        if "ex_kernel" in kwargs:
            kernel = kwargs["ex_kernel"]
        else:
            if image.extra['64bit']:
                # For a list of available kernel ids, see
                # https://www.linode.com/kernels/
                kernel = 138
            else:
                kernel = 137
        params = {"api_action": "avail.kernels"}
        kernels = self.connection.request(API_ROOT, params=params).objects[0]
        if kernel not in [z["KERNELID"] for z in kernels]:
            raise LinodeException(0xFB, "Invalid kernel -- avail.kernels")

        # Comments
        comments = "Created by Apache libcloud <http://www.libcloud.org>" if\
            "ex_comment" not in kwargs else kwargs["ex_comment"]

        # Step 1: linode.create
        params = {
            "api_action": "linode.create",
            "DatacenterID": chosen,
            "PlanID": size.id,
            "PaymentTerm": payment
        }
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode = {"id": data["LinodeID"]}

        # Step 1b. linode.update to rename the Linode
        params = {
            "api_action": "linode.update",
            "LinodeID": linode["id"],
            "Label": name
        }
        self.connection.request(API_ROOT, params=params)

        # Step 1c. linode.ip.addprivate if it was requested
        if "ex_private" in kwargs and kwargs["ex_private"]:
            params = {
                "api_action": "linode.ip.addprivate",
                "LinodeID": linode["id"]
            }
            self.connection.request(API_ROOT, params=params)

        # Step 1d. Labels
        # use the linode id as the name can be up to 63 chars and the labels
        # are limited to 48 chars
        label = {
            "lconfig": "[%s] Configuration Profile" % linode["id"],
            "lroot": "[%s] %s Disk Image" % (linode["id"], image.name),
            "lswap": "[%s] Swap Space" % linode["id"]
        }
        for what in ["lconfig", "lroot", "lswap"]:
            if what in kwargs:
                label[what] = kwargs[what]

        # Step 2: linode.disk.createfromdistribution
        if not root:
            root = binascii.b2a_base64(os.urandom(8)).decode('ascii').strip()

        params = {
            "api_action": "linode.disk.createfromdistribution",
            "LinodeID": linode["id"],
            "DistributionID": image.id,
            "Label": label["lroot"],
            "Size": imagesize,
            "rootPass": root,
        }
        if ssh:
            params["rootSSHKey"] = ssh
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode["rootimage"] = data["DiskID"]

        # Step 3: linode.disk.create for swap
        params = {
            "api_action": "linode.disk.create",
            "LinodeID": linode["id"],
            "Label": label["lswap"],
            "Type": "swap",
            "Size": swap
        }
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode["swapimage"] = data["DiskID"]

        # Step 4: linode.config.create for main profile
        disks = "%s,%s,,,,,,," % (linode["rootimage"], linode["swapimage"])
        params = {
            "api_action": "linode.config.create",
            "LinodeID": linode["id"],
            "KernelID": kernel,
            "Label": label["lconfig"],
            "Comments": comments,
            "DiskList": disks
        }
        data = self.connection.request(API_ROOT, params=params).objects[0]
        linode["config"] = data["ConfigID"]

        # Step 5: linode.boot
        params = {
            "api_action": "linode.boot",
            "LinodeID": linode["id"],
            "ConfigID": linode["config"]
        }
        self.connection.request(API_ROOT, params=params)

        # Make a node out of it and hand it back
        params = {"api_action": "linode.list", "LinodeID": linode["id"]}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        nodes = self._to_nodes(data)

        if len(nodes) == 1:
            node = nodes[0]
            if getattr(auth, "generated", False):
                node.extra['password'] = auth.password
            return node

        return None

    def list_sizes(self, location=None):
        """
        List available Linode plans

        Gets the sizes that can be used for creating a Linode.  Since available
        Linode plans vary per-location, this method can also be passed a
        location to filter the availability.

        :keyword location: the facility to retrieve plans in
        :type location: :class:`NodeLocation`

        :rtype: ``list`` of :class:`NodeSize`
        """
        params = {"api_action": "avail.linodeplans"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        sizes = []
        for obj in data:
            n = NodeSize(id=obj["PLANID"], name=obj["LABEL"], ram=obj["RAM"],
                         disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"],
                         price=obj["PRICE"], driver=self.connection.driver)
            sizes.append(n)
        return sizes

    def list_images(self):
        """
        List available Linux distributions

        Retrieve all Linux distributions that can be deployed to a Linode.

        :rtype: ``list`` of :class:`NodeImage`
        """
        params = {"api_action": "avail.distributions"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        distros = []
        for obj in data:
            i = NodeImage(id=obj["DISTRIBUTIONID"],
                          name=obj["LABEL"],
                          driver=self.connection.driver,
                          extra={'pvops': obj['REQUIRESPVOPSKERNEL'],
                                 '64bit': obj['IS64BIT']})
            distros.append(i)
        return distros

    def list_locations(self):
        """
        List available facilities for deployment

        Retrieve all facilities that a Linode can be deployed in.

        :rtype: ``list`` of :class:`NodeLocation`
        """
        params = {"api_action": "avail.datacenters"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        nl = []
        for dc in data:
            country = None
            if "USA" in dc["LOCATION"]:
                country = "US"
            elif "UK" in dc["LOCATION"]:
                country = "GB"
            elif "JP" in dc["LOCATION"]:
                country = "JP"
            else:
                country = "??"
            nl.append(NodeLocation(dc["DATACENTERID"],
                                   dc["LOCATION"],
                                   country,
                                   self))
        return nl

    def linode_set_datacenter(self, dc):
        """
        Set the default datacenter for Linode creation

        Since Linodes must be created in a facility, this function sets the
        default that :class:`create_node` will use.  If a location keyword is
        not passed to :class:`create_node`, this method must have already been
        used.

        :keyword dc: the datacenter to create Linodes in unless specified
        :type dc: :class:`NodeLocation`

        :rtype: ``bool``
        """
        did = dc.id
        params = {"api_action": "avail.datacenters"}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        for datacenter in data:
            # Bug fix: compare against the API row being iterated
            # (``datacenter``), not the ``dc`` NodeLocation argument, which
            # is not subscriptable and made this loop always raise.
            if did == datacenter["DATACENTERID"]:
                self.datacenter = did
                # Return True as promised by the documented ``bool`` rtype.
                return True

        # DATACENTERID values are ints; stringify them before joining.
        dcs = ", ".join([str(d["DATACENTERID"]) for d in data])
        self.datacenter = None
        raise LinodeException(0xFD, "Invalid datacenter (use one of %s)" % dcs)

    def _to_nodes(self, objs):
        """Convert returned JSON Linodes into Node instances

        :keyword objs: ``list`` of JSON dictionaries representing the Linodes
        :type objs: ``list``
        :return: ``list`` of :class:`Node`s"""

        # Get the IP addresses for the Linodes
        nodes = {}
        batch = []
        for o in objs:
            lid = o["LINODEID"]
            nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ips=[],
                                  private_ips=[],
                                  state=self.LINODE_STATES[o["STATUS"]],
                                  driver=self.connection.driver)
            n.extra = copy(o)
            n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM"))
            batch.append({"api_action": "linode.ip.list", "LinodeID": lid})

        # Avoid batch limitation
        ip_answers = []
        args = [iter(batch)] * 25

        if PY3:
            izip_longest = itertools.zip_longest
        else:
            izip_longest = getattr(itertools, 'izip_longest', _izip_longest)

        for twenty_five in izip_longest(*args):
            twenty_five = [q for q in twenty_five if q]
            params = {"api_action": "batch",
                      "api_requestArray": json.dumps(twenty_five)}
            req = self.connection.request(API_ROOT, params=params)
            if not req.success() or len(req.objects) == 0:
                return None
            ip_answers.extend(req.objects)

        # Add the returned IPs to the nodes and return them
        for ip_list in ip_answers:
            for ip in ip_list:
                lid = ip["LINODEID"]
                which = nodes[lid].public_ips if ip["ISPUBLIC"] == 1 else\
                    nodes[lid].private_ips
                which.append(ip["IPADDRESS"])
        return list(nodes.values())
|
||||
|
||||
|
||||
def _izip_longest(*args, **kwds):
    """Taken from Python docs

    http://docs.python.org/library/itertools.html#itertools.izip

    Pure-Python fallback for ``itertools.izip_longest``: zip the given
    iterables, padding shorter ones with ``fillvalue`` until the longest
    is exhausted.

    NOTE(review): this uses ``itertools.izip``, which only exists on
    Python 2 — the caller (``_to_nodes``) only reaches this fallback when
    not running under PY3, so that appears intentional.
    """

    # Only keyword accepted is 'fillvalue'; defaults to None.
    fillvalue = kwds.get('fillvalue')

    def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
        # The pop-based counter raises IndexError once every input iterator
        # has been exhausted, which terminates the outer loop below.
        yield counter()  # yields the fillvalue, or raises IndexError

    fillers = itertools.repeat(fillvalue)
    iters = [itertools.chain(it, sentinel(), fillers) for it in args]
    try:
        for tup in itertools.izip(*iters):
            yield tup
    except IndexError:
        # Raised by sentinel() when all real inputs are done: stop yielding.
        pass
|
||||
448
awx/lib/site-packages/libcloud/compute/drivers/nephoscale.py
Normal file
448
awx/lib/site-packages/libcloud/compute/drivers/nephoscale.py
Normal file
@@ -0,0 +1,448 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
NephoScale Cloud driver (http://www.nephoscale.com)
|
||||
API documentation: http://docs.nephoscale.com
|
||||
Created by Markos Gogoulos (https://mist.io)
|
||||
"""
|
||||
|
||||
import base64
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import binascii
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
from libcloud.utils.py3 import urlencode
|
||||
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
|
||||
from libcloud.compute.types import (NodeState, InvalidCredsError,
|
||||
LibcloudError)
|
||||
from libcloud.compute.base import (Node, NodeDriver, NodeImage, NodeSize,
|
||||
NodeLocation)
|
||||
from libcloud.utils.networking import is_private_subnet
|
||||
|
||||
# Endpoint host for all NephoScale API requests.
API_HOST = 'api.nephoscale.com'

# Map of NephoScale power-state strings to libcloud NodeState constants.
# Note that 'off' is deliberately reported as UNKNOWN rather than STOPPED.
NODE_STATE_MAP = {
    'on': NodeState.RUNNING,
    'off': NodeState.UNKNOWN,
    'unknown': NodeState.UNKNOWN,
}

# HTTP status codes treated as a successful API response.
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
                        httplib.NO_CONTENT]

# used in create_node and specifies how many times to get the list of nodes and
# check if the newly created node is there. This is because when a request is
# sent to create a node, NephoScale replies with the job id, and not the node
# itself thus we don't have the ip addresses, that are required in deploy_node
CONNECT_ATTEMPTS = 10
|
||||
|
||||
|
||||
class NodeKey(object):
    """
    Simple container describing a NephoScale credential record
    (an SSH key and/or a password entry).
    """

    def __init__(self, id, name, public_key=None, key_group=None,
                 password=None):
        # Record every field verbatim; no validation is performed here.
        self.id = id
        self.name = name
        self.public_key = public_key
        self.key_group = key_group
        self.password = password

    def __repr__(self):
        return '<NodeKey: id=%s, name=%s>' % (self.id, self.name)
|
||||
|
||||
|
||||
class NephoscaleResponse(JsonResponse):
    """
    Response class for the NephoScale API.

    Maps authentication and not-found HTTP statuses onto exceptions and
    defines which status codes count as success.
    """

    def parse_error(self):
        status = self.status
        if status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Authorization Failed')
        if status == httplib.NOT_FOUND:
            raise Exception("The resource you are looking for is not found.")
        # For any other error status, hand back the raw body.
        return self.body

    def success(self):
        # Success is membership in the module-level whitelist of 2xx codes.
        return self.status in VALID_RESPONSE_CODES
|
||||
|
||||
|
||||
class NephoscaleConnection(ConnectionUserAndKey):
    """
    Connection class for the NephoScale API.

    Every request is authenticated with HTTP Basic Authentication using
    the account username and password.
    """
    host = API_HOST
    responseCls = NephoscaleResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        """
        Attach the Basic Authentication header required on every request.
        """
        credentials = '%s:%s' % (self.user_id, self.key)
        encoded = base64.b64encode(b(credentials))
        headers['Authorization'] = 'Basic %s' % (encoded.decode('utf-8'))
        return headers
|
||||
|
||||
|
||||
class NephoscaleNodeDriver(NodeDriver):
    """
    NephoScale node driver.

    Example::

        >>> from libcloud.compute.providers import get_driver
        >>> driver = get_driver('nephoscale')
        >>> conn = driver('nepho_user', 'nepho_password')
        >>> conn.list_nodes()
    """

    type = Provider.NEPHOSCALE
    api_name = 'nephoscale'
    name = 'NephoScale'
    website = 'http://www.nephoscale.com'
    connectionCls = NephoscaleConnection
    features = {'create_node': ['ssh_key']}
|
||||
|
||||
def list_locations(self):
|
||||
"""
|
||||
List available zones for deployment
|
||||
|
||||
:rtype: ``list`` of :class:`NodeLocation`
|
||||
"""
|
||||
result = self.connection.request('/datacenter/zone/').object
|
||||
locations = []
|
||||
for value in result.get('data', []):
|
||||
location = NodeLocation(id=value.get('id'),
|
||||
name=value.get('name'),
|
||||
country='US',
|
||||
driver=self)
|
||||
locations.append(location)
|
||||
return locations
|
||||
|
||||
def list_images(self):
|
||||
"""
|
||||
List available images for deployment
|
||||
|
||||
:rtype: ``list`` of :class:`NodeImage`
|
||||
"""
|
||||
result = self.connection.request('/image/server/').object
|
||||
images = []
|
||||
for value in result.get('data', []):
|
||||
extra = {'architecture': value.get('architecture'),
|
||||
'disks': value.get('disks'),
|
||||
'billable_type': value.get('billable_type'),
|
||||
'pcpus': value.get('pcpus'),
|
||||
'cores': value.get('cores'),
|
||||
'uri': value.get('uri'),
|
||||
'storage': value.get('storage'),
|
||||
}
|
||||
image = NodeImage(id=value.get('id'),
|
||||
name=value.get('friendly_name'),
|
||||
driver=self,
|
||||
extra=extra)
|
||||
images.append(image)
|
||||
return images
|
||||
|
||||
def list_sizes(self):
|
||||
"""
|
||||
List available sizes containing prices
|
||||
|
||||
:rtype: ``list`` of :class:`NodeSize`
|
||||
"""
|
||||
result = self.connection.request('/server/type/cloud/').object
|
||||
sizes = []
|
||||
for value in result.get('data', []):
|
||||
value_id = value.get('id')
|
||||
size = NodeSize(id=value_id,
|
||||
name=value.get('friendly_name'),
|
||||
ram=value.get('ram'),
|
||||
disk=value.get('storage'),
|
||||
bandwidth=None,
|
||||
price=self._get_size_price(size_id=str(value_id)),
|
||||
driver=self)
|
||||
sizes.append(size)
|
||||
|
||||
return sorted(sizes, key=lambda k: k.price)
|
||||
|
||||
def list_nodes(self):
|
||||
"""
|
||||
List available nodes
|
||||
|
||||
:rtype: ``list`` of :class:`Node`
|
||||
"""
|
||||
result = self.connection.request('/server/cloud/').object
|
||||
nodes = [self._to_node(value) for value in result.get('data', [])]
|
||||
return nodes
|
||||
|
||||
def rename_node(self, node, name, hostname=None):
|
||||
"""rename a cloud server, optionally specify hostname too"""
|
||||
data = {'name': name}
|
||||
if hostname:
|
||||
data['hostname'] = hostname
|
||||
params = urlencode(data)
|
||||
result = self.connection.request('/server/cloud/%s/' % node.id,
|
||||
data=params, method='PUT').object
|
||||
return result.get('response') in VALID_RESPONSE_CODES
|
||||
|
||||
def reboot_node(self, node):
|
||||
"""reboot a running node"""
|
||||
result = self.connection.request('/server/cloud/%s/initiator/restart/'
|
||||
% node.id, method='POST').object
|
||||
return result.get('response') in VALID_RESPONSE_CODES
|
||||
|
||||
def ex_start_node(self, node):
|
||||
"""start a stopped node"""
|
||||
result = self.connection.request('/server/cloud/%s/initiator/start/'
|
||||
% node.id, method='POST').object
|
||||
return result.get('response') in VALID_RESPONSE_CODES
|
||||
|
||||
def ex_stop_node(self, node):
|
||||
"""stop a running node"""
|
||||
result = self.connection.request('/server/cloud/%s/initiator/stop/'
|
||||
% node.id, method='POST').object
|
||||
return result.get('response') in VALID_RESPONSE_CODES
|
||||
|
||||
def destroy_node(self, node):
|
||||
"""destroy a node"""
|
||||
result = self.connection.request('/server/cloud/%s/' % node.id,
|
||||
method='DELETE').object
|
||||
return result.get('response') in VALID_RESPONSE_CODES
|
||||
|
||||
def ex_list_keypairs(self, ssh=False, password=False, key_group=None):
|
||||
"""
|
||||
List available console and server keys
|
||||
There are two types of keys for NephoScale, ssh and password keys.
|
||||
If run without arguments, lists all keys. Otherwise list only
|
||||
ssh keys, or only password keys.
|
||||
Password keys with key_group 4 are console keys. When a server
|
||||
is created, it has two keys, one password or ssh key, and
|
||||
one password console key.
|
||||
|
||||
:keyword ssh: if specified, show ssh keys only (optional)
|
||||
:type ssh: ``bool``
|
||||
|
||||
:keyword password: if specified, show password keys only (optional)
|
||||
:type password: ``bool``
|
||||
|
||||
:keyword key_group: if specified, show keys with this key_group only
|
||||
eg key_group=4 for console password keys (optional)
|
||||
:type key_group: ``int``
|
||||
|
||||
:rtype: ``list`` of :class:`NodeKey`
|
||||
"""
|
||||
if (ssh and password):
|
||||
raise LibcloudError('You can only supply ssh or password. To \
|
||||
get all keys call with no arguments')
|
||||
if ssh:
|
||||
result = self.connection.request('/key/sshrsa/').object
|
||||
elif password:
|
||||
result = self.connection.request('/key/password/').object
|
||||
else:
|
||||
result = self.connection.request('/key/').object
|
||||
keys = [self._to_key(value) for value in result.get('data', [])]
|
||||
|
||||
if key_group:
|
||||
keys = [key for key in keys if
|
||||
key.key_group == key_group]
|
||||
return keys
|
||||
|
||||
def ex_create_keypair(self, name, public_key=None, password=None,
|
||||
key_group=None):
|
||||
"""Creates a key, ssh or password, for server or console
|
||||
The group for the key (key_group) is 1 for Server and 4 for Console
|
||||
Returns the id of the created key
|
||||
"""
|
||||
if public_key:
|
||||
if not key_group:
|
||||
key_group = 1
|
||||
data = {
|
||||
'name': name,
|
||||
'public_key': public_key,
|
||||
'key_group': key_group
|
||||
|
||||
}
|
||||
params = urlencode(data)
|
||||
result = self.connection.request('/key/sshrsa/', data=params,
|
||||
method='POST').object
|
||||
else:
|
||||
if not key_group:
|
||||
key_group = 4
|
||||
if not password:
|
||||
password = self.random_password()
|
||||
data = {
|
||||
'name': name,
|
||||
'password': password,
|
||||
'key_group': key_group
|
||||
}
|
||||
params = urlencode(data)
|
||||
result = self.connection.request('/key/password/', data=params,
|
||||
method='POST').object
|
||||
return result.get('data', {}).get('id', '')
|
||||
|
||||
def ex_delete_keypair(self, key_id, ssh=False):
|
||||
"""Delete an ssh key or password given it's id
|
||||
"""
|
||||
if ssh:
|
||||
result = self.connection.request('/key/sshrsa/%s/' % key_id,
|
||||
method='DELETE').object
|
||||
else:
|
||||
result = self.connection.request('/key/password/%s/' % key_id,
|
||||
method='DELETE').object
|
||||
return result.get('response') in VALID_RESPONSE_CODES
|
||||
|
||||
def create_node(self, name, size, image, server_key=None,
|
||||
console_key=None, zone=None, **kwargs):
|
||||
"""Creates the node, and sets the ssh key, console key
|
||||
NephoScale will respond with a 200-200 response after sending a valid
|
||||
request. If nowait=True is specified in the args, we then ask a few
|
||||
times until the server is created and assigned a public IP address,
|
||||
so that deploy_node can be run
|
||||
|
||||
>>> from libcloud.compute.providers import get_driver
|
||||
>>> driver = get_driver('nephoscale')
|
||||
>>> conn = driver('nepho_user','nepho_password')
|
||||
>>> conn.list_nodes()
|
||||
>>> name = 'staging-server'
|
||||
>>> size = conn.list_sizes()[0]
|
||||
<NodeSize: id=27, ...name=CS025 - 0.25GB, 10GB, ...>
|
||||
>>> image = conn.list_images()[9]
|
||||
<NodeImage: id=49, name=Linux Ubuntu Server 10.04 LTS 64-bit, ...>
|
||||
>>> server_keys = conn.ex_list_keypairs(key_group=1)[0]
|
||||
<NodeKey: id=71211, name=markos>
|
||||
>>> server_key = conn.ex_list_keypairs(key_group=1)[0].id
|
||||
70867
|
||||
>>> console_keys = conn.ex_list_keypairs(key_group=4)[0]
|
||||
<NodeKey: id=71213, name=mistio28434>
|
||||
>>> console_key = conn.ex_list_keypairs(key_group=4)[0].id
|
||||
70907
|
||||
>>> node = conn.create_node(name=name, size=size, image=image, \
|
||||
console_key=console_key, server_key=server_key)
|
||||
|
||||
We can also create an ssh key, plus a console key and
|
||||
deploy node with them
|
||||
>>> server_key = conn.ex_create_keypair(name, public_key='123')
|
||||
71211
|
||||
>>> console_key = conn.ex_create_keypair(name, key_group=4)
|
||||
71213
|
||||
|
||||
We can increase the number of connect attempts to wait until
|
||||
the node is created, so that deploy_node has ip address to
|
||||
deploy the script
|
||||
We can also specify the location
|
||||
>>> location = conn.list_locations()[0]
|
||||
>>> node = conn.create_node(name=name,
|
||||
... size=size,
|
||||
... image=image,
|
||||
... console_key=console_key,
|
||||
... server_key=server_key,
|
||||
... connect_attempts=10,
|
||||
... nowait=True,
|
||||
... zone=location.id)
|
||||
"""
|
||||
hostname = kwargs.get('hostname', name)
|
||||
service_type = size.id
|
||||
image = image.id
|
||||
connect_attempts = int(kwargs.get('connect_attempts',
|
||||
CONNECT_ATTEMPTS))
|
||||
|
||||
data = {'name': name,
|
||||
'hostname': hostname,
|
||||
'service_type': service_type,
|
||||
'image': image,
|
||||
'server_key': server_key,
|
||||
'console_key': console_key,
|
||||
'zone': zone
|
||||
}
|
||||
|
||||
params = urlencode(data)
|
||||
try:
|
||||
node = self.connection.request('/server/cloud/', data=params,
|
||||
method='POST')
|
||||
except Exception:
|
||||
e = sys.exc_info()[1]
|
||||
raise Exception("Failed to create node %s" % e)
|
||||
node = Node(id='', name=name, state=NodeState.UNKNOWN, public_ips=[],
|
||||
private_ips=[], driver=self)
|
||||
|
||||
nowait = kwargs.get('ex_wait', False)
|
||||
if not nowait:
|
||||
return node
|
||||
else:
|
||||
# try to get the created node public ips, for use in deploy_node
|
||||
# At this point we don't have the id of the newly created Node,
|
||||
# so search name in nodes
|
||||
created_node = False
|
||||
while connect_attempts > 0:
|
||||
nodes = self.list_nodes()
|
||||
created_node = [c_node for c_node in nodes if
|
||||
c_node.name == name]
|
||||
if created_node:
|
||||
return created_node[0]
|
||||
else:
|
||||
time.sleep(60)
|
||||
connect_attempts = connect_attempts - 1
|
||||
return node
|
||||
|
||||
def _to_node(self, data):
|
||||
"""Convert node in Node instances
|
||||
"""
|
||||
|
||||
state = NODE_STATE_MAP.get(data.get('power_status'), '4')
|
||||
public_ips = []
|
||||
private_ips = []
|
||||
ip_addresses = data.get('ipaddresses', '')
|
||||
# E.g. "ipaddresses": "198.120.14.6, 10.132.60.1"
|
||||
if ip_addresses:
|
||||
for ip in ip_addresses.split(','):
|
||||
ip = ip.replace(' ', '')
|
||||
if is_private_subnet(ip):
|
||||
private_ips.append(ip)
|
||||
else:
|
||||
public_ips.append(ip)
|
||||
extra = {
|
||||
'zone_data': data.get('zone'),
|
||||
'zone': data.get('zone', {}).get('name'),
|
||||
'image': data.get('image', {}).get('friendly_name'),
|
||||
'create_time': data.get('create_time'),
|
||||
'network_ports': data.get('network_ports'),
|
||||
'is_console_enabled': data.get('is_console_enabled'),
|
||||
'service_type': data.get('service_type', {}).get('friendly_name'),
|
||||
'hostname': data.get('hostname')
|
||||
}
|
||||
|
||||
node = Node(id=data.get('id'), name=data.get('name'), state=state,
|
||||
public_ips=public_ips, private_ips=private_ips,
|
||||
driver=self, extra=extra)
|
||||
return node
|
||||
|
||||
def _to_key(self, data):
|
||||
return NodeKey(id=data.get('id'),
|
||||
name=data.get('name'),
|
||||
password=data.get('password'),
|
||||
key_group=data.get('key_group'),
|
||||
public_key=data.get('public_key'))
|
||||
|
||||
def random_password(self, size=8):
|
||||
value = os.urandom(size)
|
||||
password = binascii.hexlify(value).decode('ascii')
|
||||
return password[:size]
|
||||
29
awx/lib/site-packages/libcloud/compute/drivers/ninefold.py
Normal file
29
awx/lib/site-packages/libcloud/compute/drivers/ninefold.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.compute.providers import Provider
|
||||
|
||||
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
|
||||
|
||||
|
||||
class NinefoldNodeDriver(CloudStackNodeDriver):
    """Driver for Ninefold's Compute platform (CloudStack-based)."""

    host = 'api.ninefold.com'
    path = '/compute/v1.0/'

    type = Provider.NINEFOLD
    name = 'Ninefold'
    website = 'http://ninefold.com/'
|
||||
1264
awx/lib/site-packages/libcloud/compute/drivers/opennebula.py
Normal file
1264
awx/lib/site-packages/libcloud/compute/drivers/opennebula.py
Normal file
File diff suppressed because it is too large
Load Diff
2439
awx/lib/site-packages/libcloud/compute/drivers/openstack.py
Normal file
2439
awx/lib/site-packages/libcloud/compute/drivers/openstack.py
Normal file
File diff suppressed because it is too large
Load Diff
620
awx/lib/site-packages/libcloud/compute/drivers/opsource.py
Normal file
620
awx/lib/site-packages/libcloud/compute/drivers/opsource.py
Normal file
@@ -0,0 +1,620 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Opsource Driver
|
||||
"""
|
||||
|
||||
try:
|
||||
from lxml import etree as ET
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as ET
|
||||
|
||||
from base64 import b64encode
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.compute.base import NodeDriver, Node
|
||||
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
|
||||
from libcloud.common.types import LibcloudError, InvalidCredsError
|
||||
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
|
||||
from libcloud.utils.xml import fixxpath, findtext, findall
|
||||
from libcloud.compute.types import NodeState, Provider
|
||||
|
||||
# Roadmap / TODO:
|
||||
#
|
||||
# 0.1 - Basic functionality: create, delete, start, stop, reboot - servers
|
||||
# (base OS images only, no customer images supported yet)
|
||||
# x implement list_nodes()
|
||||
# x implement create_node() (only support Base OS images,
|
||||
# no customer images yet)
|
||||
# x implement reboot()
|
||||
# x implement destroy_node()
|
||||
# x implement list_sizes()
|
||||
# x implement list_images() (only support Base OS images,
|
||||
# no customer images yet)
|
||||
# x implement list_locations()
|
||||
# x implement ex_* extension functions for opsource-specific featurebody
|
||||
# x ex_graceful_shutdown
|
||||
# x ex_start_node
|
||||
# x ex_power_off
|
||||
# x ex_list_networks (needed for create_node())
|
||||
# x refactor: switch to using fixxpath() from the vcloud driver for
|
||||
# dealing with xml namespace tags
|
||||
# x refactor: move some functionality from OpsourceConnection.request()
|
||||
# method into new .request_with_orgId() method
|
||||
# x add OpsourceStatus object support to:
|
||||
# x _to_node()
|
||||
# x _to_network()
|
||||
# x implement test cases
|
||||
#
|
||||
# 0.2 - Support customer images (snapshots) and server modification functions
|
||||
# - support customer-created images:
|
||||
# - list deployed customer images (in list_images() ?)
|
||||
# - list pending customer images (in list_images() ?)
|
||||
# - delete customer images
|
||||
# - modify customer images
|
||||
# - add "pending-servers" in list_nodes()
|
||||
# - implement various ex_* extension functions for opsource-specific
|
||||
# features
|
||||
# - ex_modify_server()
|
||||
# - ex_add_storage_to_server()
|
||||
# - ex_snapshot_server() (create's customer image)
|
||||
#
|
||||
# 0.3 - support Network API
|
||||
# 0.4 - Support VIP/Load-balancing API
|
||||
# 0.5 - support Files Account API
|
||||
# 0.6 - support Reports API
|
||||
# 1.0 - Opsource 0.9 API feature complete, tested
|
||||
|
||||
# setup a few variables to represent all of the opsource cloud namespaces
|
||||
# XML namespaces used by the various Opsource cloud API endpoints.
NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"
ORGANIZATION_NS = NAMESPACE_BASE + "/organization"
SERVER_NS = NAMESPACE_BASE + "/server"
NETWORK_NS = NAMESPACE_BASE + "/network"
DIRECTORY_NS = NAMESPACE_BASE + "/directory"
RESET_NS = NAMESPACE_BASE + "/reset"
VIP_NS = NAMESPACE_BASE + "/vip"
IMAGEIMPORTEXPORT_NS = NAMESPACE_BASE + "/imageimportexport"
DATACENTER_NS = NAMESPACE_BASE + "/datacenter"
SUPPORT_NS = NAMESPACE_BASE + "/support"
GENERAL_NS = NAMESPACE_BASE + "/general"
IPPLAN_NS = NAMESPACE_BASE + "/ipplan"
WHITELABEL_NS = NAMESPACE_BASE + "/whitelabel"
|
||||
|
||||
|
||||
class OpsourceResponse(XmlResponse):
    """XML response wrapper that maps Opsource API errors to exceptions."""

    def parse_error(self):
        # 401 and 403 both indicate bad credentials.
        if self.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN):
            raise InvalidCredsError(self.body)

        body = self.parse_body()

        # 400 carries a structured <resultCode>/<resultDetail> pair.
        if self.status == httplib.BAD_REQUEST:
            code = findtext(body, 'resultCode', SERVER_NS)
            message = findtext(body, 'resultDetail', SERVER_NS)
            raise OpsourceAPIException(code,
                                       message,
                                       driver=OpsourceNodeDriver)

        return self.body
|
||||
|
||||
|
||||
class OpsourceAPIException(LibcloudError):
    """Error reported by the Opsource API (resultCode + resultDetail)."""

    def __init__(self, code, msg, driver):
        self.code = code
        self.msg = msg
        self.driver = driver

    def __str__(self):
        return "%s: %s" % (self.code, self.msg)

    def __repr__(self):
        return ("<OpsourceAPIException: code='%s', msg='%s'>" %
                (self.code, self.msg))
|
||||
|
||||
|
||||
class OpsourceConnection(ConnectionUserAndKey):
    """
    Connection class for the Opsource driver.
    """

    host = 'api.opsourcecloud.net'
    api_path = '/oec'
    api_version = '0.9'
    _orgId = None
    responseCls = OpsourceResponse

    allow_insecure = False

    def add_default_headers(self, headers):
        """Attach the HTTP Basic-Auth header to every request."""
        token = b64encode(b('%s:%s' % (self.user_id, self.key)))
        headers['Authorization'] = 'Basic %s' % token.decode('utf-8')
        return headers

    def request(self, action, params=None, data='',
                headers=None, method='GET'):
        """Issue a request under ``/oec/<api_version>/``."""
        full_action = "%s/%s/%s" % (self.api_path, self.api_version, action)
        return super(OpsourceConnection, self).request(
            action=full_action,
            params=params, data=data,
            method=method, headers=headers)

    def request_with_orgId(self, action, params=None, data='',
                           headers=None, method='GET'):
        """Issue a request scoped to the caller's organization id."""
        full_action = "%s/%s" % (self.get_resource_path(), action)
        return super(OpsourceConnection, self).request(
            action=full_action,
            params=params, data=data,
            method=method, headers=headers)

    def get_resource_path(self):
        """
        Return the resource path (api path, version and orgId) needed for
        referencing resources that require a full path instead of just an
        ID, such as networks and customer snapshots.
        """
        return "%s/%s/%s" % (self.api_path, self.api_version,
                             self._get_orgId())

    def _get_orgId(self):
        """
        Send the /myaccount API request to opsource cloud and parse the
        'orgId' from the XML response, caching it for later calls. Most of
        the other API functions need the orgId.
        """
        if self._orgId is None:
            body = self.request('myaccount').object
            self._orgId = findtext(body, 'orgId', DIRECTORY_NS)
        return self._orgId
|
||||
|
||||
|
||||
class OpsourceStatus(object):
    """
    Opsource API pending-operation status.

    Carries: action, requestTime, userName, numberOfSteps, updateTime,
    step_name, step_number, step_percentComplete, failureReason.
    """

    def __init__(self, action=None, requestTime=None, userName=None,
                 numberOfSteps=None, updateTime=None, step_name=None,
                 step_number=None, step_percentComplete=None,
                 failureReason=None):
        self.action = action
        self.requestTime = requestTime
        self.userName = userName
        self.numberOfSteps = numberOfSteps
        self.updateTime = updateTime
        self.step_name = step_name
        self.step_number = step_number
        self.step_percentComplete = step_percentComplete
        self.failureReason = failureReason

    def __repr__(self):
        # NOTE(review): the format string has no closing '>'; preserved
        # byte-for-byte in case anything parses the repr.
        return (('<OpsourceStatus: action=%s, requestTime=%s, userName=%s, '
                 'numberOfSteps=%s, updateTime=%s, step_name=%s, '
                 'step_number=%s, step_percentComplete=%s, '
                 'failureReason=%s')
                % (self.action, self.requestTime, self.userName,
                   self.numberOfSteps, self.updateTime, self.step_name,
                   self.step_number, self.step_percentComplete,
                   self.failureReason))
|
||||
|
||||
|
||||
class OpsourceNetwork(object):
    """
    Opsource network with location.
    """

    def __init__(self, id, name, description, location, privateNet,
                 multicast, status):
        # ids are normalized to strings.
        self.id = str(id)
        self.name = name
        self.description = description
        self.location = location
        self.privateNet = privateNet
        self.multicast = multicast
        self.status = status

    def __repr__(self):
        # status is deliberately not part of the repr.
        return ('<OpsourceNetwork: id=%s, name=%s, description=%s, '
                'location=%s, privateNet=%s, multicast=%s>' %
                (self.id, self.name, self.description, self.location,
                 self.privateNet, self.multicast))
|
||||
|
||||
|
||||
class OpsourceNodeDriver(NodeDriver):
    """
    Opsource node driver.
    """

    connectionCls = OpsourceConnection
    name = 'Opsource'
    website = 'http://www.opsource.net/'
    type = Provider.OPSOURCE
    features = {'create_node': ['password']}
|
||||
|
||||
def create_node(self, **kwargs):
|
||||
"""
|
||||
Create a new opsource node
|
||||
|
||||
:keyword name: String with a name for this new node (required)
|
||||
:type name: ``str``
|
||||
|
||||
:keyword image: OS Image to boot on node. (required)
|
||||
:type image: :class:`NodeImage`
|
||||
|
||||
:keyword auth: Initial authentication information for the
|
||||
node (required)
|
||||
:type auth: :class:`NodeAuthPassword`
|
||||
|
||||
:keyword ex_description: description for this node (required)
|
||||
:type ex_description: ``str``
|
||||
|
||||
:keyword ex_network: Network to create the node within (required)
|
||||
:type ex_network: :class:`OpsourceNetwork`
|
||||
|
||||
:keyword ex_isStarted: Start server after creation? default
|
||||
true (required)
|
||||
:type ex_isStarted: ``bool``
|
||||
|
||||
:return: The newly created :class:`Node`. NOTE: Opsource does not
|
||||
provide a
|
||||
way to determine the ID of the server that was just created,
|
||||
so the returned :class:`Node` is not guaranteed to be the same
|
||||
one that was created. This is only the case when multiple
|
||||
nodes with the same name exist.
|
||||
:rtype: :class:`Node`
|
||||
"""
|
||||
name = kwargs['name']
|
||||
image = kwargs['image']
|
||||
|
||||
# XXX: Node sizes can be adjusted after a node is created, but
|
||||
# cannot be set at create time because size is part of the
|
||||
# image definition.
|
||||
password = None
|
||||
auth = self._get_and_check_auth(kwargs.get('auth'))
|
||||
password = auth.password
|
||||
|
||||
ex_description = kwargs.get('ex_description', '')
|
||||
ex_isStarted = kwargs.get('ex_isStarted', True)
|
||||
|
||||
ex_network = kwargs.get('ex_network')
|
||||
if not isinstance(ex_network, OpsourceNetwork):
|
||||
raise ValueError('ex_network must be of OpsourceNetwork type')
|
||||
vlanResourcePath = "%s/%s" % (self.connection.get_resource_path(),
|
||||
ex_network.id)
|
||||
|
||||
imageResourcePath = None
|
||||
if 'resourcePath' in image.extra:
|
||||
imageResourcePath = image.extra['resourcePath']
|
||||
else:
|
||||
imageResourcePath = "%s/%s" % (self.connection.get_resource_path(),
|
||||
image.id)
|
||||
|
||||
server_elm = ET.Element('Server', {'xmlns': SERVER_NS})
|
||||
ET.SubElement(server_elm, "name").text = name
|
||||
ET.SubElement(server_elm, "description").text = ex_description
|
||||
ET.SubElement(server_elm, "vlanResourcePath").text = vlanResourcePath
|
||||
ET.SubElement(server_elm, "imageResourcePath").text = imageResourcePath
|
||||
ET.SubElement(server_elm, "administratorPassword").text = password
|
||||
ET.SubElement(server_elm, "isStarted").text = str(ex_isStarted)
|
||||
|
||||
self.connection.request_with_orgId('server',
|
||||
method='POST',
|
||||
data=ET.tostring(server_elm)).object
|
||||
|
||||
# XXX: return the last node in the list that has a matching name. this
|
||||
# is likely but not guaranteed to be the node we just created
|
||||
# because opsource allows multiple nodes to have the same name
|
||||
node = list(filter(lambda x: x.name == name, self.list_nodes()))[-1]
|
||||
|
||||
if getattr(auth, "generated", False):
|
||||
node.extra['password'] = auth.password
|
||||
|
||||
return node
|
||||
|
||||
def destroy_node(self, node):
|
||||
body = self.connection.request_with_orgId(
|
||||
'server/%s?delete' % (node.id)).object
|
||||
|
||||
result = findtext(body, 'result', GENERAL_NS)
|
||||
return result == 'SUCCESS'
|
||||
|
||||
def reboot_node(self, node):
|
||||
body = self.connection.request_with_orgId(
|
||||
'server/%s?restart' % (node.id)).object
|
||||
result = findtext(body, 'result', GENERAL_NS)
|
||||
return result == 'SUCCESS'
|
||||
|
||||
def list_nodes(self):
|
||||
nodes = self._to_nodes(
|
||||
self.connection.request_with_orgId('server/deployed').object)
|
||||
nodes.extend(self._to_nodes(
|
||||
self.connection.request_with_orgId('server/pendingDeploy').object))
|
||||
return nodes
|
||||
|
||||
def list_images(self, location=None):
|
||||
"""
|
||||
return a list of available images
|
||||
Currently only returns the default 'base OS images' provided by
|
||||
opsource. Customer images (snapshots) are not yet supported.
|
||||
|
||||
@inherits: :class:`NodeDriver.list_images`
|
||||
"""
|
||||
return self._to_base_images(
|
||||
self.connection.request('base/image').object)
|
||||
|
||||
def list_sizes(self, location=None):
|
||||
return [
|
||||
NodeSize(id=1,
|
||||
name="default",
|
||||
ram=0,
|
||||
disk=0,
|
||||
bandwidth=0,
|
||||
price=0,
|
||||
driver=self.connection.driver),
|
||||
]
|
||||
|
||||
def list_locations(self):
|
||||
"""
|
||||
list locations (datacenters) available for instantiating servers and
|
||||
networks.
|
||||
|
||||
@inherits: :class:`NodeDriver.list_locations`
|
||||
"""
|
||||
return self._to_locations(
|
||||
self.connection.request_with_orgId('datacenter').object)
|
||||
|
||||
def list_networks(self, location=None):
|
||||
"""
|
||||
List networks deployed across all data center locations for your
|
||||
organization. The response includes the location of each network.
|
||||
|
||||
|
||||
:keyword location: The location
|
||||
:type location: :class:`NodeLocation`
|
||||
|
||||
:return: a list of OpsourceNetwork objects
|
||||
:rtype: ``list`` of :class:`OpsourceNetwork`
|
||||
"""
|
||||
return self._to_networks(
|
||||
self.connection.request_with_orgId('networkWithLocation').object)
|
||||
|
||||
def _to_base_images(self, object):
|
||||
images = []
|
||||
for element in object.findall(fixxpath("ServerImage", SERVER_NS)):
|
||||
images.append(self._to_base_image(element))
|
||||
|
||||
return images
|
||||
|
||||
def _to_base_image(self, element):
|
||||
# Eventually we will probably need multiple _to_image() functions
|
||||
# that parse <ServerImage> differently than <DeployedImage>.
|
||||
# DeployedImages are customer snapshot images, and ServerImages are
|
||||
# 'base' images provided by opsource
|
||||
location_id = findtext(element, 'location', SERVER_NS)
|
||||
location = self.ex_get_location_by_id(location_id)
|
||||
|
||||
extra = {
|
||||
'description': findtext(element, 'description', SERVER_NS),
|
||||
'OS_type': findtext(element, 'operatingSystem/type', SERVER_NS),
|
||||
'OS_displayName': findtext(element, 'operatingSystem/displayName',
|
||||
SERVER_NS),
|
||||
'cpuCount': findtext(element, 'cpuCount', SERVER_NS),
|
||||
'resourcePath': findtext(element, 'resourcePath', SERVER_NS),
|
||||
'memory': findtext(element, 'memory', SERVER_NS),
|
||||
'osStorage': findtext(element, 'osStorage', SERVER_NS),
|
||||
'additionalStorage': findtext(element, 'additionalStorage',
|
||||
SERVER_NS),
|
||||
'created': findtext(element, 'created', SERVER_NS),
|
||||
'location': location,
|
||||
}
|
||||
|
||||
return NodeImage(id=str(findtext(element, 'id', SERVER_NS)),
|
||||
name=str(findtext(element, 'name', SERVER_NS)),
|
||||
extra=extra,
|
||||
driver=self.connection.driver)
|
||||
|
||||
def ex_start_node(self, node):
|
||||
"""
|
||||
Powers on an existing deployed server
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
body = self.connection.request_with_orgId(
|
||||
'server/%s?start' % node.id).object
|
||||
result = findtext(body, 'result', GENERAL_NS)
|
||||
return result == 'SUCCESS'
|
||||
|
||||
def ex_shutdown_graceful(self, node):
|
||||
"""
|
||||
This function will attempt to "gracefully" stop a server by
|
||||
initiating a shutdown sequence within the guest operating system.
|
||||
A successful response on this function means the system has
|
||||
successfully passed the request into the operating system.
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
body = self.connection.request_with_orgId(
|
||||
'server/%s?shutdown' % (node.id)).object
|
||||
result = findtext(body, 'result', GENERAL_NS)
|
||||
return result == 'SUCCESS'
|
||||
|
||||
def ex_power_off(self, node):
|
||||
"""
|
||||
This function will abruptly power-off a server. Unlike
|
||||
ex_shutdown_graceful, success ensures the node will stop but some OS
|
||||
and application configurations may be adversely affected by the
|
||||
equivalent of pulling the power plug out of the machine.
|
||||
|
||||
:param node: Node which should be used
|
||||
:type node: :class:`Node`
|
||||
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
body = self.connection.request_with_orgId(
|
||||
'server/%s?poweroff' % node.id).object
|
||||
result = findtext(body, 'result', GENERAL_NS)
|
||||
return result == 'SUCCESS'
|
||||
|
||||
def ex_list_networks(self):
|
||||
"""
|
||||
List networks deployed across all data center locations for your
|
||||
organization. The response includes the location of each network.
|
||||
|
||||
:return: a list of OpsourceNetwork objects
|
||||
:rtype: ``list`` of :class:`OpsourceNetwork`
|
||||
"""
|
||||
response = self.connection.request_with_orgId('networkWithLocation') \
|
||||
.object
|
||||
return self._to_networks(response)
|
||||
|
||||
def ex_get_location_by_id(self, id):
|
||||
"""
|
||||
Get location by ID.
|
||||
|
||||
:param id: ID of the node location which should be used
|
||||
:type id: ``str``
|
||||
|
||||
:rtype: :class:`NodeLocation`
|
||||
"""
|
||||
location = None
|
||||
if id is not None:
|
||||
location = list(
|
||||
filter(lambda x: x.id == id, self.list_locations()))[0]
|
||||
return location
|
||||
|
||||
def _to_networks(self, object):
|
||||
networks = []
|
||||
for element in findall(object, 'network', NETWORK_NS):
|
||||
networks.append(self._to_network(element))
|
||||
|
||||
return networks
|
||||
|
||||
    def _to_network(self, element):
        """
        Build an :class:`OpsourceNetwork` from a <network> XML element.

        :param element: parsed <network> element
        :rtype: :class:`OpsourceNetwork`
        """
        # The API returns multicast as the string 'true'/'false'.
        multicast = False
        if findtext(element, 'multicast', NETWORK_NS) == 'true':
            multicast = True

        status = self._to_status(element.find(fixxpath('status', NETWORK_NS)))

        # Resolve the location code into a full NodeLocation object.
        location_id = findtext(element, 'location', NETWORK_NS)
        location = self.ex_get_location_by_id(location_id)

        return OpsourceNetwork(id=findtext(element, 'id', NETWORK_NS),
                               name=findtext(element, 'name', NETWORK_NS),
                               description=findtext(element, 'description',
                                                    NETWORK_NS),
                               location=location,
                               privateNet=findtext(element, 'privateNet',
                                                   NETWORK_NS),
                               multicast=multicast,
                               status=status)
|
||||
|
||||
def _to_locations(self, object):
|
||||
locations = []
|
||||
for element in object.findall(fixxpath('datacenter', DATACENTER_NS)):
|
||||
locations.append(self._to_location(element))
|
||||
|
||||
return locations
|
||||
|
||||
def _to_location(self, element):
|
||||
l = NodeLocation(id=findtext(element, 'location', DATACENTER_NS),
|
||||
name=findtext(element, 'displayName', DATACENTER_NS),
|
||||
country=findtext(element, 'country', DATACENTER_NS),
|
||||
driver=self)
|
||||
return l
|
||||
|
||||
def _to_nodes(self, object):
|
||||
node_elements = object.findall(fixxpath('DeployedServer', SERVER_NS))
|
||||
node_elements.extend(object.findall(
|
||||
fixxpath('PendingDeployServer', SERVER_NS)))
|
||||
return [self._to_node(el) for el in node_elements]
|
||||
|
||||
def _to_node(self, element):
|
||||
if findtext(element, 'isStarted', SERVER_NS) == 'true':
|
||||
state = NodeState.RUNNING
|
||||
else:
|
||||
state = NodeState.TERMINATED
|
||||
|
||||
status = self._to_status(element.find(fixxpath('status', SERVER_NS)))
|
||||
|
||||
extra = {
|
||||
'description': findtext(element, 'description', SERVER_NS),
|
||||
'sourceImageId': findtext(element, 'sourceImageId', SERVER_NS),
|
||||
'networkId': findtext(element, 'networkId', SERVER_NS),
|
||||
'machineName': findtext(element, 'machineName', SERVER_NS),
|
||||
'deployedTime': findtext(element, 'deployedTime', SERVER_NS),
|
||||
'cpuCount': findtext(element, 'machineSpecification/cpuCount',
|
||||
SERVER_NS),
|
||||
'memoryMb': findtext(element, 'machineSpecification/memoryMb',
|
||||
SERVER_NS),
|
||||
'osStorageGb': findtext(element,
|
||||
'machineSpecification/osStorageGb',
|
||||
SERVER_NS),
|
||||
'additionalLocalStorageGb': findtext(
|
||||
element, 'machineSpecification/additionalLocalStorageGb',
|
||||
SERVER_NS),
|
||||
'OS_type': findtext(element,
|
||||
'machineSpecification/operatingSystem/type',
|
||||
SERVER_NS),
|
||||
'OS_displayName': findtext(
|
||||
element, 'machineSpecification/operatingSystem/displayName',
|
||||
SERVER_NS),
|
||||
'status': status,
|
||||
}
|
||||
|
||||
public_ip = findtext(element, 'publicIpAddress', SERVER_NS)
|
||||
|
||||
n = Node(id=findtext(element, 'id', SERVER_NS),
|
||||
name=findtext(element, 'name', SERVER_NS),
|
||||
state=state,
|
||||
public_ips=[public_ip] if public_ip is not None else [],
|
||||
private_ips=findtext(element, 'privateIpAddress', SERVER_NS),
|
||||
driver=self.connection.driver,
|
||||
extra=extra)
|
||||
return n
|
||||
|
||||
    def _to_status(self, element):
        """
        Build an :class:`OpsourceStatus` from a <status> XML element.

        Returns an empty OpsourceStatus (all fields defaulted) when the
        element is absent, e.g. for servers with no pending operation.
        """
        if element is None:
            return OpsourceStatus()
        # NOTE(review): the 'step_number' path looks inconsistent with the
        # other step fields ('step/name', 'step/percentComplete') — possibly
        # it should be 'step/number'; confirm against the API schema.
        s = OpsourceStatus(action=findtext(element, 'action', SERVER_NS),
                           requestTime=findtext(element, 'requestTime',
                                                SERVER_NS),
                           userName=findtext(element, 'userName',
                                             SERVER_NS),
                           numberOfSteps=findtext(element, 'numberOfSteps',
                                                  SERVER_NS),
                           step_name=findtext(element, 'step/name',
                                              SERVER_NS),
                           step_number=findtext(element, 'step_number',
                                                SERVER_NS),
                           step_percentComplete=findtext(
                               element, 'step/percentComplete', SERVER_NS),
                           failureReason=findtext(element, 'failureReason',
                                                  SERVER_NS))
        return s
|
||||
230
awx/lib/site-packages/libcloud/compute/drivers/rackspace.py
Normal file
230
awx/lib/site-packages/libcloud/compute/drivers/rackspace.py
Normal file
@@ -0,0 +1,230 @@
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Rackspace driver
|
||||
"""
|
||||
from libcloud.compute.types import Provider, LibcloudError
|
||||
from libcloud.compute.base import NodeLocation, VolumeSnapshot
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_0_Connection,\
|
||||
OpenStack_1_0_NodeDriver, OpenStack_1_0_Response
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection,\
|
||||
OpenStack_1_1_NodeDriver
|
||||
|
||||
from libcloud.common.rackspace import AUTH_URL
|
||||
|
||||
|
||||
# Service-catalog lookup arguments per Rackspace region. All regions use the
# same compute service name; only the region code differs.
ENDPOINT_ARGS_MAP = {
    region: {'service_type': 'compute',
             'name': 'cloudServersOpenStack',
             'region': region.upper()}
    for region in ('dfw', 'ord', 'iad', 'lon', 'syd', 'hkg')
}
|
||||
|
||||
|
||||
class RackspaceFirstGenConnection(OpenStack_1_0_Connection):
    """
    Connection class for the Rackspace first-gen driver.
    """
    responseCls = OpenStack_1_0_Response
    XML_NAMESPACE = 'http://docs.rackspacecloud.com/servers/api/v1.0'
    auth_url = AUTH_URL
    _auth_version = '2.0'
    # Append a cache-busting query parameter to requests.
    cache_busting = True

    def __init__(self, *args, **kwargs):
        # 'region' is consumed here so the parent constructor never sees it.
        self.region = kwargs.pop('region', None)
        super(RackspaceFirstGenConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self):
        """
        Look up the first-gen compute endpoint ('cloudServers') in the
        service catalog and return its public URL, rewriting the host when
        the requested region does not match the account's home region.

        :raises LibcloudError: on unsupported auth version or missing
                               endpoint.
        """
        ep = {}

        if '2.0' in self._auth_version:
            ep = self.service_catalog.get_endpoint(service_type='compute',
                                                   name='cloudServers')
        else:
            raise LibcloudError(
                'Auth version "%s" not supported' % (self._auth_version))

        public_url = ep.get('publicURL', None)

        if not public_url:
            raise LibcloudError('Could not find specified endpoint')

        # This is a nasty hack, but it's required because of how the
        # auth system works.
        # Old US accounts can access UK API endpoint, but they don't
        # have this endpoint in the service catalog. Same goes for the
        # old UK accounts and US endpoint.
        if self.region == 'us':
            # Old UK account, which only have uk endpoint in the catalog:
            # rewrite the catalog's LON URL to the US host.
            public_url = public_url.replace('https://lon.servers.api',
                                            'https://servers.api')
        elif self.region == 'uk':
            # Old US account, which only has us endpoints in the catalog:
            # rewrite to the LON host.
            public_url = public_url.replace('https://servers.api',
                                            'https://lon.servers.api')

        return public_url
|
||||
|
||||
|
||||
class RackspaceFirstGenNodeDriver(OpenStack_1_0_NodeDriver):
    # First-generation (legacy) Rackspace Cloud Servers driver.
    name = 'Rackspace Cloud (First Gen)'
    website = 'http://www.rackspace.com'
    connectionCls = RackspaceFirstGenConnection
    type = Provider.RACKSPACE_FIRST_GEN
    api_name = 'rackspace'

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='us', **kwargs):
        """
        @inherits: :class:`NodeDriver.__init__`

        :param region: Region ID which should be used ('us' or 'uk')
        :type region: ``str``

        :raises ValueError: if region is not 'us' or 'uk'.
        """
        if region not in ['us', 'uk']:
            raise ValueError('Invalid region: %s' % (region))

        super(RackspaceFirstGenNodeDriver, self).__init__(key=key,
                                                          secret=secret,
                                                          secure=secure,
                                                          host=host,
                                                          port=port,
                                                          region=region,
                                                          **kwargs)

    def list_locations(self):
        """
        Lists available locations

        Locations cannot be set or retrieved via the API, but currently
        there are two locations, DFW and ORD.

        @inherits: :class:`OpenStack_1_0_NodeDriver.list_locations`
        """
        # Region was validated in __init__, so exactly one branch is taken.
        if self.region == 'us':
            locations = [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)]
        elif self.region == 'uk':
            locations = [NodeLocation(0, 'Rackspace UK London', 'UK', self)]

        return locations

    def _ex_connection_class_kwargs(self):
        # Forward the configured region to the connection class.
        kwargs = self.openstack_connection_kwargs()
        kwargs['region'] = self.region
        return kwargs
|
||||
|
||||
|
||||
class RackspaceConnection(OpenStack_1_1_Connection):
    """
    Connection class for the Rackspace next-gen OpenStack base driver.
    """

    auth_url = AUTH_URL
    _auth_version = '2.0'

    def __init__(self, *args, **kwargs):
        # Both kwargs are consumed here; the parent never sees them.
        # get_endpoint_args is the lookup dict passed to the service catalog
        # (see ENDPOINT_ARGS_MAP).
        self.region = kwargs.pop('region', None)
        self.get_endpoint_args = kwargs.pop('get_endpoint_args', None)
        super(RackspaceConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self):
        """
        Return the public URL of the endpoint selected by
        ``self.get_endpoint_args``.

        :raises LibcloudError: when get_endpoint_args is unset, the auth
                               version is unsupported, or no endpoint matches.
        """
        if not self.get_endpoint_args:
            raise LibcloudError(
                'RackspaceConnection must have get_endpoint_args set')

        if '2.0' in self._auth_version:
            ep = self.service_catalog.get_endpoint(**self.get_endpoint_args)
        else:
            raise LibcloudError(
                'Auth version "%s" not supported' % (self._auth_version))

        public_url = ep.get('publicURL', None)

        if not public_url:
            raise LibcloudError('Could not find specified endpoint')

        return public_url
|
||||
|
||||
|
||||
class RackspaceNodeDriver(OpenStack_1_1_NodeDriver):
    # Next-generation (OpenStack-based) Rackspace Cloud Servers driver.
    name = 'Rackspace Cloud (Next Gen)'
    website = 'http://www.rackspace.com'
    connectionCls = RackspaceConnection
    type = Provider.RACKSPACE

    # Rackspace exposes networks under a vendor-specific extension path.
    _networks_url_prefix = '/os-networksv2'

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region='dfw', **kwargs):
        """
        @inherits: :class:`NodeDriver.__init__`

        :param region: ID of the region which should be used.
                       One of the ENDPOINT_ARGS_MAP keys.
        :type region: ``str``

        :raises ValueError: on an unknown region ID.
        """
        valid_regions = ENDPOINT_ARGS_MAP.keys()

        if region not in valid_regions:
            raise ValueError('Invalid region: %s' % (region))

        # api_name selects the pricing data set; LON and SYD are priced
        # separately from the US regions.
        if region == 'lon':
            self.api_name = 'rackspacenovalon'
        elif region == 'syd':
            self.api_name = 'rackspacenovasyd'
        else:
            self.api_name = 'rackspacenovaus'

        super(RackspaceNodeDriver, self).__init__(key=key, secret=secret,
                                                  secure=secure, host=host,
                                                  port=port,
                                                  region=region,
                                                  **kwargs)

    def _to_snapshot(self, api_node):
        # Convert an API snapshot dict into a VolumeSnapshot. The payload may
        # arrive wrapped in a top-level 'snapshot' key.
        if 'snapshot' in api_node:
            api_node = api_node['snapshot']

        extra = {'volume_id': api_node['volumeId'],
                 'name': api_node['displayName'],
                 'created': api_node['createdAt'],
                 'description': api_node['displayDescription'],
                 'status': api_node['status']}

        snapshot = VolumeSnapshot(id=api_node['id'], driver=self,
                                  size=api_node['size'],
                                  extra=extra)
        return snapshot

    def _ex_connection_class_kwargs(self):
        # Supply region and the matching service-catalog lookup arguments to
        # RackspaceConnection.
        endpoint_args = ENDPOINT_ARGS_MAP[self.region]
        kwargs = self.openstack_connection_kwargs()
        kwargs['region'] = self.region
        kwargs['get_endpoint_args'] = endpoint_args
        return kwargs
|
||||
337
awx/lib/site-packages/libcloud/compute/drivers/rimuhosting.py
Normal file
337
awx/lib/site-packages/libcloud/compute/drivers/rimuhosting.py
Normal file
@@ -0,0 +1,337 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
RimuHosting Driver
|
||||
"""
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.common.base import ConnectionKey, JsonResponse
|
||||
from libcloud.common.types import InvalidCredsError
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
|
||||
from libcloud.compute.base import NodeImage
|
||||
|
||||
API_CONTEXT = '/r'
|
||||
API_HOST = 'rimuhosting.com'
|
||||
|
||||
|
||||
class RimuHostingException(Exception):
    """
    Exception class for RimuHosting driver
    """

    def __str__(self):
        # The first positional argument carries the human-readable message.
        message = self.args[0]
        return message

    def __repr__(self):
        return "<RimuHostingException '%s'>" % self.args[0]
|
||||
|
||||
|
||||
class RimuHostingResponse(JsonResponse):
    """
    Response Class for RimuHosting driver
    """
    def success(self):
        # 403 means the API key was rejected; everything else is considered
        # successful at the HTTP level and validated in parse_body.
        if self.status == 403:
            raise InvalidCredsError()
        return True

    def parse_body(self):
        """
        Unwrap the single top-level key of the JSON response and return its
        value, raising :class:`RimuHostingException` on API-level errors or
        on an unparseable body.
        """
        try:
            js = super(RimuHostingResponse, self).parse_body()
            keys = list(js.keys())
            if js[keys[0]]['response_type'] == "ERROR":
                raise RimuHostingException(
                    js[keys[0]]['human_readable_message']
                )
            return js[keys[0]]
        except (KeyError, IndexError):
            # KeyError: an expected field is missing; IndexError: the body
            # was an empty object so keys[0] does not exist (previously this
            # escaped as a bare IndexError).
            raise RimuHostingException('Could not parse body: %s'
                                       % (self.body))
|
||||
|
||||
|
||||
class RimuHostingConnection(ConnectionKey):
    """
    Connection class for the RimuHosting driver
    """

    api_context = API_CONTEXT
    host = API_HOST
    port = 443
    responseCls = RimuHostingResponse

    def __init__(self, key, secure=True):
        # Override __init__ so that secure can be set to False for testing.
        ConnectionKey.__init__(self, key, secure)

    def add_default_headers(self, headers):
        # We want JSON back from the server. Could be application/xml
        # (but JSON is better).
        headers['Accept'] = 'application/json'
        # Must encode all data as json, or override this header.
        headers['Content-Type'] = 'application/json'

        # RimuHosting uses a vendor-specific Authorization scheme carrying
        # the API key.
        headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key)
        return headers

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        # Override this method to prepend the api_context to every action
        # path before delegating to the base implementation.
        if not headers:
            headers = {}
        if not params:
            params = {}
        return ConnectionKey.request(self, self.api_context + action,
                                     params, data, headers, method)
|
||||
|
||||
|
||||
class RimuHostingNodeDriver(NodeDriver):
    """
    RimuHosting node driver
    """

    type = Provider.RIMUHOSTING
    name = 'RimuHosting'
    website = 'http://rimuhosting.com/'
    connectionCls = RimuHostingConnection
    features = {'create_node': ['password']}

    def __init__(self, key, host=API_HOST, port=443,
                 api_context=API_CONTEXT, secure=True):
        """
        :param key: API key (required)
        :type key: ``str``

        :param host: hostname for connection
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :param api_context: Optional API context.
        :type api_context: ``str``

        :param secure: Weither to use HTTPS or HTTP.
        :type secure: ``bool``

        :rtype: ``None``
        """
        # Configure the connection by hand instead of delegating to the base
        # class so host/context/port overrides take effect before connecting.
        self.key = key
        self.secure = secure
        self.connection = self.connectionCls(key, secure)
        self.connection.host = host
        self.connection.api_context = api_context
        self.connection.port = port
        self.connection.driver = self
        self.connection.connect()

    def _order_uri(self, node, resource):
        # Returns the order URI with its resource appended.
        return "/orders/%s/%s" % (node.id, resource)

    # TODO: Get the node state.
    def _to_node(self, order):
        # Build a Node from an order dict; state is hard-coded RUNNING
        # because the listing endpoint only returns active orders.
        n = Node(id=order['slug'],
                 name=order['domain_name'],
                 state=NodeState.RUNNING,
                 public_ips=(
                     [order['allocated_ips']['primary_ip']]
                     + order['allocated_ips']['secondary_ips']),
                 private_ips=[],
                 driver=self.connection.driver,
                 extra={
                     'order_oid': order['order_oid'],
                     'monthly_recurring_fee': order.get(
                         'billing_info').get('monthly_recurring_fee')})
        return n

    def _to_size(self, plan):
        # Convert a pricing-plan dict into a NodeSize; price is the monthly
        # recurring USD amount.
        return NodeSize(
            id=plan['pricing_plan_code'],
            name=plan['pricing_plan_description'],
            ram=plan['minimum_memory_mb'],
            disk=plan['minimum_disk_gb'],
            bandwidth=plan['minimum_data_transfer_allowance_gb'],
            price=plan['monthly_recurring_amt']['amt_usd'],
            driver=self.connection.driver
        )

    def _to_image(self, image):
        # Convert a distro dict into a NodeImage.
        return NodeImage(id=image['distro_code'],
                         name=image['distro_description'],
                         driver=self.connection.driver)

    def list_sizes(self, location=None):
        # Returns a list of sizes (aka plans)
        # Get plans. Note this is really just for libcloud.
        # We are happy with any size.
        if location is None:
            location = ''
        else:
            # Restrict plans to the requested datacenter.
            location = ";dc_location=%s" % (location.id)

        res = self.connection.request(
            '/pricing-plans;server-type=VPS%s' % (location)).object
        return list(map(lambda x: self._to_size(x), res['pricing_plan_infos']))

    def list_nodes(self):
        # Returns a list of Nodes
        # Will only include active ones.
        res = self.connection.request('/orders;include_inactive=N').object
        return list(map(lambda x: self._to_node(x), res['about_orders']))

    def list_images(self, location=None):
        # Get all base images.
        # TODO: add other image sources. (Such as a backup of a VPS)
        # All Images are available for use at all locations
        res = self.connection.request('/distributions').object
        return list(map(lambda x: self._to_image(x), res['distro_infos']))

    def reboot_node(self, node):
        # Reboot
        # PUT the state of RESTARTING to restart a VPS.
        # All data is encoded as JSON
        data = {'reboot_request': {'running_state': 'RESTARTING'}}
        uri = self._order_uri(node, 'vps/running-state')
        self.connection.request(uri, data=json.dumps(data), method='PUT')
        # XXX check that the response was actually successful
        return True

    def destroy_node(self, node):
        # Shutdown a VPS.
        uri = self._order_uri(node, 'vps')
        self.connection.request(uri, method='DELETE')
        # XXX check that the response was actually successful
        return True

    def create_node(self, **kwargs):
        """Creates a RimuHosting instance

        @inherits: :class:`NodeDriver.create_node`

        :keyword name: Must be a FQDN. e.g example.com.
        :type name: ``str``

        :keyword ex_billing_oid: If not set,
                                 a billing method is automatically picked.
        :type ex_billing_oid: ``str``

        :keyword ex_host_server_oid: The host server to set the VPS up on.
        :type ex_host_server_oid: ``str``

        :keyword ex_vps_order_oid_to_clone: Clone another VPS to use as
                                            the image for the new VPS.
        :type ex_vps_order_oid_to_clone: ``str``

        :keyword ex_num_ips: Number of IPs to allocate. Defaults to 1.
        :type ex_num_ips: ``int``

        :keyword ex_extra_ip_reason: Reason for needing the extra IPs.
        :type ex_extra_ip_reason: ``str``

        :keyword ex_memory_mb: Memory to allocate to the VPS.
        :type ex_memory_mb: ``int``

        :keyword ex_disk_space_mb: Diskspace to allocate to the VPS.
                                   Defaults to 4096 (4GB).
        :type ex_disk_space_mb: ``int``

        :keyword ex_disk_space_2_mb: Secondary disk size allocation.
                                     Disabled by default.
        :type ex_disk_space_2_mb: ``int``

        :keyword ex_control_panel: Control panel to install on the VPS.
        :type ex_control_panel: ``str``
        """
        # Note we don't do much error checking in this because we
        # expect the API to error out if there is a problem.
        name = kwargs['name']
        image = kwargs['image']
        size = kwargs['size']

        data = {
            'instantiation_options': {
                'domain_name': name,
                'distro': image.id
            },
            'pricing_plan_code': size.id,
            'vps_parameters': {}
        }

        if 'ex_control_panel' in kwargs:
            data['instantiation_options']['control_panel'] = \
                kwargs['ex_control_panel']

        # Resolve / validate the root password via the standard auth helper.
        auth = self._get_and_check_auth(kwargs.get('auth'))
        data['instantiation_options']['password'] = auth.password

        if 'ex_billing_oid' in kwargs:
            # TODO check for valid oid.
            data['billing_oid'] = kwargs['ex_billing_oid']

        if 'ex_host_server_oid' in kwargs:
            data['host_server_oid'] = kwargs['ex_host_server_oid']

        if 'ex_vps_order_oid_to_clone' in kwargs:
            data['vps_order_oid_to_clone'] = \
                kwargs['ex_vps_order_oid_to_clone']

        # Extra IPs require an accompanying justification for the API.
        if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1:
            if 'ex_extra_ip_reason' not in kwargs:
                raise RimuHostingException(
                    'Need an reason for having an extra IP')
            else:
                if 'ip_request' not in data:
                    data['ip_request'] = {}
                data['ip_request']['num_ips'] = int(kwargs['ex_num_ips'])
                data['ip_request']['extra_ip_reason'] = \
                    kwargs['ex_extra_ip_reason']

        if 'ex_memory_mb' in kwargs:
            data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb']

        if 'ex_disk_space_mb' in kwargs:
            data['vps_parameters']['disk_space_mb'] = \
                kwargs['ex_disk_space_mb']

        if 'ex_disk_space_2_mb' in kwargs:
            data['vps_parameters']['disk_space_2_mb'] =\
                kwargs['ex_disk_space_2_mb']

        # Don't send empty 'vps_parameters' attribute
        if not data['vps_parameters']:
            del data['vps_parameters']

        res = self.connection.request(
            '/orders/new-vps',
            method='POST',
            data=json.dumps({"new-vps": data})
        ).object
        node = self._to_node(res['about_order'])
        # The generated/confirmed password is echoed back by the API.
        node.extra['password'] = \
            res['new_order_request']['instantiation_options']['password']
        return node

    def list_locations(self):
        # Locations are fixed; the API does not expose a listing endpoint.
        return [
            NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self),
            NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self),
            NodeLocation('DCLONDON', "RimuHosting London", 'GB', self),
            NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self),
        ]
|
||||
83
awx/lib/site-packages/libcloud/compute/drivers/serverlove.py
Normal file
83
awx/lib/site-packages/libcloud/compute/drivers/serverlove.py
Normal file
@@ -0,0 +1,83 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
ServerLove Driver
|
||||
"""
|
||||
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver
|
||||
from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection
|
||||
|
||||
|
||||
# API end-points
|
||||
API_ENDPOINTS = {
|
||||
'uk-1': {
|
||||
'name': 'United Kingdom, Manchester',
|
||||
'country': 'United Kingdom',
|
||||
'host': 'api.z1-man.serverlove.com'
|
||||
}
|
||||
}
|
||||
|
||||
# Default API end-point for the base connection class.
|
||||
DEFAULT_ENDPOINT = 'uk-1'
|
||||
|
||||
# Retrieved from http://www.serverlove.com/cloud-server-faqs/api-questions/
|
||||
STANDARD_DRIVES = {
|
||||
'679f5f44-0be7-4745-a658-cccd4334c1aa': {
|
||||
'uuid': '679f5f44-0be7-4745-a658-cccd4334c1aa',
|
||||
'description': 'CentOS 5.5',
|
||||
'size_gunzipped': '1GB',
|
||||
'supports_deployment': True,
|
||||
},
|
||||
'5f2e0e29-2937-42b9-b362-d2d07eddbdeb': {
|
||||
'uuid': '5f2e0e29-2937-42b9-b362-d2d07eddbdeb',
|
||||
'description': 'Ubuntu Linux 10.04',
|
||||
'size_gunzipped': '1GB',
|
||||
'supports_deployment': True,
|
||||
},
|
||||
'5795b68f-ed26-4639-b41d-c93235062b6b': {
|
||||
'uuid': '5795b68f-ed26-4639-b41d-c93235062b6b',
|
||||
'description': 'Debian Linux 5',
|
||||
'size_gunzipped': '1GB',
|
||||
'supports_deployment': True,
|
||||
},
|
||||
'41993a02-0b22-4e49-bb47-0aa8975217e4': {
|
||||
'uuid': '41993a02-0b22-4e49-bb47-0aa8975217e4',
|
||||
'description': 'Windows Server 2008 R2 Standard',
|
||||
'size_gunzipped': '15GB',
|
||||
'supports_deployment': False,
|
||||
},
|
||||
'85623ca1-9c2a-4398-a771-9a43c347e86b': {
|
||||
'uuid': '85623ca1-9c2a-4398-a771-9a43c347e86b',
|
||||
'description': 'Windows Web Server 2008 R2',
|
||||
'size_gunzipped': '15GB',
|
||||
'supports_deployment': False,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class ServerLoveConnection(ElasticStackBaseConnection):
    # Pin the connection to the single ServerLove endpoint (uk-1, Manchester).
    host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host']
|
||||
|
||||
|
||||
class ServerLoveNodeDriver(ElasticStackBaseNodeDriver):
    """
    ServerLove node driver. All behaviour comes from the ElasticStack base
    driver; this subclass only supplies provider identity, connection class
    and the catalog of standard drives.
    """
    type = Provider.SERVERLOVE
    api_name = 'serverlove'
    website = 'http://www.serverlove.com/'
    name = 'ServerLove'
    connectionCls = ServerLoveConnection
    features = {'create_node': ['generates_password']}
    # Pre-installed drive images offered by the provider.
    _standard_drives = STANDARD_DRIVES
|
||||
83
awx/lib/site-packages/libcloud/compute/drivers/skalicloud.py
Normal file
83
awx/lib/site-packages/libcloud/compute/drivers/skalicloud.py
Normal file
@@ -0,0 +1,83 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
skalicloud Driver
|
||||
"""
|
||||
|
||||
from libcloud.compute.types import Provider
|
||||
from libcloud.compute.drivers.elasticstack import ElasticStackBaseNodeDriver
|
||||
from libcloud.compute.drivers.elasticstack import ElasticStackBaseConnection
|
||||
|
||||
|
||||
# API end-points
|
||||
API_ENDPOINTS = {
|
||||
'my-1': {
|
||||
'name': 'Malaysia, Kuala Lumpur',
|
||||
'country': 'Malaysia',
|
||||
'host': 'api.sdg-my.skalicloud.com'
|
||||
}
|
||||
}
|
||||
|
||||
# Default API end-point for the base connection class.
|
||||
DEFAULT_ENDPOINT = 'my-1'
|
||||
|
||||
# Retrieved from http://www.skalicloud.com/cloud-api/
|
||||
STANDARD_DRIVES = {
|
||||
'90aa51f2-15c0-4cff-81ee-e93aa20b9468': {
|
||||
'uuid': '90aa51f2-15c0-4cff-81ee-e93aa20b9468',
|
||||
'description': 'CentOS 5.5 -64bit',
|
||||
'size_gunzipped': '1GB',
|
||||
'supports_deployment': True,
|
||||
},
|
||||
'c144d7a7-e24b-48ab-954b-6b6ec514ed6f': {
|
||||
'uuid': 'c144d7a7-e24b-48ab-954b-6b6ec514ed6f',
|
||||
'description': 'Debian 5 -64bit',
|
||||
'size_gunzipped': '1GB',
|
||||
'supports_deployment': True,
|
||||
},
|
||||
'3051699a-a536-4220-aeb5-67f2ec101a09': {
|
||||
'uuid': '3051699a-a536-4220-aeb5-67f2ec101a09',
|
||||
'description': 'Ubuntu Server 10.10 -64bit',
|
||||
'size_gunzipped': '1GB',
|
||||
'supports_deployment': True,
|
||||
},
|
||||
'11c4c922-5ff8-4094-b06c-eb8ffaec1ea9': {
|
||||
'uuid': '11c4c922-5ff8-4094-b06c-eb8ffaec1ea9',
|
||||
'description': 'Windows 2008R2 Web Edition',
|
||||
'size_gunzipped': '13GB',
|
||||
'supports_deployment': False,
|
||||
},
|
||||
'93bf390e-4f46-4252-a8bc-9d6d80e3f955': {
|
||||
'uuid': '93bf390e-4f46-4252-a8bc-9d6d80e3f955',
|
||||
'description': 'Windows Server 2008R2 Standard',
|
||||
'size_gunzipped': '13GB',
|
||||
'supports_deployment': False,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class SkaliCloudConnection(ElasticStackBaseConnection):
    """
    Connection class for the skalicloud driver; only pins the API host
    for the default region.
    """
    host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host']
|
||||
|
||||
|
||||
class SkaliCloudNodeDriver(ElasticStackBaseNodeDriver):
    """
    skalicloud node driver.

    Thin subclass of the shared ElasticStack driver: all behaviour comes
    from ElasticStackBaseNodeDriver; this class only supplies provider
    identity, the region connection and the standard drive catalogue.
    """
    type = Provider.SKALICLOUD
    api_name = 'skalicloud'
    name = 'skalicloud'
    website = 'http://www.skalicloud.com/'
    connectionCls = SkaliCloudConnection
    # create_node returns a generated root password in Node.extra.
    features = {"create_node": ["generates_password"]}
    _standard_drives = STANDARD_DRIVES
|
||||
474
awx/lib/site-packages/libcloud/compute/drivers/softlayer.py
Normal file
474
awx/lib/site-packages/libcloud/compute/drivers/softlayer.py
Normal file
@@ -0,0 +1,474 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Softlayer driver
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey
|
||||
from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
|
||||
from libcloud.common.types import InvalidCredsError, LibcloudError
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, \
|
||||
NodeImage
|
||||
|
||||
# Fallback values used by create_node() when the caller supplies no
# explicit domain / size information.
DEFAULT_DOMAIN = 'example.com'
DEFAULT_CPU_SIZE = 1
DEFAULT_RAM_SIZE = 2048      # MB
DEFAULT_DISK_SIZE = 100      # GB

# Known SoftLayer datacenters keyed by short code. 'name' is the human
# readable label (only present for the commonly advertised locations).
DATACENTERS = {
    'hou02': {'country': 'US'},
    'sea01': {'country': 'US', 'name': 'Seattle - West Coast U.S.'},
    'wdc01': {'country': 'US', 'name': 'Washington, DC - East Coast U.S.'},
    'dal01': {'country': 'US'},
    'dal02': {'country': 'US'},
    'dal04': {'country': 'US'},
    'dal05': {'country': 'US', 'name': 'Dallas - Central U.S.'},
    'dal06': {'country': 'US'},
    'dal07': {'country': 'US'},
    'sjc01': {'country': 'US', 'name': 'San Jose - West Coast U.S.'},
    'sng01': {'country': 'SG', 'name': 'Singapore - Southeast Asia'},
    'ams01': {'country': 'NL', 'name': 'Amsterdam - Western Europe'},
}

# SoftLayer power state keyName -> libcloud NodeState. Unlisted states
# fall back to NodeState.UNKNOWN at the lookup site.
NODE_STATE_MAP = {
    'RUNNING': NodeState.RUNNING,
    'HALTED': NodeState.UNKNOWN,
    'PAUSED': NodeState.UNKNOWN,
    'INITIATING': NodeState.PENDING
}
|
||||
|
||||
# Stock CCI plans (cores / RAM in MB / disk in GB). The integer index of
# each entry doubles as the plan id exposed through list_sizes().
SL_BASE_TEMPLATES = [
    {
        'name': '1 CPU, 1GB ram, 25GB',
        'ram': 1024,
        'disk': 25,
        'cpus': 1,
    }, {
        'name': '1 CPU, 1GB ram, 100GB',
        'ram': 1024,
        'disk': 100,
        'cpus': 1,
    }, {
        'name': '1 CPU, 2GB ram, 100GB',
        'ram': 2 * 1024,
        'disk': 100,
        'cpus': 1,
    }, {
        'name': '1 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 1,
    }, {
        'name': '2 CPU, 2GB ram, 100GB',
        'ram': 2 * 1024,
        'disk': 100,
        'cpus': 2,
    }, {
        'name': '2 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 2,
    }, {
        'name': '2 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 2,
    }, {
        'name': '4 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 4,
    }, {
        'name': '4 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 4,
    }, {
        'name': '6 CPU, 4GB ram, 100GB',
        'ram': 4 * 1024,
        'disk': 100,
        'cpus': 6,
    }, {
        'name': '6 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 6,
    }, {
        'name': '8 CPU, 8GB ram, 100GB',
        'ram': 8 * 1024,
        'disk': 100,
        'cpus': 8,
    }, {
        'name': '8 CPU, 16GB ram, 100GB',
        'ram': 16 * 1024,
        'disk': 100,
        'cpus': 8,
    }]

# Size table keyed by integer plan id: every base plan is offered with a
# local disk. dict(plan, local_disk=True) copies the base entry, so the
# templates in SL_BASE_TEMPLATES are left untouched.
SL_TEMPLATES = {
    plan_id: dict(plan, local_disk=True)
    for plan_id, plan in enumerate(SL_BASE_TEMPLATES)
}
|
||||
|
||||
|
||||
class SoftLayerException(LibcloudError):
    """
    Exception class for SoftLayer driver.

    Raised for SoftLayer API faults that have no more specific mapping
    (see SoftLayerResponse.defaultExceptionCls).
    """
    pass
|
||||
|
||||
|
||||
class SoftLayerResponse(XMLRPCResponse):
    """
    XML-RPC response class for SoftLayer.
    """
    # Faults without a more specific mapping are raised as
    # SoftLayerException.
    defaultExceptionCls = SoftLayerException
    # NOTE(review): presumably keyed on the fault string reported by the
    # API (here: auth failures from SoftLayer_Account) -- exact matching
    # is done by XMLRPCResponse; confirm there.
    exceptions = {
        'SoftLayer_Account': InvalidCredsError,
    }
|
||||
|
||||
|
||||
class SoftLayerConnection(XMLRPCConnection, ConnectionUserAndKey):
    """
    Connection class for the SoftLayer XML-RPC API.

    SoftLayer passes authentication and per-call options (init
    parameters, object masks) inside a "headers" struct that is
    prepended to the positional XML-RPC arguments.
    """
    responseCls = SoftLayerResponse
    host = 'api.softlayer.com'
    endpoint = '/xmlrpc/v3'

    def request(self, service, method, *args, **kwargs):
        """
        Call ``method`` on the given SoftLayer ``service``.

        :keyword id: optional object id, sent as the service's init
                     parameter to scope the call to one object.
        :keyword object_mask: optional object mask restricting or
                              expanding the fields returned.
        """
        headers = {}
        headers.update(self._get_auth_headers())
        headers.update(self._get_init_params(service, kwargs.get('id')))
        # Bug fix: the object mask used to be merged into the headers
        # twice (two identical headers.update calls); once is enough.
        headers.update(
            self._get_object_mask(service, kwargs.get('object_mask')))

        args = ({'headers': headers}, ) + args
        endpoint = '%s/%s' % (self.endpoint, service)

        return super(SoftLayerConnection, self).request(method, *args,
                                                        **{'endpoint':
                                                           endpoint})

    def _get_auth_headers(self):
        # Credentials ride along inside the headers struct.
        return {
            'authenticate': {
                'username': self.user_id,
                'apiKey': self.key
            }
        }

    def _get_init_params(self, service, id):
        # Scope the call to a single object when an id is given.
        if id is not None:
            return {
                '%sInitParameters' % service: {'id': id}
            }
        else:
            return {}

    def _get_object_mask(self, service, mask):
        # Restrict/expand the fields returned by the call.
        if mask is not None:
            return {
                '%sObjectMask' % service: {'mask': mask}
            }
        else:
            return {}
|
||||
|
||||
|
||||
class SoftLayerNodeDriver(NodeDriver):
    """
    SoftLayer node driver

    Extra node attributes:
        - password: root password
        - hourlyRecurringFee: hourly price (if applicable)
        - recurringFee      : flat rate    (if applicable)
        - recurringMonths   : The number of months in which the recurringFee
         will be incurred.
    """
    connectionCls = SoftLayerConnection
    name = 'SoftLayer'
    website = 'http://www.softlayer.com/'
    type = Provider.SOFTLAYER

    # create_node returns a generated root password in Node.extra.
    features = {'create_node': ['generates_password']}

    def _to_node(self, host):
        """Convert a SoftLayer virtual guest dict into a :class:`Node`."""
        try:
            password = \
                host['operatingSystem']['passwords'][0]['password']
        except (IndexError, KeyError):
            # No password reported yet (e.g. guest still provisioning).
            password = None

        hourlyRecurringFee = host.get('billingItem', {}).get(
            'hourlyRecurringFee', 0)
        recurringFee = host.get('billingItem', {}).get('recurringFee', 0)
        recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0)
        createDate = host.get('createDate', None)

        # When machine is launching it gets state halted
        # we change this to pending
        state = NODE_STATE_MAP.get(host['powerState']['keyName'],
                                   NodeState.UNKNOWN)

        if not password and state == NodeState.UNKNOWN:
            state = NODE_STATE_MAP['INITIATING']

        public_ips = []
        private_ips = []

        if 'primaryIpAddress' in host:
            public_ips.append(host['primaryIpAddress'])

        if 'primaryBackendIpAddress' in host:
            private_ips.append(host['primaryBackendIpAddress'])

        # Human readable OS description, when the API reported one.
        image = host.get('operatingSystem', {}).get('softwareLicense', {}) \
            .get('softwareDescription', {}) \
            .get('longDescription', None)

        return Node(
            id=host['id'],
            name=host['fullyQualifiedDomainName'],
            state=state,
            public_ips=public_ips,
            private_ips=private_ips,
            driver=self,
            extra={
                'hostname': host['hostname'],
                'fullyQualifiedDomainName': host['fullyQualifiedDomainName'],
                'password': password,
                'maxCpu': host.get('maxCpu', None),
                'datacenter': host.get('datacenter', {}).get('longName', None),
                'maxMemory': host.get('maxMemory', None),
                'image': image,
                'hourlyRecurringFee': hourlyRecurringFee,
                'recurringFee': recurringFee,
                'recurringMonths': recurringMonths,
                'created': createDate,
            }
        )

    def destroy_node(self, node):
        """Delete the virtual guest. Returns True (API errors raise)."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'deleteObject', id=node.id
        )
        return True

    def reboot_node(self, node):
        """Soft (OS-level) reboot of the virtual guest."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'rebootSoft', id=node.id
        )
        return True

    def ex_stop_node(self, node):
        """Power the virtual guest off."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'powerOff', id=node.id
        )
        return True

    def ex_start_node(self, node):
        """Power the virtual guest on."""
        self.connection.request(
            'SoftLayer_Virtual_Guest', 'powerOn', id=node.id
        )
        return True

    def _get_order_information(self, node_id, timeout=1200, check_interval=5):
        """
        Poll a newly ordered guest every ``check_interval`` seconds until
        it reports a provisionDate (provisioning finished), for at most
        ``timeout`` seconds.

        :raises SoftLayerException: when the guest is not provisioned
                                    within ``timeout`` seconds.
        """
        mask = {
            'billingItem': '',
            'powerState': '',
            'operatingSystem': {'passwords': ''},
            'provisionDate': '',
        }

        for i in range(0, timeout, check_interval):
            res = self.connection.request(
                'SoftLayer_Virtual_Guest',
                'getObject',
                id=node_id,
                object_mask=mask
            ).object

            if res.get('provisionDate', None):
                return res

            time.sleep(check_interval)

        raise SoftLayerException('Timeout on getting node details')

    def create_node(self, **kwargs):
        """Create a new SoftLayer node

        @inherits: :class:`NodeDriver.create_node`

        :keyword    ex_domain: e.g. libcloud.org
        :type       ex_domain: ``str``
        :keyword    ex_cpus: e.g. 2
        :type       ex_cpus: ``int``
        :keyword    ex_disk: e.g. 100
        :type       ex_disk: ``int``
        :keyword    ex_ram: e.g. 2048
        :type       ex_ram: ``int``
        :keyword    ex_bandwidth: e.g. 100
        :type       ex_bandwidth: ``int``
        :keyword    ex_local_disk: e.g. True
        :type       ex_local_disk: ``bool``
        :keyword    ex_datacenter: e.g. Dal05
        :type       ex_datacenter: ``str``
        :keyword    ex_os: e.g. UBUNTU_LATEST
        :type       ex_os: ``str``
        """
        name = kwargs['name']
        # OS: explicit ex_os wins, then the image id, else latest Debian.
        os = 'DEBIAN_LATEST'
        if 'ex_os' in kwargs:
            os = kwargs['ex_os']
        elif 'image' in kwargs:
            os = kwargs['image'].id

        # With no size, order a fully custom guest; id 123 deliberately
        # matches no SL_TEMPLATES entry so ex_size_data stays empty.
        size = kwargs.get('size', NodeSize(id=123, name='Custom', ram=None,
                                           disk=None, bandwidth=None,
                                           price=None,
                                           driver=self.connection.driver))
        ex_size_data = SL_TEMPLATES.get(int(size.id)) or {}
        # plan keys are ints
        cpu_count = kwargs.get('ex_cpus') or ex_size_data.get('cpus') or \
            DEFAULT_CPU_SIZE
        ram = kwargs.get('ex_ram') or ex_size_data.get('ram') or \
            DEFAULT_RAM_SIZE
        bandwidth = kwargs.get('ex_bandwidth') or size.bandwidth or 10
        hourly = 'true' if kwargs.get('ex_hourly', True) else 'false'

        # Local disk unless either the plan or the caller disables it.
        local_disk = 'true'
        if ex_size_data.get('local_disk') is False:
            local_disk = 'false'

        if kwargs.get('ex_local_disk') is False:
            local_disk = 'false'

        disk_size = DEFAULT_DISK_SIZE
        if size.disk:
            disk_size = size.disk
        if kwargs.get('ex_disk'):
            disk_size = kwargs.get('ex_disk')

        datacenter = ''
        if 'ex_datacenter' in kwargs:
            datacenter = kwargs['ex_datacenter']
        elif 'location' in kwargs:
            datacenter = kwargs['location'].id

        # Derive the domain from a dotted hostname when not given.
        domain = kwargs.get('ex_domain')
        if domain is None:
            if name.find('.') != -1:
                domain = name[name.find('.') + 1:]
        if domain is None:
            # TODO: domain is a required argument for the Sofylayer API, but it
            # it shouldn't be.
            domain = DEFAULT_DOMAIN

        # Guest ordering template ("CCI" = Cloud Computing Instance).
        newCCI = {
            'hostname': name,
            'domain': domain,
            'startCpus': cpu_count,
            'maxMemory': ram,
            'networkComponents': [{'maxSpeed': bandwidth}],
            'hourlyBillingFlag': hourly,
            'operatingSystemReferenceCode': os,
            'localDiskFlag': local_disk,
            'blockDevices': [
                {
                    'device': '0',
                    'diskImage': {
                        'capacity': disk_size,
                    }
                }
            ]

        }

        if datacenter:
            newCCI['datacenter'] = {'name': datacenter}

        res = self.connection.request(
            'SoftLayer_Virtual_Guest', 'createObject', newCCI
        ).object

        node_id = res['id']
        # Block until provisioning finishes so the returned Node carries
        # its password and IP addresses.
        raw_node = self._get_order_information(node_id)

        return self._to_node(raw_node)

    def _to_image(self, img):
        """Convert one createObjectOptions OS entry to a NodeImage."""
        return NodeImage(
            id=img['template']['operatingSystemReferenceCode'],
            name=img['itemPrice']['item']['description'],
            driver=self.connection.driver
        )

    def list_images(self, location=None):
        """List the operating systems available for ordering."""
        result = self.connection.request(
            'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
        ).object
        return [self._to_image(i) for i in result['operatingSystems']]

    def _to_size(self, id, size):
        """Convert one SL_TEMPLATES entry to a NodeSize."""
        return NodeSize(
            id=id,
            name=size['name'],
            ram=size['ram'],
            disk=size['disk'],
            bandwidth=size.get('bandwidth'),
            price=None,
            driver=self.connection.driver,
        )

    def list_sizes(self, location=None):
        """List the static plan table (SL_TEMPLATES) as NodeSizes."""
        return [self._to_size(id, s) for id, s in SL_TEMPLATES.items()]

    def _to_loc(self, loc):
        """Convert a datacenter entry to a NodeLocation, enriching it
        from the static DATACENTERS table when possible."""
        country = 'UNKNOWN'
        loc_id = loc['template']['datacenter']['name']
        name = loc_id

        if loc_id in DATACENTERS:
            country = DATACENTERS[loc_id]['country']
            name = DATACENTERS[loc_id].get('name', loc_id)
        return NodeLocation(id=loc_id, name=name,
                            country=country, driver=self)

    def list_locations(self):
        """List the datacenters guests can be ordered in."""
        res = self.connection.request(
            'SoftLayer_Virtual_Guest', 'getCreateObjectOptions'
        ).object
        return [self._to_loc(l) for l in res['datacenters']]

    def list_nodes(self):
        """List all virtual guests on the account."""
        # Ask only for the fields _to_node() consumes.
        mask = {
            'virtualGuests': {
                'powerState': '',
                'hostname': '',
                'maxMemory': '',
                'datacenter': '',
                'operatingSystem': {'passwords': ''},
                'billingItem': '',
            },
        }
        res = self.connection.request(
            "SoftLayer_Account",
            "getVirtualGuests",
            object_mask=mask
        ).object
        return [self._to_node(h) for h in res]
|
||||
302
awx/lib/site-packages/libcloud/compute/drivers/vcl.py
Normal file
302
awx/lib/site-packages/libcloud/compute/drivers/vcl.py
Normal file
@@ -0,0 +1,302 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
VCL driver
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey
|
||||
from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection
|
||||
from libcloud.common.types import InvalidCredsError, LibcloudError
|
||||
from libcloud.compute.types import Provider, NodeState
|
||||
from libcloud.compute.base import NodeDriver, Node
|
||||
from libcloud.compute.base import NodeSize, NodeImage
|
||||
|
||||
|
||||
class VCLResponse(XMLRPCResponse):
    """
    XML-RPC response class for the VCL driver.
    """
    # NOTE(review): presumably keyed on the fault string reported by the
    # API; matching is performed by XMLRPCResponse -- confirm there.
    exceptions = {
        'VCL_Account': InvalidCredsError,
    }
|
||||
|
||||
|
||||
class VCLConnection(XMLRPCConnection, ConnectionUserAndKey):
    """
    Connection class for the VCL XML-RPC API.
    """
    endpoint = '/index.php?mode=xmlrpccall'

    def add_default_headers(self, headers):
        # VCL authenticates every call through these three HTTP headers.
        auth_headers = {
            'X-APIVERSION': '2',
            'X-User': self.user_id,
            'X-Pass': self.key,
        }
        headers.update(auth_headers)
        return headers
|
||||
|
||||
|
||||
class VCLNodeDriver(NodeDriver):
    """
    VCL node driver

    :keyword   host: The VCL host to which you make requests(required)
    :type      host: ``str``
    """

    # VCL request state -> libcloud NodeState.
    NODE_STATE_MAP = {
        'ready': NodeState.RUNNING,
        'failed': NodeState.TERMINATED,
        'timedout': NodeState.TERMINATED,
        'loading': NodeState.PENDING,
        'time': NodeState.PENDING,
        'future': NodeState.PENDING,
        'error': NodeState.UNKNOWN,
        'notready': NodeState.PENDING,
        'notavailable': NodeState.TERMINATED,
        'success': NodeState.PENDING
    }

    connectionCls = VCLConnection
    name = 'VCL'
    website = 'http://incubator.apache.org/vcl/'
    type = Provider.VCL

    def __init__(self, key, secret, secure=True, host=None, port=None, *args,
                 **kwargs):
        """
        :param    key:    API key or username to used (required)
        :type     key:    ``str``

        :param    secret: Secret password to be used (required)
        :type     secret: ``str``

        :param    secure: Weither to use HTTPS or HTTP.
        :type     secure: ``bool``

        :param    host: Override hostname used for connections. (required)
        :type     host: ``str``

        :param    port: Override port used for connections.
        :type     port: ``int``

        :rtype: ``None``
        """
        if not host:
            raise Exception('When instantiating VCL driver directly ' +
                            'you also need to provide host')

        # Bug fix: secure/host/port used to be hard-coded to
        # (True, None, None) here, silently discarding the values the
        # caller supplied -- including the host validated just above.
        super(VCLNodeDriver, self).__init__(key, secret, secure=secure,
                                            host=host, port=port, *args,
                                            **kwargs)

    def _vcl_request(self, method, *args):
        """
        Perform an XML-RPC call and raise :class:`LibcloudError` when the
        VCL response carries an error status.
        """
        res = self.connection.request(
            method,
            *args
        ).object
        if res['status'] == 'error':
            raise LibcloudError(res['errormsg'], driver=self)
        return res

    def create_node(self, **kwargs):
        """Create a new VCL reservation
        size and name ignored, image is the id from list_image

        @inherits: :class:`NodeDriver.create_node`

        :keyword    image: image is the id from list_image
        :type       image: ``str``

        :keyword    start: start time as unix timestamp
        :type       start: ``str``

        :keyword    length: length of time in minutes
        :type       length: ``str``
        """
        image = kwargs["image"]
        # Default: reserve starting now, for one hour.
        start = kwargs.get('start', int(time.time()))
        length = kwargs.get('length', '60')

        res = self._vcl_request(
            "XMLRPCaddRequest",
            image.id,
            start,
            length
        )

        return Node(
            id=res['requestid'],
            name=image.name,
            state=self.NODE_STATE_MAP[res['status']],
            public_ips=[],
            private_ips=[],
            driver=self,
            image=image.name
        )

    def destroy_node(self, node):
        """
        End VCL reservation for the node passed in.
        Throws error if request fails.

        :param  node: The node to be destroyed
        :type   node: :class:`Node`

        :rtype: ``bool``
        """
        try:
            self._vcl_request(
                'XMLRPCendRequest',
                node.id
            )
        except LibcloudError:
            return False
        return True

    def _to_image(self, img):
        """Convert one XMLRPCgetImages entry to a NodeImage."""
        return NodeImage(
            id=img['id'],
            name=img['name'],
            driver=self.connection.driver
        )

    def list_images(self, location=None):
        """
        List images available to the user provided credentials

        @inherits: :class:`NodeDriver.list_images`
        """
        res = self.connection.request(
            "XMLRPCgetImages"
        ).object
        return [self._to_image(i) for i in res]

    def list_sizes(self, location=None):
        """
        VCL does not choosing sizes for node creation.
        Size of images are statically set by administrators.

        @inherits: :class:`NodeDriver.list_sizes`
        """
        # Single placeholder size; VCL has no user-selectable plans.
        return [NodeSize(
            't1.micro',
            'none',
            '512',
            0, 0, 0, self)
        ]

    def _to_connect_data(self, request_id, ipaddr):
        """Fetch connection details (server IP, user, password) for a
        ready reservation, scoped to the given client IP."""
        res = self._vcl_request(
            "XMLRPCgetRequestConnectData",
            request_id,
            ipaddr
        )
        return res

    def _to_status(self, requestid, imagename, ipaddr):
        """Build a Node reflecting the current status of a reservation;
        connection data is only available once the request is ready."""
        res = self._vcl_request(
            "XMLRPCgetRequestStatus",
            requestid
        )

        public_ips = []
        # Fix: was initialised to a list but later replaced by a dict;
        # Node.extra is a mapping, so start with an empty dict.
        extra = {}
        if res['status'] == 'ready':
            cdata = self._to_connect_data(requestid, ipaddr)
            public_ips = [cdata['serverIP']]
            extra = {
                'user': cdata['user'],
                'pass': cdata['password']
            }
        return Node(
            id=requestid,
            name=imagename,
            state=self.NODE_STATE_MAP[res['status']],
            public_ips=public_ips,
            private_ips=[],
            driver=self,
            image=imagename,
            extra=extra
        )

    def _to_nodes(self, res, ipaddr):
        """Convert a list of request dicts into Nodes (one status call
        per request)."""
        return [self._to_status(
            h['requestid'],
            h['imagename'],
            ipaddr
        ) for h in res]

    def list_nodes(self, ipaddr):
        """
        List nodes

        :param  ipaddr: IP address which should be used
        :type   ipaddr: ``str``

        :rtype: ``list`` of :class:`Node`
        """
        res = self._vcl_request(
            "XMLRPCgetRequestIds"
        )
        return self._to_nodes(res['requests'], ipaddr)

    def ex_update_node_access(self, node, ipaddr):
        """
        Update the remote ip accessing the node.

        :param node: the reservation node to update
        :type  node: :class:`Node`

        :param ipaddr: the ipaddr used to access the node
        :type  ipaddr: ``str``

        :return: node with updated information
        :rtype: :class:`Node`
        """
        return self._to_status(node.id, node.image, ipaddr)

    def ex_extend_request_time(self, node, minutes):
        """
        Time in minutes to extend the requested node's reservation time

        :param node: the reservation node to update
        :type  node: :class:`Node`

        :param minutes: the number of mintes to update
        :type  minutes: ``str``

        :return: true on success, throws error on failure
        :rtype: ``bool``
        """
        return self._vcl_request(
            "XMLRPCextendRequest",
            node.id,
            minutes
        )

    def ex_get_request_end_time(self, node):
        """
        Get the ending time of the node reservation.

        :param node: the reservation node to update
        :type  node: :class:`Node`

        :return: unix timestamp
        :rtype: ``int``
        """
        res = self._vcl_request(
            "XMLRPCgetRequestIds"
        )
        # Renamed local from "time" to avoid shadowing the time module.
        end_time = 0
        for req in res['requests']:
            if req['requestid'] == node.id:
                end_time = req['end']
        return end_time
|
||||
2090
awx/lib/site-packages/libcloud/compute/drivers/vcloud.py
Normal file
2090
awx/lib/site-packages/libcloud/compute/drivers/vcloud.py
Normal file
File diff suppressed because it is too large
Load Diff
307
awx/lib/site-packages/libcloud/compute/drivers/voxel.py
Normal file
307
awx/lib/site-packages/libcloud/compute/drivers/voxel.py
Normal file
@@ -0,0 +1,307 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Voxel VoxCloud driver
|
||||
"""
|
||||
import datetime
|
||||
import hashlib
|
||||
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
|
||||
from libcloud.common.types import InvalidCredsError
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.types import NodeState
|
||||
from libcloud.compute.base import Node, NodeDriver
|
||||
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
|
||||
|
||||
# Host name of the public Voxel hAPI endpoint.
VOXEL_API_HOST = "api.voxel.net"
|
||||
|
||||
|
||||
class VoxelResponse(XmlResponse):
    """
    XML response class for the Voxel API. Responses are XML documents
    whose root element carries an overall ``stat`` attribute.
    """

    def __init__(self, response, connection):
        # Cache for the parsed XML body so it is parsed at most once.
        self.parsed = None
        super(VoxelResponse, self).__init__(response=response,
                                            connection=connection)

    def parse_body(self):
        """Parse and memoize the XML body (``None`` when empty)."""
        if not self.body:
            return None
        if not self.parsed:
            self.parsed = super(VoxelResponse, self).parse_body()
        return self.parsed

    def parse_error(self):
        """
        Collect all ``err`` elements into one message string.

        :raises InvalidCredsError: on authentication-related error codes.
        """
        err_list = []
        if not self.body:
            return None
        if not self.parsed:
            self.parsed = super(VoxelResponse, self).parse_body()
        for err in self.parsed.findall('err'):
            code = err.get('code')
            err_list.append("(%s) %s" % (code, err.get('msg')))
            # From voxel docs:
            # 1: Invalid login or password
            # 9: Permission denied: user lacks access rights for this method
            if code == "1" or code == "9":
                # sucks, but only way to detect
                # bad authentication tokens so far
                raise InvalidCredsError(err_list[-1])
        return "\n".join(err_list)

    def success(self):
        """Return True when the root element's stat attribute is "ok"."""
        if not self.parsed:
            self.parsed = super(VoxelResponse, self).parse_body()
        stat = self.parsed.get('stat')
        if stat != "ok":
            return False
        return True
|
||||
|
||||
|
||||
class VoxelConnection(ConnectionUserAndKey):
    """
    Connection class for the Voxel driver
    """

    host = VOXEL_API_HOST
    responseCls = VoxelResponse

    def add_default_params(self, params):
        """
        Add authentication parameters and the request signature.

        Every request carries the API key, a UTC timestamp, and an
        ``api_sig`` MD5 digest computed over the API secret followed by
        the sorted key/value pairs, per Voxel's hAPI signing scheme.
        """
        # Drop None-valued parameters before signing/sending.
        params = dict([(k, v) for k, v in list(params.items())
                       if v is not None])
        params["key"] = self.user_id
        params["timestamp"] = datetime.datetime.utcnow().isoformat() + "+0000"

        # The signature is order-sensitive: keys must be sorted.
        keys = list(params.keys())
        keys.sort()

        md5 = hashlib.md5()
        md5.update(b(self.key))
        for key in keys:
            value = params[key]
            # Falsy values (empty strings, 0) are excluded from the
            # signature. The original code carried an unreachable
            # "params[key] is None" else-branch inside this test (a
            # truthy value can never be None); that dead code is removed
            # -- the computed signature is unchanged.
            if value:
                md5.update(b("%s%s" % (key, value)))
        params['api_sig'] = md5.hexdigest()
        return params
|
||||
|
||||
# Plan table; populated at class-creation time by
# VoxelNodeDriver._initialize_instance_types().
VOXEL_INSTANCE_TYPES = {}
# Each plan gets this much RAM (MB) per CPU core.
RAM_PER_CPU = 2048

# Voxel device status -> libcloud NodeState.
NODE_STATE_MAP = {
    'IN_PROGRESS': NodeState.PENDING,
    'QUEUED': NodeState.PENDING,
    'SUCCEEDED': NodeState.RUNNING,
    'shutting-down': NodeState.TERMINATED,
    'terminated': NodeState.TERMINATED,
    'unknown': NodeState.UNKNOWN,
}
|
||||
|
||||
|
||||
class VoxelNodeDriver(NodeDriver):
    """
    Voxel VoxCLOUD node driver
    """

    connectionCls = VoxelConnection
    type = Provider.VOXEL
    name = 'Voxel VoxCLOUD'
    website = 'http://www.voxel.net/'

    def _initialize_instance_types():
        # Runs once at class-creation time (called below while this is
        # still a plain function in the class body, hence no ``self``)
        # to fill the module-level VOXEL_INSTANCE_TYPES table with
        # 1..13-core plans at RAM_PER_CPU MB of RAM per core.
        for cpus in range(1, 14):
            if cpus == 1:
                name = "Single CPU"
            else:
                name = "%d CPUs" % cpus
            id = "%dcpu" % cpus
            ram = cpus * RAM_PER_CPU

            VOXEL_INSTANCE_TYPES[id] = {
                'id': id,
                'name': name,
                'ram': ram,
                'disk': None,
                'bandwidth': None,
                'price': None}

    features = {"create_node": [],
                "list_sizes": ["variable_disk"]}

    # Populate VOXEL_INSTANCE_TYPES now, during class construction.
    _initialize_instance_types()
|
||||
|
||||
def list_nodes(self):
|
||||
params = {"method": "voxel.devices.list"}
|
||||
result = self.connection.request('/', params=params).object
|
||||
return self._to_nodes(result)
|
||||
|
||||
def list_sizes(self, location=None):
|
||||
return [NodeSize(driver=self.connection.driver, **i)
|
||||
for i in list(VOXEL_INSTANCE_TYPES.values())]
|
||||
|
||||
def list_images(self, location=None):
|
||||
params = {"method": "voxel.images.list"}
|
||||
result = self.connection.request('/', params=params).object
|
||||
return self._to_images(result)
|
||||
|
||||
    def create_node(self, **kwargs):
        """Create Voxel Node

        :keyword name: the name to assign the node (mandatory)
        :type    name: ``str``

        :keyword image: distribution to deploy
        :type    image: :class:`NodeImage`

        :keyword size: the plan size to create (mandatory)
                       Requires size.disk (GB) to be set manually
        :type    size: :class:`NodeSize`

        :keyword location: which datacenter to create the node in
        :type    location: :class:`NodeLocation`

        :keyword ex_privateip: Backend IP address to assign to node;
                               must be chosen from the customer's
                               private VLAN assignment.
        :type    ex_privateip: ``str``

        :keyword ex_publicip: Public-facing IP address to assign to node;
                              must be chosen from the customer's
                              public VLAN assignment.
        :type    ex_publicip: ``str``

        :keyword ex_rootpass: Password for root access; generated if unset.
        :type    ex_rootpass: ``str``

        :keyword ex_consolepass: Password for remote console;
                                 generated if unset.
        :type    ex_consolepass: ``str``

        :keyword ex_sshuser: Username for SSH access
        :type    ex_sshuser: ``str``

        :keyword ex_sshpass: Password for SSH access; generated if unset.
        :type    ex_sshpass: ``str``

        :keyword ex_voxel_access: Allow access Voxel administrative access.
                                  Defaults to False.
        :type    ex_voxel_access: ``bool``

        :rtype: :class:`Node` or ``None``
        """
        # assert that disk > 0
        if not kwargs["size"].disk:
            raise ValueError("size.disk must be non-zero")

        # convert voxel_access to string boolean if needed
        voxel_access = kwargs.get("ex_voxel_access", None)
        if voxel_access is not None:
            voxel_access = "true" if voxel_access else "false"

        params = {
            'method': 'voxel.voxcloud.create',
            'hostname': kwargs["name"],
            'disk_size': int(kwargs["size"].disk),
            'facility': kwargs["location"].id,
            'image_id': kwargs["image"].id,
            # NOTE(review): under Python 3 this is true division and
            # yields a float -- confirm the API accepts it.
            'processing_cores': kwargs["size"].ram / RAM_PER_CPU,
            'backend_ip': kwargs.get("ex_privateip", None),
            'frontend_ip': kwargs.get("ex_publicip", None),
            'admin_password': kwargs.get("ex_rootpass", None),
            'console_password': kwargs.get("ex_consolepass", None),
            'ssh_username': kwargs.get("ex_sshuser", None),
            'ssh_password': kwargs.get("ex_sshpass", None),
            'voxel_access': voxel_access,
        }

        object = self.connection.request('/', params=params).object

        if self._getstatus(object):
            return Node(
                id=object.findtext("device/id"),
                name=kwargs["name"],
                state=NODE_STATE_MAP[object.findtext("device/status")],
                # NOTE(review): these read "publicip"/"privateip", not the
                # documented ex_publicip/ex_privateip keywords, and pass
                # bare strings rather than lists -- verify against callers
                # before changing.
                public_ips=kwargs.get("publicip", None),
                private_ips=kwargs.get("privateip", None),
                driver=self.connection.driver
            )
        else:
            return None
|
||||
|
||||
def reboot_node(self, node):
|
||||
params = {'method': 'voxel.devices.power',
|
||||
'device_id': node.id,
|
||||
'power_action': 'reboot'}
|
||||
return self._getstatus(
|
||||
self.connection.request('/', params=params).object)
|
||||
|
||||
def destroy_node(self, node):
|
||||
params = {'method': 'voxel.voxcloud.delete',
|
||||
'device_id': node.id}
|
||||
return self._getstatus(
|
||||
self.connection.request('/', params=params).object)
|
||||
|
||||
def list_locations(self):
|
||||
params = {"method": "voxel.voxcloud.facilities.list"}
|
||||
result = self.connection.request('/', params=params).object
|
||||
nodes = self._to_locations(result)
|
||||
return nodes
|
||||
|
||||
def _getstatus(self, element):
|
||||
status = element.attrib["stat"]
|
||||
return status == "ok"
|
||||
|
||||
    def _to_locations(self, object):
        """
        Convert a facilities.list response into NodeLocation objects.

        The facility ``label`` attribute becomes the location id.
        """
        # NOTE(review): the facility <description> text is passed twice --
        # as both the location name and the country field. Confirm this
        # duplication is intentional rather than a copy/paste slip.
        return [NodeLocation(element.attrib["label"],
                             element.findtext("description"),
                             element.findtext("description"),
                             self)
                for element in object.findall('facilities/facility')]
|
||||
|
||||
def _to_nodes(self, object):
|
||||
nodes = []
|
||||
for element in object.findall('devices/device'):
|
||||
if element.findtext("type") == "Virtual Server":
|
||||
try:
|
||||
state = self.NODE_STATE_MAP[element.attrib['status']]
|
||||
except KeyError:
|
||||
state = NodeState.UNKNOWN
|
||||
|
||||
public_ip = private_ip = None
|
||||
ipassignments = element.findall("ipassignments/ipassignment")
|
||||
for ip in ipassignments:
|
||||
if ip.attrib["type"] == "frontend":
|
||||
public_ip = ip.text
|
||||
elif ip.attrib["type"] == "backend":
|
||||
private_ip = ip.text
|
||||
|
||||
nodes.append(Node(id=element.attrib['id'],
|
||||
name=element.attrib['label'],
|
||||
state=state,
|
||||
public_ips=public_ip,
|
||||
private_ips=private_ip,
|
||||
driver=self.connection.driver))
|
||||
return nodes
|
||||
|
||||
def _to_images(self, object):
|
||||
images = []
|
||||
for element in object.findall("images/image"):
|
||||
images.append(NodeImage(id=element.attrib["id"],
|
||||
name=element.attrib["summary"],
|
||||
driver=self.connection.driver))
|
||||
return images
|
||||
193
awx/lib/site-packages/libcloud/compute/drivers/vpsnet.py
Normal file
193
awx/lib/site-packages/libcloud/compute/drivers/vpsnet.py
Normal file
@@ -0,0 +1,193 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
VPS.net driver
|
||||
"""
|
||||
import base64
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
|
||||
from libcloud.common.types import InvalidCredsError, MalformedResponseError
|
||||
from libcloud.compute.providers import Provider
|
||||
from libcloud.compute.types import NodeState
|
||||
from libcloud.compute.base import Node, NodeDriver
|
||||
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
|
||||
|
||||
API_HOST = 'api.vps.net'
# Request suffix selecting API version 10 with JSON responses.
API_VERSION = 'api10json'

# A VPS.net "node" is one slice; resources below scale per slice.
RAM_PER_NODE = 256        # MB of RAM per slice
DISK_PER_NODE = 10        # GB of disk per slice
BANDWIDTH_PER_NODE = 250  # GB of transfer per slice
|
||||
|
||||
|
||||
class VPSNetResponse(JsonResponse):
    """
    Response class for the VPS.net API.

    Falls back to the raw body when the payload is not valid JSON.
    """

    def parse_body(self):
        try:
            return super(VPSNetResponse, self).parse_body()
        except MalformedResponseError:
            # Not JSON -- hand back the raw response body.
            return self.body

    def success(self):
        # vps.net wrongly uses 406 for invalid auth creds
        if self.status in (406, 403):
            raise InvalidCredsError()
        return True

    def parse_error(self):
        try:
            errors = super(VPSNetResponse, self).parse_body()['errors'][0]
        except MalformedResponseError:
            return self.body
        return "\n".join(errors)
|
||||
|
||||
|
||||
class VPSNetConnection(ConnectionUserAndKey):
    """
    Connection class for the VPS.net driver
    """

    host = API_HOST
    responseCls = VPSNetResponse

    # Plain HTTP would expose the Basic-auth credentials added below.
    allow_insecure = False

    def add_default_headers(self, headers):
        # HTTP Basic auth: base64("<api user>:<api key>").
        user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
        headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8'))
        return headers
|
||||
|
||||
|
||||
class VPSNetNodeDriver(NodeDriver):
    """
    VPS.net node driver
    """

    type = Provider.VPSNET
    api_name = 'vps_net'
    name = "vps.net"
    website = 'http://vps.net/'
    connectionCls = VPSNetConnection

    def _to_node(self, vm):
        # The API only exposes a boolean "running" flag, so any
        # non-running VM is reported as PENDING (stopped/terminated are
        # not distinguishable from this payload).
        if vm['running']:
            state = NodeState.RUNNING
        else:
            state = NodeState.PENDING

        n = Node(id=vm['id'],
                 name=vm['label'],
                 state=state,
                 public_ips=[vm.get('primary_ip_address', None)],
                 private_ips=[],
                 extra={'slices_count': vm['slices_count']},
                 # Number of nodes consumed by VM
                 driver=self.connection.driver)
        return n

    def _to_image(self, image, cloud):
        # Image names are prefixed with the cloud label so templates from
        # different clouds remain distinguishable.
        image = NodeImage(id=image['id'],
                          name="%s: %s" % (cloud, image['label']),
                          driver=self.connection.driver)

        return image

    def _to_size(self, num):
        # A "size" is a number of slices; RAM and bandwidth scale with the
        # slice count, disk does not.
        # NOTE(review): price is _get_price_per_node(num) * num, and
        # _get_price_per_node already multiplies by num -- the price
        # therefore scales quadratically with the slice count. Confirm
        # whether this is intended.
        size = NodeSize(id=num,
                        name="%d Node" % (num,),
                        ram=RAM_PER_NODE * num,
                        disk=DISK_PER_NODE,
                        bandwidth=BANDWIDTH_PER_NODE * num,
                        price=self._get_price_per_node(num) * num,
                        driver=self.connection.driver)
        return size

    def _get_price_per_node(self, num):
        # Price is derived from the single-slice price in the pricing
        # table (api_name 'vps_net', size id '1').
        single_node_price = self._get_size_price(size_id='1')
        return num * single_node_price

    def create_node(self, name, image, size, **kwargs):
        """Create a new VPS.net node

        @inherits: :class:`NodeDriver.create_node`

        :keyword ex_backups_enabled: Enable automatic backups
        :type ex_backups_enabled: ``bool``

        :keyword ex_fqdn: Fully Qualified domain of the node
        :type ex_fqdn: ``str``
        """
        headers = {'Content-Type': 'application/json'}
        request = {'virtual_machine':
                   {'label': name,
                    'fqdn': kwargs.get('ex_fqdn', ''),
                    'system_template_id': image.id,
                    'backups_enabled': kwargs.get('ex_backups_enabled', 0),
                    'slices_required': size.id}}

        res = self.connection.request('/virtual_machines.%s' % (API_VERSION,),
                                      data=json.dumps(request),
                                      headers=headers,
                                      method='POST')
        node = self._to_node(res.object['virtual_machine'])
        return node

    def reboot_node(self, node):
        # NOTE(review): the parsed node is discarded; the method only
        # signals success by returning True (a failed request raises from
        # the connection layer instead).
        res = self.connection.request(
            '/virtual_machines/%s/%s.%s' % (node.id,
                                            'reboot',
                                            API_VERSION),
            method="POST")
        node = self._to_node(res.object['virtual_machine'])
        return True

    def list_sizes(self, location=None):
        # NOTE(review): this counts slices whose 'virtual_machine_id' is
        # set, i.e. slices already in use -- despite the name
        # "available_nodes". Confirm against the /nodes API semantics.
        res = self.connection.request('/nodes.%s' % (API_VERSION,))
        available_nodes = len([size for size in res.object
                               if size['slice']['virtual_machine_id']])
        sizes = [self._to_size(i) for i in range(1, available_nodes + 1)]
        return sizes

    def destroy_node(self, node):
        res = self.connection.request('/virtual_machines/%s.%s'
                                      % (node.id, API_VERSION),
                                      method='DELETE')
        return res.status == 200

    def list_nodes(self):
        res = self.connection.request('/virtual_machines.%s' % (API_VERSION,))
        return [self._to_node(i['virtual_machine']) for i in res.object]

    def list_images(self, location=None):
        # Images are grouped per cloud; flatten every cloud's system
        # templates into a single list.
        res = self.connection.request('/available_clouds.%s' % (API_VERSION,))

        images = []
        for cloud in res.object:
            label = cloud['cloud']['label']
            templates = cloud['cloud']['system_templates']
            images.extend([self._to_image(image, label)
                           for image in templates])

        return images

    def list_locations(self):
        # The API does not expose locations; a single static one is used.
        return [NodeLocation(0, "VPS.net Western US", 'US', self)]
|
||||
175
awx/lib/site-packages/libcloud/compute/providers.py
Normal file
175
awx/lib/site-packages/libcloud/compute/providers.py
Normal file
@@ -0,0 +1,175 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Provider related utilities
|
||||
"""
|
||||
|
||||
from libcloud.utils.misc import get_driver as _get_provider_driver
|
||||
from libcloud.utils.misc import set_driver as _set_provider_driver
|
||||
from libcloud.compute.types import Provider, DEPRECATED_RACKSPACE_PROVIDERS
|
||||
from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING
|
||||
|
||||
__all__ = [
    "Provider",
    "DRIVERS",
    "get_driver"]

# Registry mapping a Provider constant to a (module path, class name)
# tuple. Driver classes are imported lazily by get_driver() via
# libcloud.utils.misc.get_driver, so listing a driver here does not pull
# in its dependencies until it is requested.
DRIVERS = {
    Provider.DUMMY:
    ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'),
    Provider.EC2_US_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'),
    Provider.EC2_EU_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'),
    Provider.EC2_US_WEST:
    ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'),
    Provider.EC2_US_WEST_OREGON:
    ('libcloud.compute.drivers.ec2', 'EC2USWestOregonNodeDriver'),
    Provider.EC2_AP_SOUTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'),
    Provider.EC2_AP_NORTHEAST:
    ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'),
    Provider.EC2_SA_EAST:
    ('libcloud.compute.drivers.ec2', 'EC2SAEastNodeDriver'),
    Provider.EC2_AP_SOUTHEAST2:
    ('libcloud.compute.drivers.ec2', 'EC2APSESydneyNodeDriver'),
    Provider.ECP:
    ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'),
    Provider.ELASTICHOSTS:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'),
    Provider.ELASTICHOSTS_UK1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'),
    Provider.ELASTICHOSTS_UK2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'),
    Provider.ELASTICHOSTS_US1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'),
    Provider.ELASTICHOSTS_US2:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS2NodeDriver'),
    Provider.ELASTICHOSTS_US3:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS3NodeDriver'),
    Provider.ELASTICHOSTS_CA1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCA1NodeDriver'),
    Provider.ELASTICHOSTS_AU1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsAU1NodeDriver'),
    Provider.ELASTICHOSTS_CN1:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsCN1NodeDriver'),
    Provider.SKALICLOUD:
    ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'),
    Provider.SERVERLOVE:
    ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'),
    Provider.CLOUDSIGMA:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'),
    Provider.GCE:
    ('libcloud.compute.drivers.gce', 'GCENodeDriver'),
    Provider.GOGRID:
    ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'),
    Provider.RACKSPACE:
    ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'),
    Provider.RACKSPACE_FIRST_GEN:
    ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'),
    Provider.HPCLOUD:
    ('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'),
    Provider.KILI:
    ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'),
    Provider.VPSNET:
    ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'),
    Provider.LINODE:
    ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'),
    Provider.RIMUHOSTING:
    ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'),
    Provider.VOXEL:
    ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'),
    Provider.SOFTLAYER:
    ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'),
    Provider.EUCALYPTUS:
    ('libcloud.compute.drivers.ec2', 'EucNodeDriver'),
    Provider.IBM:
    ('libcloud.compute.drivers.ibm_sce', 'IBMNodeDriver'),
    Provider.OPENNEBULA:
    ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'),
    Provider.DREAMHOST:
    ('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'),
    Provider.BRIGHTBOX:
    ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'),
    Provider.NIMBUS:
    ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'),
    Provider.BLUEBOX:
    ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'),
    Provider.GANDI:
    ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'),
    Provider.OPSOURCE:
    ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'),
    Provider.OPENSTACK:
    ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'),
    Provider.NINEFOLD:
    ('libcloud.compute.drivers.ninefold', 'NinefoldNodeDriver'),
    Provider.VCLOUD:
    ('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'),
    Provider.TERREMARK:
    ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'),
    Provider.CLOUDSTACK:
    ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'),
    Provider.LIBVIRT:
    ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'),
    Provider.JOYENT:
    ('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'),
    Provider.VCL:
    ('libcloud.compute.drivers.vcl', 'VCLNodeDriver'),
    Provider.KTUCLOUD:
    ('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'),
    Provider.HOSTVIRTUAL:
    ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'),
    Provider.ABIQUO:
    ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'),
    Provider.DIGITAL_OCEAN:
    ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'),
    Provider.NEPHOSCALE:
    ('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'),
    Provider.CLOUDFRAMES:
    ('libcloud.compute.drivers.cloudframes', 'CloudFramesNodeDriver'),
    Provider.EXOSCALE:
    ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'),
    Provider.IKOULA:
    ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'),
    Provider.OUTSCALE_SAS:
    ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'),
    Provider.OUTSCALE_INC:
    ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'),

    # Deprecated
    Provider.CLOUDSIGMA_US:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaLvsNodeDriver'),
}
|
||||
|
||||
|
||||
def get_driver(provider):
    """
    Return the compute driver class registered for ``provider``.

    Legacy first-generation Rackspace constants have been removed; using
    one raises an error that names the replacement constant.

    :param provider: Provider constant from :class:`Provider`.
    :rtype: :class:`NodeDriver` subclass
    """
    if provider in DEPRECATED_RACKSPACE_PROVIDERS:
        # Invert Provider's namespace so we can report constant names.
        id_to_name_map = dict((v, k) for k, v in Provider.__dict__.items())
        old_name = id_to_name_map[provider]
        new_name = id_to_name_map[OLD_CONSTANT_TO_NEW_MAPPING[provider]]
        url = 'http://s.apache.org/lc0140un'
        raise Exception(
            'Provider constant %s has been removed. New constant '
            'is now called %s.\n'
            'For more information on this change and how to modify your '
            'code to work with it, please visit: %s' %
            (old_name, new_name, url))

    return _get_provider_driver(DRIVERS, provider)
|
||||
|
||||
|
||||
def set_driver(provider, module, klass):
    """
    Register an out-of-tree driver class ``klass`` (defined in ``module``)
    under the given provider constant.
    """
    return _set_provider_driver(DRIVERS, provider, module, klass)
|
||||
530
awx/lib/site-packages/libcloud/compute/ssh.py
Normal file
530
awx/lib/site-packages/libcloud/compute/ssh.py
Normal file
@@ -0,0 +1,530 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Wraps multiple ways to communicate over SSH.
|
||||
"""
|
||||
|
||||
# Paramiko is an optional dependency; when it is missing this module
# falls back to a stub client instead of failing at import time.
have_paramiko = False

try:
    import paramiko
    have_paramiko = True
except ImportError:
    pass
|
||||
|
||||
# Depending on your version of Paramiko, it may cause a deprecation
|
||||
# warning on Python 2.6.
|
||||
# Ref: https://bugs.launchpad.net/paramiko/+bug/392973
|
||||
|
||||
import os
|
||||
import time
|
||||
import subprocess
|
||||
import logging
|
||||
import warnings
|
||||
|
||||
from os.path import split as psplit
|
||||
from os.path import join as pjoin
|
||||
|
||||
from libcloud.utils.logging import ExtraLogFormatter
|
||||
from libcloud.utils.py3 import StringIO
|
||||
|
||||
__all__ = [
    'BaseSSHClient',
    'ParamikoSSHClient',
    'ShellOutSSHClient',

    'SSHCommandTimeoutError'
]


# Maximum number of bytes to read at once from a socket
CHUNK_SIZE = 1024
|
||||
|
||||
|
||||
class SSHCommandTimeoutError(Exception):
    """
    Exception which is raised when an SSH command times out.
    """
    def __init__(self, cmd, timeout):
        """
        :param cmd: Command which timed out.
        :type cmd: ``str``

        :param timeout: Timeout (in seconds) which was exceeded.
        :type timeout: ``float``
        """
        self.cmd = cmd
        self.timeout = timeout
        # Store the message on the instance: __str__ reads self.message,
        # and BaseException.message no longer exists on Python 3, so it
        # must be assigned explicitly rather than relied upon from the
        # parent constructor.
        self.message = 'Command didn\'t finish in %s seconds' % (timeout)
        super(SSHCommandTimeoutError, self).__init__(self.message)

    def __repr__(self):
        return ('<SSHCommandTimeoutError: cmd="%s",timeout=%s)>' %
                (self.cmd, self.timeout))

    def __str__(self):
        return self.message
|
||||
|
||||
|
||||
class BaseSSHClient(object):
    """
    Base class representing a connection over SSH/SCP to a remote node.

    Concrete subclasses implement connect/put/delete/run/close; every
    method here raises NotImplementedError.
    """

    def __init__(self, hostname, port=22, username='root', password=None,
                 key=None, key_files=None, timeout=None):
        """
        :type hostname: ``str``
        :keyword hostname: Hostname or IP address to connect to.

        :type port: ``int``
        :keyword port: TCP port to communicate on, defaults to 22.

        :type username: ``str``
        :keyword username: Username to use, defaults to root.

        :type password: ``str``
        :keyword password: Password to authenticate with or a password used
                           to unlock a private key if a password protected key
                           is used.

        :param key: Deprecated in favor of ``key_files`` argument.

        :type key_files: ``str`` or ``list``
        :keyword key_files: A list of paths to the private key files to use.
        """
        if key is not None:
            message = ('You are using deprecated "key" argument which has '
                       'been replaced with "key_files" argument')
            warnings.warn(message, DeprecationWarning)

            # key_files takes precedence over the deprecated key argument
            key_files = key if not key_files else key_files

        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.key_files = key_files
        self.timeout = timeout

    def connect(self):
        """
        Connect to the remote node over SSH.

        :return: True if the connection has been successfuly established, False
                 otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'connect not implemented for this ssh client')

    def put(self, path, contents=None, chmod=None, mode='w'):
        """
        Upload a file to the remote node.

        :type path: ``str``
        :keyword path: File path on the remote node.

        :type contents: ``str``
        :keyword contents: File Contents.

        :type chmod: ``int``
        :keyword chmod: chmod file to this after creation.

        :type mode: ``str``
        :keyword mode: Mode in which the file is opened.

        :return: Full path to the location where a file has been saved.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'put not implemented for this ssh client')

    def delete(self, path):
        """
        Delete/Unlink a file on the remote node.

        :type path: ``str``
        :keyword path: File path on the remote node.

        :return: True if the file has been successfuly deleted, False
                 otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete not implemented for this ssh client')

    def run(self, cmd):
        """
        Run a command on a remote node.

        :type cmd: ``str``
        :keyword cmd: Command to run.

        :return ``list`` of [stdout, stderr, exit_status]
        """
        raise NotImplementedError(
            'run not implemented for this ssh client')

    def close(self):
        """
        Shutdown connection to the remote node.

        :return: True if the connection has been successfuly closed, False
                 otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'close not implemented for this ssh client')

    def _get_and_setup_logger(self):
        """
        Return the shared 'libcloud.compute.ssh' logger, attaching a file
        handler when the LIBCLOUD_DEBUG environment variable is set.
        """
        logger = logging.getLogger('libcloud.compute.ssh')
        path = os.getenv('LIBCLOUD_DEBUG')

        if path:
            handler = logging.FileHandler(path)
            handler.setFormatter(ExtraLogFormatter())
            logger.addHandler(handler)
            logger.setLevel(logging.DEBUG)

        return logger
|
||||
|
||||
|
||||
class ParamikoSSHClient(BaseSSHClient):

    """
    A SSH Client powered by Paramiko.
    """
    def __init__(self, hostname, port=22, username='root', password=None,
                 key=None, key_files=None, key_material=None, timeout=None):
        """
        Authentication is always attempted in the following order:

        - The key passed in (if key is provided)
        - Any key we can find through an SSH agent (only if no password and
          key is provided)
        - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (only if no
          password and key is provided)
        - Plain username/password auth, if a password was given (if password is
          provided)
        """
        if key_files and key_material:
            raise ValueError(('key_files and key_material arguments are '
                              'mutually exclusive'))

        super(ParamikoSSHClient, self).__init__(hostname=hostname, port=port,
                                                username=username,
                                                password=password,
                                                key=key,
                                                key_files=key_files,
                                                timeout=timeout)

        # key_material is a key as an in-memory string rather than a path
        self.key_material = key_material

        # Unknown host keys are accepted automatically.
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.logger = self._get_and_setup_logger()

    def connect(self):
        conninfo = {'hostname': self.hostname,
                    'port': self.port,
                    'username': self.username,
                    'allow_agent': False,
                    'look_for_keys': False}

        if self.password:
            conninfo['password'] = self.password

        if self.key_files:
            conninfo['key_filename'] = self.key_files

        if self.key_material:
            conninfo['pkey'] = self._get_pkey_object(key=self.key_material)

        # No explicit credentials: let paramiko fall back to the SSH agent
        # and the default keys in ~/.ssh.
        if not self.password and not (self.key_files or self.key_material):
            conninfo['allow_agent'] = True
            conninfo['look_for_keys'] = True

        if self.timeout:
            conninfo['timeout'] = self.timeout

        extra = {'_hostname': self.hostname, '_port': self.port,
                 '_username': self.username, '_timeout': self.timeout}
        self.logger.debug('Connecting to server', extra=extra)

        self.client.connect(**conninfo)
        return True

    def put(self, path, contents=None, chmod=None, mode='w'):
        extra = {'_path': path, '_mode': mode, '_chmod': chmod}
        self.logger.debug('Uploading file', extra=extra)

        sftp = self.client.open_sftp()
        # less than ideal, but we need to mkdir stuff otherwise file() fails
        head, tail = psplit(path)

        if path[0] == "/":
            sftp.chdir("/")
        else:
            # Relative path - start from a home directory (~)
            sftp.chdir('.')

        # Walk the directory components, creating each one as needed.
        for part in head.split("/"):
            if part != "":
                try:
                    sftp.mkdir(part)
                except IOError:
                    # so, there doesn't seem to be a way to
                    # catch EEXIST consistently *sigh*
                    pass
                sftp.chdir(part)

        cwd = sftp.getcwd()

        ak = sftp.file(tail, mode=mode)
        ak.write(contents)
        if chmod is not None:
            ak.chmod(chmod)
        ak.close()
        sftp.close()

        if path[0] == '/':
            file_path = path
        else:
            file_path = pjoin(cwd, path)

        return file_path

    def delete(self, path):
        extra = {'_path': path}
        self.logger.debug('Deleting file', extra=extra)

        sftp = self.client.open_sftp()
        sftp.unlink(path)
        sftp.close()
        return True

    def run(self, cmd, timeout=None):
        """
        Note: This function is based on paramiko's exec_command()
        method.

        :param timeout: How long to wait (in seconds) for the command to
                        finish (optional).
        :type timeout: ``float``
        """
        extra = {'_cmd': cmd}
        self.logger.debug('Executing command', extra=extra)

        # Use the system default buffer size
        bufsize = -1

        transport = self.client.get_transport()
        chan = transport.open_session()

        start_time = time.time()
        chan.exec_command(cmd)

        stdout = StringIO()
        stderr = StringIO()

        # Create a stdin file and immediately close it to prevent any
        # interactive script from hanging the process.
        stdin = chan.makefile('wb', bufsize)
        stdin.close()

        # Receive all the output
        # Note #1: This is used instead of chan.makefile approach to prevent
        # buffering issues and hanging if the executed command produces a lot
        # of output.
        #
        # Note #2: If you are going to remove "ready" checks inside the loop
        # you are going to have a bad time. Trying to consume from a channel
        # which is not ready will block for indefinitely.
        exit_status_ready = chan.exit_status_ready()

        while not exit_status_ready:
            current_time = time.time()
            elapsed_time = (current_time - start_time)

            if timeout and (elapsed_time > timeout):
                # TODO: Is this the right way to clean up?
                chan.close()

                raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout)

            # Drain stdout until the channel momentarily runs dry.
            if chan.recv_ready():
                data = chan.recv(CHUNK_SIZE)

                while data:
                    stdout.write(data)
                    ready = chan.recv_ready()

                    if not ready:
                        break

                    data = chan.recv(CHUNK_SIZE)

            # Same draining loop for stderr.
            if chan.recv_stderr_ready():
                data = chan.recv_stderr(CHUNK_SIZE)

                while data:
                    stderr.write(data)
                    ready = chan.recv_stderr_ready()

                    if not ready:
                        break

                    data = chan.recv_stderr(CHUNK_SIZE)

            # We need to check the exit status here, because the command could
            # print some output and exit during the sleep below.
            exit_status_ready = chan.exit_status_ready()

            if exit_status_ready:
                break

            # Short sleep to prevent busy waiting
            time.sleep(1.5)

        # Receive the exit status code of the command we ran.
        status = chan.recv_exit_status()

        stdout = stdout.getvalue()
        stderr = stderr.getvalue()

        extra = {'_status': status, '_stdout': stdout, '_stderr': stderr}
        self.logger.debug('Command finished', extra=extra)

        return [stdout, stderr, status]

    def close(self):
        self.logger.debug('Closing server connection')

        self.client.close()
        return True

    def _get_pkey_object(self, key):
        """
        Try to detect private key type and return paramiko.PKey object.
        """

        # Try each supported key class in turn; the first one that parses
        # the material wins.
        for cls in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey]:
            try:
                key = cls.from_private_key(StringIO(key))
            except paramiko.ssh_exception.SSHException:
                # Invalid key, try other key type
                pass
            else:
                return key

        msg = 'Invalid or unsupported key type'
        raise paramiko.ssh_exception.SSHException(msg)
|
||||
|
||||
|
||||
class ShellOutSSHClient(BaseSSHClient):
    """
    This client shells out to "ssh" binary to run commands on the remote
    server.

    Note: This client should not be used in production.
    """

    def __init__(self, hostname, port=22, username='root', password=None,
                 key=None, key_files=None, timeout=None):
        super(ShellOutSSHClient, self).__init__(hostname=hostname,
                                                port=port, username=username,
                                                password=password,
                                                key=key,
                                                key_files=key_files,
                                                timeout=timeout)
        if self.password:
            raise ValueError('ShellOutSSHClient only supports key auth')

        # Probe for an "ssh" binary on PATH; 127 is the shell's
        # command-not-found exit code.
        child = subprocess.Popen(['ssh'], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        child.communicate()

        if child.returncode == 127:
            raise ValueError('ssh client is not available')

        self.logger = self._get_and_setup_logger()

    def connect(self):
        """
        This client doesn't support persistent connections establish a new
        connection every time "run" method is called.
        """
        return True

    def run(self, cmd):
        return self._run_remote_shell_command([cmd])

    def put(self, path, contents=None, chmod=None, mode='w'):
        if mode == 'w':
            redirect = '>'
        elif mode == 'a':
            redirect = '>>'
        else:
            raise ValueError('Invalid mode: ' + mode)

        # NOTE(review): contents and path are interpolated unescaped into a
        # remote shell command -- special characters (quotes, $, backticks)
        # in either will be interpreted by the remote shell. Acceptable only
        # because this client is explicitly not for production use.
        cmd = ['echo "%s" %s %s' % (contents, redirect, path)]
        self._run_remote_shell_command(cmd)
        return path

    def delete(self, path):
        cmd = ['rm', '-rf', path]
        self._run_remote_shell_command(cmd)
        return True

    def close(self):
        # Nothing to tear down: each run() spawns its own ssh process.
        return True

    def _get_base_ssh_command(self):
        # Build the "ssh [-i key] [-oConnectTimeout=N] user@host" prefix.
        cmd = ['ssh']

        if self.key_files:
            cmd += ['-i', self.key_files]

        if self.timeout:
            cmd += ['-oConnectTimeout=%s' % (self.timeout)]

        cmd += ['%s@%s' % (self.username, self.hostname)]

        return cmd

    def _run_remote_shell_command(self, cmd):
        """
        Run a command on a remote server.

        :param cmd: Command to run.
        :type cmd: ``list`` of ``str``

        :return: Command stdout, stderr and status code.
        :rtype: ``tuple``
        """
        base_cmd = self._get_base_ssh_command()
        full_cmd = base_cmd + [' '.join(cmd)]

        self.logger.debug('Executing command: "%s"' % (' '.join(full_cmd)))

        child = subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        stdout, stderr = child.communicate()
        return (stdout, stderr, child.returncode)
|
||||
|
||||
|
||||
class MockSSHClient(BaseSSHClient):
    # Placeholder implementation bound to the module-level ``SSHClient``
    # name when paramiko is not importable (see the assignment below the
    # class definitions). Inherits everything from BaseSSHClient unchanged.
    pass
|
||||
|
||||
|
||||
# Pick the concrete SSH client implementation for this process: the
# paramiko-backed client when paramiko imported successfully, otherwise
# the mock fallback.
if have_paramiko:
    SSHClient = ParamikoSSHClient
else:
    SSHClient = MockSSHClient
|
||||
249
awx/lib/site-packages/libcloud/compute/types.py
Normal file
249
awx/lib/site-packages/libcloud/compute/types.py
Normal file
@@ -0,0 +1,249 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Base types used by other parts of libcloud
|
||||
"""
|
||||
|
||||
from libcloud.common.types import LibcloudError, MalformedResponseError
|
||||
from libcloud.common.types import InvalidCredsError, InvalidCredsException
|
||||
|
||||
# Public API of this module. The trailing entries re-export names imported
# from libcloud.common.types for backward compatibility.
__all__ = [
    "Provider",
    "NodeState",
    "DeploymentError",
    "DeploymentException",

    # @@TR: should the unused imports below be exported?
    "LibcloudError",
    "MalformedResponseError",
    "InvalidCredsError",
    "InvalidCredsException",
    "DEPRECATED_RACKSPACE_PROVIDERS",
    "OLD_CONSTANT_TO_NEW_MAPPING"
]
|
||||
|
||||
|
||||
class Provider(object):
    """
    Defines for each of the supported providers

    :cvar DUMMY: Example provider
    :cvar EC2_US_EAST: Amazon AWS US N. Virginia
    :cvar EC2_US_WEST: Amazon AWS US N. California
    :cvar EC2_EU_WEST: Amazon AWS EU Ireland
    :cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers
    :cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers
    :cvar GCE: Google Compute Engine
    :cvar GOGRID: GoGrid
    :cvar VPSNET: VPS.net
    :cvar LINODE: Linode.com
    :cvar VCLOUD: vmware vCloud
    :cvar RIMUHOSTING: RimuHosting.com
    :cvar ECP: Enomaly
    :cvar IBM: IBM Developer Cloud
    :cvar OPENNEBULA: OpenNebula.org
    :cvar DREAMHOST: DreamHost Private Server
    :cvar ELASTICHOSTS: ElasticHosts.com
    :cvar CLOUDSIGMA: CloudSigma
    :cvar NIMBUS: Nimbus
    :cvar BLUEBOX: Bluebox
    :cvar OPSOURCE: Opsource Cloud
    :cvar NINEFOLD: Ninefold
    :cvar TERREMARK: Terremark
    :cvar EC2_US_WEST_OREGON: Amazon AWS US West 2 (Oregon)
    :cvar CLOUDSTACK: CloudStack
    :cvar CLOUDSIGMA_US: CloudSigma US Las Vegas
    :cvar LIBVIRT: Libvirt driver
    :cvar JOYENT: Joyent driver
    :cvar VCL: VCL driver
    :cvar KTUCLOUD: kt ucloud driver
    :cvar GRIDSPOT: Gridspot driver
    :cvar ABIQUO: Abiquo driver
    :cvar NEPHOSCALE: NephoScale driver
    :cvar EXOSCALE: Exoscale driver.
    :cvar IKOULA: Ikoula driver.
    :cvar OUTSCALE_SAS: Outscale SAS driver.
    :cvar OUTSCALE_INC: Outscale INC driver.
    """
    # Each value is the string identifier used to look up a driver via
    # the provider factory functions.
    DUMMY = 'dummy'
    EC2 = 'ec2_us_east'
    RACKSPACE = 'rackspace'
    GCE = 'gce'
    GOGRID = 'gogrid'
    VPSNET = 'vpsnet'
    LINODE = 'linode'
    VCLOUD = 'vcloud'
    RIMUHOSTING = 'rimuhosting'
    VOXEL = 'voxel'
    SOFTLAYER = 'softlayer'
    EUCALYPTUS = 'eucalyptus'
    ECP = 'ecp'
    IBM = 'ibm'
    OPENNEBULA = 'opennebula'
    DREAMHOST = 'dreamhost'
    ELASTICHOSTS = 'elastichosts'
    BRIGHTBOX = 'brightbox'
    CLOUDSIGMA = 'cloudsigma'
    NIMBUS = 'nimbus'
    BLUEBOX = 'bluebox'
    GANDI = 'gandi'
    OPSOURCE = 'opsource'
    OPENSTACK = 'openstack'
    SKALICLOUD = 'skalicloud'
    SERVERLOVE = 'serverlove'
    NINEFOLD = 'ninefold'
    TERREMARK = 'terremark'
    CLOUDSTACK = 'cloudstack'
    LIBVIRT = 'libvirt'
    JOYENT = 'joyent'
    VCL = 'vcl'
    KTUCLOUD = 'ktucloud'
    GRIDSPOT = 'gridspot'
    RACKSPACE_FIRST_GEN = 'rackspace_first_gen'
    HOSTVIRTUAL = 'hostvirtual'
    ABIQUO = 'abiquo'
    DIGITAL_OCEAN = 'digitalocean'
    NEPHOSCALE = 'nephoscale'
    CLOUDFRAMES = 'cloudframes'
    EXOSCALE = 'exoscale'
    IKOULA = 'ikoula'
    OUTSCALE_SAS = 'outscale_sas'
    OUTSCALE_INC = 'outscale_inc'

    # OpenStack based providers
    HPCLOUD = 'hpcloud'
    KILI = 'kili'

    # Deprecated constants which are still supported
    # (note that several region-specific names share the identifier of a
    # non-deprecated constant above, e.g. EC2_US_EAST == EC2).
    EC2_US_EAST = 'ec2_us_east'
    EC2_EU = 'ec2_eu_west'  # deprecated name
    EC2_EU_WEST = 'ec2_eu_west'
    EC2_US_WEST = 'ec2_us_west'
    EC2_AP_SOUTHEAST = 'ec2_ap_southeast'
    EC2_AP_NORTHEAST = 'ec2_ap_northeast'
    EC2_US_WEST_OREGON = 'ec2_us_west_oregon'
    EC2_SA_EAST = 'ec2_sa_east'
    EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2'

    ELASTICHOSTS_UK1 = 'elastichosts_uk1'
    ELASTICHOSTS_UK2 = 'elastichosts_uk2'
    ELASTICHOSTS_US1 = 'elastichosts_us1'
    ELASTICHOSTS_US2 = 'elastichosts_us2'
    ELASTICHOSTS_US3 = 'elastichosts_us3'
    ELASTICHOSTS_CA1 = 'elastichosts_ca1'
    ELASTICHOSTS_AU1 = 'elastichosts_au1'
    ELASTICHOSTS_CN1 = 'elastichosts_cn1'

    CLOUDSIGMA_US = 'cloudsigma_us'

    # Deprecated constants which aren't supported anymore
    # (see DEPRECATED_RACKSPACE_PROVIDERS / OLD_CONSTANT_TO_NEW_MAPPING
    # below for how they are remapped).
    RACKSPACE_UK = 'rackspace_uk'
    RACKSPACE_NOVA_BETA = 'rackspace_nova_beta'
    RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw'
    RACKSPACE_NOVA_LON = 'rackspace_nova_lon'
    RACKSPACE_NOVA_ORD = 'rackspace_nova_ord'

    # Removed
    # SLICEHOST = 'slicehost'
|
||||
|
||||
|
||||
# Rackspace provider constants that are no longer supported as-is; drivers
# translate them using OLD_CONSTANT_TO_NEW_MAPPING below.
DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK,
                                  Provider.RACKSPACE_NOVA_BETA,
                                  Provider.RACKSPACE_NOVA_DFW,
                                  Provider.RACKSPACE_NOVA_LON,
                                  Provider.RACKSPACE_NOVA_ORD]
# Maps a deprecated provider constant to its current replacement. The old
# first-gen "rackspace" identifier now maps to RACKSPACE_FIRST_GEN, while
# the nova-era constants map to the next-gen RACKSPACE provider.
OLD_CONSTANT_TO_NEW_MAPPING = {
    Provider.RACKSPACE: Provider.RACKSPACE_FIRST_GEN,
    Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN,

    Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE,
    Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE,
    Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE,
    Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE
}
|
||||
|
||||
|
||||
class NodeState(object):
    """
    Standard states for a node

    :cvar RUNNING: Node is running.
    :cvar REBOOTING: Node is rebooting.
    :cvar TERMINATED: Node is terminated. This node can't be started later on.
    :cvar STOPPED: Node is stopped. This node can be started later on.
    :cvar PENDING: Node is pending.
    :cvar UNKNOWN: Node state is unknown.
    """
    # NOTE: the numeric values are part of the public API (they appear in
    # serialized node data); do not renumber.
    RUNNING = 0
    REBOOTING = 1
    TERMINATED = 2
    PENDING = 3
    UNKNOWN = 4
    STOPPED = 5
|
||||
|
||||
|
||||
class Architecture(object):
    """
    Image and size architectures.

    :cvar I386: i386 (32 bit)
    :cvar X86_X64: x86_64 (64 bit)
    """
    I386 = 0
    X86_X64 = 1

    # Backward/forward-compatible alias: the docstring historically
    # documented this constant as X86_64 while the code spelled it
    # X86_X64. Keep both names pointing at the same value.
    X86_64 = X86_X64
|
||||
|
||||
|
||||
class DeploymentError(LibcloudError):
    """
    Exception used when a Deployment Task failed.

    :ivar node: :class:`Node` on which this exception happened, you might want
                to call :func:`Node.destroy`
    """

    def __init__(self, node, original_exception=None, driver=None):
        # Record the failing node, the underlying cause and the driver so
        # callers can inspect (or destroy) the node afterwards.
        self.node = node
        self.value = original_exception
        self.driver = driver

    def __repr__(self):
        return ('<DeploymentError: node=%s, error=%s, driver=%s>'
                % (self.node.id, str(self.value), str(self.driver)))

    # The string form is identical to the repr form.
    __str__ = __repr__
|
||||
|
||||
|
||||
class KeyPairError(LibcloudError):
    """
    Base error for key pair related failures.
    """
    # Class name rendered in __repr__; subclasses override it.
    error_type = 'KeyPairError'

    def __init__(self, name, driver):
        self.name = name
        self.value = 'Key pair with name %s does not exist' % (name)
        super(KeyPairError, self).__init__(value=self.value, driver=driver)

    def __repr__(self):
        return ('<%s name=%s, value=%s, driver=%s>' %
                (self.error_type, self.name, self.value, self.driver.name))

    # The string form is identical to the repr form.
    __str__ = __repr__
|
||||
|
||||
|
||||
class KeyPairDoesNotExistError(KeyPairError):
    # Same behavior as KeyPairError; only the name shown in __repr__
    # changes.
    error_type = 'KeyPairDoesNotExistError'
|
||||
|
||||
|
||||
"""Deprecated alias of :class:`DeploymentException`"""
|
||||
DeploymentException = DeploymentError
|
||||
566
awx/lib/site-packages/libcloud/data/pricing.json
Normal file
566
awx/lib/site-packages/libcloud/data/pricing.json
Normal file
@@ -0,0 +1,566 @@
|
||||
{
|
||||
"compute": {
|
||||
"ec2_us_west_oregon": {
|
||||
"m3.medium": "0.070",
|
||||
"m3.large": "0.140",
|
||||
"m3.xlarge": "0.280",
|
||||
"m3.2xlarge": "0.560",
|
||||
"m1.small": "0.044",
|
||||
"m1.medium": "0.087",
|
||||
"m1.large": "0.175",
|
||||
"m1.xlarge": "0.350",
|
||||
"c3.large": "0.105",
|
||||
"c3.xlarge": "0.210",
|
||||
"c3.2xlarge": "0.420",
|
||||
"c3.4xlarge": "0.840",
|
||||
"c3.8xlarge": "1.680",
|
||||
"c1.medium": "0.130",
|
||||
"c1.xlarge": "0.520",
|
||||
"cc2.8xlarge": "2.000",
|
||||
"g2.2xlarge": "0.650",
|
||||
"r3.large": "0.175",
|
||||
"r3.xlarge": "0.350",
|
||||
"r3.2xlarge": "0.700",
|
||||
"r3.4xlarge": "1.400",
|
||||
"r3.8xlarge": "2.800",
|
||||
"m2.xlarge": "0.245",
|
||||
"m2.2xlarge": "0.490",
|
||||
"m2.4xlarge": "0.980",
|
||||
"cr1.8xlarge": "3.500",
|
||||
"i2.xlarge": "0.853",
|
||||
"i2.2xlarge": "1.705",
|
||||
"i2.4xlarge": "3.410",
|
||||
"i2.8xlarge": "6.820",
|
||||
"hs1.8xlarge": "4.600",
|
||||
"hi1.4xlarge": "3.100",
|
||||
"t1.micro": "0.020"
|
||||
},
|
||||
"ec2_us_west": {
|
||||
"m3.medium": "0.077",
|
||||
"m3.large": "0.154",
|
||||
"m3.xlarge": "0.308",
|
||||
"m3.2xlarge": "0.616",
|
||||
"m1.small": "0.047",
|
||||
"m1.medium": "0.095",
|
||||
"m1.large": "0.190",
|
||||
"m1.xlarge": "0.379",
|
||||
"c3.large": "0.120",
|
||||
"c3.xlarge": "0.239",
|
||||
"c3.2xlarge": "0.478",
|
||||
"c3.4xlarge": "0.956",
|
||||
"c3.8xlarge": "1.912",
|
||||
"c1.medium": "0.148",
|
||||
"c1.xlarge": "0.592",
|
||||
"g2.2xlarge": "0.702",
|
||||
"r3.large": "0.195",
|
||||
"r3.xlarge": "0.390",
|
||||
"r3.2xlarge": "0.780",
|
||||
"r3.4xlarge": "1.560",
|
||||
"r3.8xlarge": "3.120",
|
||||
"m2.xlarge": "0.275",
|
||||
"m2.2xlarge": "0.550",
|
||||
"m2.4xlarge": "1.100",
|
||||
"i2.xlarge": "0.938",
|
||||
"i2.2xlarge": "1.876",
|
||||
"i2.4xlarge": "3.751",
|
||||
"i2.8xlarge": "7.502",
|
||||
"t1.micro": "0.025"
|
||||
},
|
||||
"ec2_eu_west": {
|
||||
"m3.medium": "0.077",
|
||||
"m3.large": "0.154",
|
||||
"m3.xlarge": "0.308",
|
||||
"m3.2xlarge": "0.616",
|
||||
"m1.small": "0.047",
|
||||
"m1.medium": "0.095",
|
||||
"m1.large": "0.190",
|
||||
"m1.xlarge": "0.379",
|
||||
"c3.large": "0.120",
|
||||
"c3.xlarge": "0.239",
|
||||
"c3.2xlarge": "0.478",
|
||||
"c3.4xlarge": "0.956",
|
||||
"c3.8xlarge": "1.912",
|
||||
"c1.medium": "0.148",
|
||||
"c1.xlarge": "0.592",
|
||||
"cc2.8xlarge": "2.250",
|
||||
"g2.2xlarge": "0.702",
|
||||
"cg1.4xlarge": "2.360",
|
||||
"r3.large": "0.195",
|
||||
"r3.xlarge": "0.390",
|
||||
"r3.2xlarge": "0.780",
|
||||
"r3.4xlarge": "1.560",
|
||||
"r3.8xlarge": "3.120",
|
||||
"m2.xlarge": "0.275",
|
||||
"m2.2xlarge": "0.550",
|
||||
"m2.4xlarge": "1.100",
|
||||
"cr1.8xlarge": "3.750",
|
||||
"i2.xlarge": "0.938",
|
||||
"i2.2xlarge": "1.876",
|
||||
"i2.4xlarge": "3.751",
|
||||
"i2.8xlarge": "7.502",
|
||||
"hs1.8xlarge": "4.900",
|
||||
"hi1.4xlarge": "3.100",
|
||||
"t1.micro": "0.020"
|
||||
},
|
||||
"rackspacenovalon": {
|
||||
"performance2-60": 2.72,
|
||||
"performance2-120": 5.44,
|
||||
"performance1-1": 0.04,
|
||||
"performance2-15": 0.68,
|
||||
"performance1-4": 0.16,
|
||||
"performance2-30": 1.36,
|
||||
"performance2-90": 4.08,
|
||||
"3": 0.064,
|
||||
"2": 0.032,
|
||||
"performance1-2": 0.08,
|
||||
"4": 0.129,
|
||||
"7": 0.967,
|
||||
"6": 0.516,
|
||||
"5": 0.258,
|
||||
"performance1-8": 0.32,
|
||||
"8": 1.612
|
||||
},
|
||||
"ec2_ap_southeast_2": {
|
||||
"m3.medium": "0.098",
|
||||
"m3.large": "0.196",
|
||||
"m3.xlarge": "0.392",
|
||||
"m3.2xlarge": "0.784",
|
||||
"m1.small": "0.058",
|
||||
"m1.medium": "0.117",
|
||||
"m1.large": "0.233",
|
||||
"m1.xlarge": "0.467",
|
||||
"c3.large": "0.132",
|
||||
"c3.xlarge": "0.265",
|
||||
"c3.2xlarge": "0.529",
|
||||
"c3.4xlarge": "1.058",
|
||||
"c3.8xlarge": "2.117",
|
||||
"c1.medium": "0.164",
|
||||
"c1.xlarge": "0.655",
|
||||
"r3.large": "0.210",
|
||||
"r3.xlarge": "0.420",
|
||||
"r3.2xlarge": "0.840",
|
||||
"r3.4xlarge": "1.680",
|
||||
"r3.8xlarge": "3.360",
|
||||
"m2.xlarge": "0.296",
|
||||
"m2.2xlarge": "0.592",
|
||||
"m2.4xlarge": "1.183",
|
||||
"i2.xlarge": "1.018",
|
||||
"i2.2xlarge": "2.035",
|
||||
"i2.4xlarge": "4.070",
|
||||
"i2.8xlarge": "8.140",
|
||||
"hs1.8xlarge": "5.570",
|
||||
"t1.micro": "0.020"
|
||||
},
|
||||
"vps_net": {
|
||||
"1": 0.416
|
||||
},
|
||||
"ec2_us_east": {
|
||||
"m3.medium": "0.070",
|
||||
"m3.large": "0.140",
|
||||
"m3.xlarge": "0.280",
|
||||
"m3.2xlarge": "0.560",
|
||||
"m1.small": "0.044",
|
||||
"m1.medium": "0.087",
|
||||
"m1.large": "0.175",
|
||||
"m1.xlarge": "0.350",
|
||||
"c3.large": "0.105",
|
||||
"c3.xlarge": "0.210",
|
||||
"c3.2xlarge": "0.420",
|
||||
"c3.4xlarge": "0.840",
|
||||
"c3.8xlarge": "1.680",
|
||||
"c1.medium": "0.130",
|
||||
"c1.xlarge": "0.520",
|
||||
"cc2.8xlarge": "2.000",
|
||||
"g2.2xlarge": "0.650",
|
||||
"cg1.4xlarge": "2.100",
|
||||
"r3.large": "0.175",
|
||||
"r3.xlarge": "0.350",
|
||||
"r3.2xlarge": "0.700",
|
||||
"r3.4xlarge": "1.400",
|
||||
"r3.8xlarge": "2.800",
|
||||
"m2.xlarge": "0.245",
|
||||
"m2.2xlarge": "0.490",
|
||||
"m2.4xlarge": "0.980",
|
||||
"cr1.8xlarge": "3.500",
|
||||
"i2.xlarge": "0.853",
|
||||
"i2.2xlarge": "1.705",
|
||||
"i2.4xlarge": "3.410",
|
||||
"i2.8xlarge": "6.820",
|
||||
"hs1.8xlarge": "4.600",
|
||||
"hi1.4xlarge": "3.100",
|
||||
"t1.micro": "0.020"
|
||||
},
|
||||
"rackspacenovaus": {
|
||||
"performance2-60": 2.72,
|
||||
"performance2-120": 5.44,
|
||||
"performance1-1": 0.04,
|
||||
"performance2-15": 0.68,
|
||||
"performance1-4": 0.16,
|
||||
"performance2-30": 1.36,
|
||||
"performance2-90": 4.08,
|
||||
"3": 0.06,
|
||||
"2": 0.022,
|
||||
"performance1-2": 0.08,
|
||||
"4": 0.12,
|
||||
"7": 0.96,
|
||||
"6": 0.48,
|
||||
"5": 0.24,
|
||||
"performance1-8": 0.32,
|
||||
"8": 1.2
|
||||
},
|
||||
"ec2_sa_east": {
|
||||
"m3.medium": "0.095",
|
||||
"m3.large": "0.190",
|
||||
"m3.xlarge": "0.381",
|
||||
"m3.2xlarge": "0.761",
|
||||
"m1.small": "0.058",
|
||||
"m1.medium": "0.117",
|
||||
"m1.large": "0.233",
|
||||
"m1.xlarge": "0.467",
|
||||
"c1.medium": "0.179",
|
||||
"c1.xlarge": "0.718",
|
||||
"m2.xlarge": "0.323",
|
||||
"m2.2xlarge": "0.645",
|
||||
"m2.4xlarge": "1.291",
|
||||
"t1.micro": "0.027"
|
||||
},
|
||||
"cloudsigma_zrh": {
|
||||
"high-cpu-medium": 0.211,
|
||||
"standard-large": 0.381,
|
||||
"micro-high-cpu": 0.381,
|
||||
"standard-extra-large": 0.762,
|
||||
"high-memory-double-extra-large": 1.383,
|
||||
"micro-regular": 0.0548,
|
||||
"standard-small": 0.0796,
|
||||
"high-memory-extra-large": 0.642,
|
||||
"high-cpu-extra-large": 0.78
|
||||
},
|
||||
"rackspacenovasyd": {
|
||||
"performance2-60": 2.72,
|
||||
"performance2-120": 5.44,
|
||||
"performance1-1": 0.04,
|
||||
"performance2-15": 0.68,
|
||||
"performance1-4": 0.16,
|
||||
"performance2-30": 1.36,
|
||||
"performance2-90": 4.08,
|
||||
"3": 0.072,
|
||||
"2": 0.026,
|
||||
"performance1-2": 0.08,
|
||||
"4": 0.144,
|
||||
"7": 1.08,
|
||||
"6": 0.576,
|
||||
"5": 0.288,
|
||||
"performance1-8": 0.32,
|
||||
"8": 1.44
|
||||
},
|
||||
"ec2_ap_northeast": {
|
||||
"m3.medium": "0.101",
|
||||
"m3.large": "0.203",
|
||||
"m3.xlarge": "0.405",
|
||||
"m3.2xlarge": "0.810",
|
||||
"m1.small": "0.061",
|
||||
"m1.medium": "0.122",
|
||||
"m1.large": "0.243",
|
||||
"m1.xlarge": "0.486",
|
||||
"c3.large": "0.128",
|
||||
"c3.xlarge": "0.255",
|
||||
"c3.2xlarge": "0.511",
|
||||
"c3.4xlarge": "1.021",
|
||||
"c3.8xlarge": "2.043",
|
||||
"c1.medium": "0.158",
|
||||
"c1.xlarge": "0.632",
|
||||
"cc2.8xlarge": "2.349",
|
||||
"g2.2xlarge": "0.898",
|
||||
"r3.large": "0.210",
|
||||
"r3.xlarge": "0.420",
|
||||
"r3.2xlarge": "0.840",
|
||||
"r3.4xlarge": "1.680",
|
||||
"r3.8xlarge": "3.360",
|
||||
"m2.xlarge": "0.287",
|
||||
"m2.2xlarge": "0.575",
|
||||
"m2.4xlarge": "1.150",
|
||||
"cr1.8xlarge": "4.105",
|
||||
"i2.xlarge": "1.001",
|
||||
"i2.2xlarge": "2.001",
|
||||
"i2.4xlarge": "4.002",
|
||||
"i2.8xlarge": "8.004",
|
||||
"hs1.8xlarge": "5.400",
|
||||
"hi1.4xlarge": "3.276",
|
||||
"t1.micro": "0.026"
|
||||
},
|
||||
"gogrid": {
|
||||
"24GB": 4.56,
|
||||
"512MB": 0.095,
|
||||
"8GB": 1.52,
|
||||
"4GB": 0.76,
|
||||
"2GB": 0.38,
|
||||
"1GB": 0.19,
|
||||
"16GB": 3.04
|
||||
},
|
||||
"serverlove": {
|
||||
"high-cpu-medium": 0.291,
|
||||
"medium": 0.404,
|
||||
"large": 0.534,
|
||||
"small": 0.161,
|
||||
"extra-large": 0.615,
|
||||
"high-cpu-extra-large": 0.776
|
||||
},
|
||||
"elastichosts": {
|
||||
"high-cpu-medium": 0.18,
|
||||
"medium": 0.223,
|
||||
"large": 0.378,
|
||||
"small": 0.1,
|
||||
"extra-large": 0.579,
|
||||
"high-cpu-extra-large": 0.77
|
||||
},
|
||||
"rackspace": {
|
||||
"performance2-60": 2.72,
|
||||
"performance2-120": 5.44,
|
||||
"performance1-1": 0.04,
|
||||
"performance2-15": 0.68,
|
||||
"performance1-4": 0.16,
|
||||
"performance2-30": 1.36,
|
||||
"1": 0.015,
|
||||
"performance2-90": 4.08,
|
||||
"3": 0.06,
|
||||
"2": 0.03,
|
||||
"performance1-2": 0.08,
|
||||
"4": 0.12,
|
||||
"7": 0.96,
|
||||
"6": 0.48,
|
||||
"5": 0.24,
|
||||
"performance1-8": 0.32,
|
||||
"8": 1.8
|
||||
},
|
||||
"nephoscale": {
|
||||
"11": 0.35,
|
||||
"27": 0.0,
|
||||
"48": 0.15,
|
||||
"46": 0.1,
|
||||
"54": 0.938,
|
||||
"56": 0.75,
|
||||
"50": 0.28,
|
||||
"52": 0.48,
|
||||
"1": 0.6,
|
||||
"3": 0.063,
|
||||
"5": 0.031,
|
||||
"7": 0.125,
|
||||
"9": 0.188
|
||||
},
|
||||
"nimbus": {
|
||||
"m1.xlarge": 0.0,
|
||||
"m1.small": 0.0,
|
||||
"m1.large": 0.0
|
||||
},
|
||||
"gandi": {
|
||||
"1": 0.02,
|
||||
"small": 0.02,
|
||||
"large": 0.06,
|
||||
"medium": 0.03,
|
||||
"x-large": 0.12
|
||||
},
|
||||
"skalicloud": {
|
||||
"high-cpu-medium": 0.249,
|
||||
"medium": 0.301,
|
||||
"large": 0.505,
|
||||
"small": 0.136,
|
||||
"extra-large": 0.654,
|
||||
"high-cpu-extra-large": 0.936
|
||||
},
|
||||
"bluebox": {
|
||||
"4gb": 0.35,
|
||||
"2gb": 0.25,
|
||||
"8gb": 0.45,
|
||||
"1gb": 0.15
|
||||
},
|
||||
"ec2_ap_southeast": {
|
||||
"m3.medium": "0.098",
|
||||
"m3.large": "0.196",
|
||||
"m3.xlarge": "0.392",
|
||||
"m3.2xlarge": "0.784",
|
||||
"m1.small": "0.058",
|
||||
"m1.medium": "0.117",
|
||||
"m1.large": "0.233",
|
||||
"m1.xlarge": "0.467",
|
||||
"c3.large": "0.132",
|
||||
"c3.xlarge": "0.265",
|
||||
"c3.2xlarge": "0.529",
|
||||
"c3.4xlarge": "1.058",
|
||||
"c3.8xlarge": "2.117",
|
||||
"c1.medium": "0.164",
|
||||
"c1.xlarge": "0.655",
|
||||
"r3.large": "0.210",
|
||||
"r3.xlarge": "0.420",
|
||||
"r3.2xlarge": "0.840",
|
||||
"r3.4xlarge": "1.680",
|
||||
"r3.8xlarge": "3.360",
|
||||
"m2.xlarge": "0.296",
|
||||
"m2.2xlarge": "0.592",
|
||||
"m2.4xlarge": "1.183",
|
||||
"i2.xlarge": "1.018",
|
||||
"i2.2xlarge": "2.035",
|
||||
"i2.4xlarge": "4.070",
|
||||
"i2.8xlarge": "8.140",
|
||||
"hs1.8xlarge": "5.570",
|
||||
"t1.micro": "0.020"
|
||||
},
|
||||
"cloudsigma_lvs": {
|
||||
"high-cpu-medium": 0.0,
|
||||
"standard-large": 0.0,
|
||||
"micro-high-cpu": 0.0,
|
||||
"standard-extra-large": 0.0,
|
||||
"high-memory-double-extra-large": 0.0,
|
||||
"micro-regular": 0.0,
|
||||
"standard-small": 0.0,
|
||||
"high-memory-extra-large": 0.0,
|
||||
"high-cpu-extra-large": 0.0
|
||||
},
|
||||
"dreamhost": {
|
||||
"default": 115,
|
||||
"high": 150,
|
||||
"minimum": 15,
|
||||
"maximum": 200,
|
||||
"low": 50
|
||||
},
|
||||
"osc_sas_eu_west_3": {
|
||||
"t1.micro": "0.040",
|
||||
"m1.small": "0.090",
|
||||
"m1.medium": "0.130",
|
||||
"m1.large": "0.360",
|
||||
"m1.xlarge": "0.730",
|
||||
"c1.medium": "0.230",
|
||||
"c1.xlarge": "0.900",
|
||||
"m2.xlarge": "0.460",
|
||||
"m2.2xlarge": "0.920",
|
||||
"m2.4xlarge": "1.840",
|
||||
"nv1.small": "5.220",
|
||||
"nv1.medium": "5.310",
|
||||
"nv1.large": "5.490",
|
||||
"nv1.xlarge": "5.860",
|
||||
"cc1.4xlarge": "1.460",
|
||||
"cc2.8xlarge": "2.700",
|
||||
"m3.xlarge": "0.780",
|
||||
"m3.2xlarge": "1.560",
|
||||
"cr1.8xlarge": "3.750",
|
||||
"os1.8xlarge": "6.400",
|
||||
"os1.8xlarge": "6.400"
|
||||
},
|
||||
"osc_sas_eu_west_1": {
|
||||
"t1.micro": "0.040",
|
||||
"m1.small": "0.090",
|
||||
"m1.medium": "0.130",
|
||||
"m1.large": "0.360",
|
||||
"m1.xlarge": "0.730",
|
||||
"c1.medium": "0.230",
|
||||
"c1.xlarge": "0.900",
|
||||
"m2.xlarge": "0.460",
|
||||
"m2.2xlarge": "0.920",
|
||||
"m2.4xlarge": "1.840",
|
||||
"nv1.small": "5.220",
|
||||
"nv1.medium": "5.310",
|
||||
"nv1.large": "5.490",
|
||||
"nv1.xlarge": "5.860",
|
||||
"cc1.4xlarge": "1.460",
|
||||
"cc2.8xlarge": "2.700",
|
||||
"m3.xlarge": "0.780",
|
||||
"m3.2xlarge": "1.560",
|
||||
"cr1.8xlarge": "3.750",
|
||||
"os1.8xlarge": "6.400",
|
||||
"os1.8xlarge": "6.400"
|
||||
},
|
||||
"osc_sas_us_east_1": {
|
||||
"t1.micro": "0.020",
|
||||
"m1.small": "0.070",
|
||||
"m1.medium": "0.180",
|
||||
"m1.large": "0.260",
|
||||
"m1.xlarge": "0.730",
|
||||
"c1.medium": "0.170",
|
||||
"c1.xlarge": "0.660",
|
||||
"m2.xlarge": "0.460",
|
||||
"m2.2xlarge": "1.020",
|
||||
"m2.4xlarge": "2.040",
|
||||
"nv1.small": "5.220",
|
||||
"nv1.medium": "5.310",
|
||||
"nv1.large": "5.490",
|
||||
"nv1.xlarge": "5.860",
|
||||
"cc1.4xlarge": "1.610",
|
||||
"cc2.8xlarge": "2.700",
|
||||
"m3.xlarge": "0.550",
|
||||
"m3.2xlarge": "1.560",
|
||||
"cr1.8xlarge": "3.750",
|
||||
"os1.8xlarge": "6.400",
|
||||
"os1.8xlarge": "6.400"
|
||||
},
|
||||
"osc_inc_eu_west_1": {
|
||||
"t1.micro": "0.040",
|
||||
"m1.small": "0.090",
|
||||
"m1.medium": "0.120",
|
||||
"m1.large": "0.360",
|
||||
"m1.xlarge": "0.730",
|
||||
"c1.medium": "0.230",
|
||||
"c1.xlarge": "0.900",
|
||||
"m2.xlarge": "0.410",
|
||||
"m2.2xlarge": "0.820",
|
||||
"m2.4xlarge": "1.640",
|
||||
"nv1.small": "5.220",
|
||||
"nv1.medium": "5.250",
|
||||
"nv1.large": "5.490",
|
||||
"nv1.xlarge": "5.610",
|
||||
"cc1.4xlarge": "1.300",
|
||||
"cc2.8xlarge": "2.400",
|
||||
"m3.xlarge": "0.780",
|
||||
"m3.2xlarge": "1.560",
|
||||
"cr1.8xlarge": "3.500",
|
||||
"os1.8xlarge": "4.310",
|
||||
"os1.8xlarge": "4.310"
|
||||
},
|
||||
"osc_inc_eu_west_3": {
|
||||
"t1.micro": "0.040",
|
||||
"m1.small": "0.090",
|
||||
"m1.medium": "0.120",
|
||||
"m1.large": "0.360",
|
||||
"m1.xlarge": "0.730",
|
||||
"c1.medium": "0.230",
|
||||
"c1.xlarge": "0.900",
|
||||
"m2.xlarge": "0.410",
|
||||
"m2.2xlarge": "0.820",
|
||||
"m2.4xlarge": "1.640",
|
||||
"nv1.small": "5.220",
|
||||
"nv1.medium": "5.250",
|
||||
"nv1.large": "5.490",
|
||||
"nv1.xlarge": "5.610",
|
||||
"cc1.4xlarge": "1.300",
|
||||
"cc2.8xlarge": "2.400",
|
||||
"m3.xlarge": "0.780",
|
||||
"m3.2xlarge": "1.560",
|
||||
"cr1.8xlarge": "3.500",
|
||||
"os1.8xlarge": "4.310",
|
||||
"os1.8xlarge": "4.310"
|
||||
},
|
||||
"osc_inc_us_east_1": {
|
||||
"t1.micro": "0.020",
|
||||
"m1.small": "0.060",
|
||||
"m1.medium": "0.180",
|
||||
"m1.large": "0.240",
|
||||
"m1.xlarge": "0.730",
|
||||
"c1.medium": "0.150",
|
||||
"c1.xlarge": "0.580",
|
||||
"m2.xlarge": "0.410",
|
||||
"m2.2xlarge": "1.020",
|
||||
"m2.4xlarge": "2.040",
|
||||
"nv1.small": "5.190",
|
||||
"nv1.medium": "5.250",
|
||||
"nv1.large": "5.490",
|
||||
"nv1.xlarge": "5.610",
|
||||
"cc1.4xlarge": "1.610",
|
||||
"cc2.8xlarge": "2.400",
|
||||
"m3.xlarge": "0.500",
|
||||
"m3.2xlarge": "1.560",
|
||||
"cr1.8xlarge": "3.500",
|
||||
"os1.8xlarge": "6.400",
|
||||
"os1.8xlarge": "6.400"
|
||||
}
|
||||
},
|
||||
"storage": {},
|
||||
"updated": 1397154837
|
||||
}
|
||||
0
awx/lib/site-packages/libcloud/dns/__init__.py
Normal file
0
awx/lib/site-packages/libcloud/dns/__init__.py
Normal file
486
awx/lib/site-packages/libcloud/dns/base.py
Normal file
486
awx/lib/site-packages/libcloud/dns/base.py
Normal file
@@ -0,0 +1,486 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import with_statement
|
||||
|
||||
import datetime
|
||||
|
||||
from libcloud import __version__
|
||||
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
|
||||
from libcloud.dns.types import RecordType
|
||||
|
||||
__all__ = [
|
||||
'Zone',
|
||||
'Record',
|
||||
'DNSDriver'
|
||||
]
|
||||
|
||||
|
||||
class Zone(object):
    """
    DNS zone.
    """

    def __init__(self, id, domain, type, ttl, driver, extra=None):
        """
        :param id: Zone id.
        :type id: ``str``

        :param domain: The name of the domain.
        :type domain: ``str``

        :param type: Zone type (master, slave).
        :type type: ``str``

        :param ttl: Default TTL for records in this zone (in seconds).
        :type ttl: ``int``

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # Normalize the id to a string; a falsy id collapses to None.
        if id:
            self.id = str(id)
        else:
            self.id = None

        self.domain = domain
        self.type = type
        # A falsy TTL is treated the same as "not provided".
        self.ttl = ttl if ttl else None
        self.driver = driver
        self.extra = extra if extra else {}

    def list_records(self):
        # Delegate to the driver which owns the API connection.
        return self.driver.list_records(zone=self)

    def create_record(self, name, type, data, extra=None):
        return self.driver.create_record(
            name=name, zone=self, type=type, data=data, extra=extra)

    def update(self, domain=None, type=None, ttl=None, extra=None):
        return self.driver.update_zone(
            zone=self, domain=domain, type=type, ttl=ttl, extra=extra)

    def delete(self):
        return self.driver.delete_zone(zone=self)

    def export_to_bind_format(self):
        return self.driver.export_zone_to_bind_format(zone=self)

    def export_to_bind_zone_file(self, file_path):
        self.driver.export_zone_to_bind_zone_file(
            zone=self, file_path=file_path)

    def __repr__(self):
        return '<Zone: domain={0}, ttl={1}, provider={2} ...>'.format(
            self.domain, self.ttl, self.driver.name)
|
||||
|
||||
|
||||
class Record(object):
    """
    Zone record / resource.
    """

    def __init__(self, id, name, type, data, zone, driver, extra=None):
        """
        :param id: Record id
        :type id: ``str``

        :param name: Hostname or FQDN.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param zone: Zone instance.
        :type zone: :class:`Zone`

        :param driver: DNSDriver instance.
        :type driver: :class:`DNSDriver`

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``
        """
        # Normalize the id to a string; a falsy id collapses to None.
        if id:
            self.id = str(id)
        else:
            self.id = None

        self.name = name
        self.type = type
        self.data = data
        self.zone = zone
        self.driver = driver
        self.extra = extra if extra else {}

    def update(self, name=None, type=None, data=None, extra=None):
        return self.driver.update_record(
            record=self, name=name, type=type, data=data, extra=extra)

    def delete(self):
        return self.driver.delete_record(record=self)

    def _get_numeric_id(self):
        # Return the id as an int when it is purely numeric, otherwise
        # return the original string value.
        if self.id.isdigit():
            return int(self.id)
        return self.id

    def __repr__(self):
        return ('<Record: zone={0}, name={1}, type={2}, data={3}, '
                'provider={4} ...>'.format(self.zone.id, self.name,
                                           self.type, self.data,
                                           self.driver.name))
|
||||
|
||||
|
||||
class DNSDriver(BaseDriver):
    """
    A base DNSDriver class to derive from

    This class is always subclassed by a specific driver.
    """
    connectionCls = ConnectionUserAndKey
    name = None
    website = None

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        """
        :param key: API key or username to used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type secure: ``bool``

        :param host: Override hostname used for connections.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :return: ``None``
        """
        super(DNSDriver, self).__init__(key=key, secret=secret, secure=secure,
                                        host=host, port=port, **kwargs)

    def list_record_types(self):
        """
        Return a list of RecordType objects supported by the provider.

        :return: ``list`` of :class:`RecordType`
        """
        return list(self.RECORD_TYPE_MAP.keys())

    def iterate_zones(self):
        """
        Return a generator to iterate over available zones.

        :rtype: ``generator`` of :class:`Zone`
        """
        raise NotImplementedError(
            'iterate_zones not implemented for this driver')

    def list_zones(self):
        """
        Return a list of zones.

        :return: ``list`` of :class:`Zone`
        """
        return list(self.iterate_zones())

    def iterate_records(self, zone):
        """
        Return a generator to iterate over records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :rtype: ``generator`` of :class:`Record`
        """
        raise NotImplementedError(
            'iterate_records not implemented for this driver')

    def list_records(self, zone):
        """
        Return a list of records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :return: ``list`` of :class:`Record`
        """
        return list(self.iterate_records(zone))

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        """
        raise NotImplementedError(
            'get_zone not implemented for this driver')

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        raise NotImplementedError(
            'get_record not implemented for this driver')

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (master / slave).
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        raise NotImplementedError(
            'create_zone not implemented for this driver')

    def update_zone(self, zone, domain, type='master', ttl=None, extra=None):
        """
        Update an existing zone.

        :param zone: Zone to update.
        :type zone: :class:`Zone`

        :param domain: Zone domain name (e.g. example.com)
        :type domain: ``str``

        :param type: Zone type (master / slave).
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        raise NotImplementedError(
            'update_zone not implemented for this driver')

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param zone: Zone where the requested record is created.
        :type zone: :class:`Zone`

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        raise NotImplementedError(
            'create_record not implemented for this driver')

    def update_record(self, record, name, type, data, extra):
        """
        Update an existing record.

        :param record: Record to update.
        :type record: :class:`Record`

        :param name: Record name without the domain name (e.g. www).
                     Note: If you want to create a record for a base domain
                     name, you should specify empty string ('') for this
                     argument.
        :type name: ``str``

        :param type: DNS record type (A, AAAA, ...).
        :type type: :class:`RecordType`

        :param data: Data for the record (depends on the record type).
        :type data: ``str``

        :param extra: (optional) Extra attributes (driver specific).
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        raise NotImplementedError(
            'update_record not implemented for this driver')

    def delete_zone(self, zone):
        """
        Delete a zone.

        Note: This will delete all the records belonging to this zone.

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_zone not implemented for this driver')

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type record: :class:`Record`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_record not implemented for this driver')

    def export_zone_to_bind_format(self, zone):
        """
        Export Zone object to the BIND compatible format.

        :param zone: Zone to export.
        :type zone: :class:`Zone`

        :return: Zone data in BIND compatible format.
        :rtype: ``str``
        """
        if zone.type != 'master':
            raise ValueError('You can only generate BIND out for master zones')

        lines = []

        # For consistent output, records are sorted based on the id
        records = zone.list_records()
        records = sorted(records, key=Record._get_numeric_id)

        # BUG FIX: the original format string used '%m' (month) in the time
        # portion; '%M' is the minutes directive.
        date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        values = {'version': __version__, 'date': date}

        lines.append('; Generated by Libcloud v%(version)s on %(date)s' %
                     values)
        lines.append('$ORIGIN %(domain)s.' % {'domain': zone.domain})
        lines.append('$TTL %(domain_ttl)s\n' % {'domain_ttl': zone.ttl})

        for record in records:
            line = self._get_bind_record_line(record=record)
            lines.append(line)

        output = '\n'.join(lines)
        return output

    def export_zone_to_bind_zone_file(self, zone, file_path):
        """
        Export Zone object to the BIND compatible format and write result to a
        file.

        :param zone: Zone to export.
        :type zone: :class:`Zone`

        :param file_path: File path where the output will be saved.
        :type file_path: ``str``
        """
        result = self.export_zone_to_bind_format(zone=zone)

        with open(file_path, 'w') as fp:
            fp.write(result)

    def _get_bind_record_line(self, record):
        """
        Generate BIND record line for the provided record.

        :param record: Record to generate the line for.
        :type record: :class:`Record`

        :return: Bind compatible record line.
        :rtype: ``str``
        """
        parts = []

        if record.name:
            name = '%(name)s.%(domain)s' % {'name': record.name,
                                            'domain': record.zone.domain}
        else:
            name = record.zone.domain

        name += '.'

        # Per-record TTL wins over the zone default.
        ttl = record.extra['ttl'] if 'ttl' in record.extra else record.zone.ttl
        ttl = str(ttl)
        data = record.data

        if record.type in [RecordType.CNAME, RecordType.DNAME, RecordType.MX,
                           RecordType.PTR, RecordType.SRV]:
            # Make sure trailing dot is present.  endswith() also copes with
            # an empty data string, where indexing the last character would
            # raise IndexError.
            if not data.endswith('.'):
                data += '.'

        if record.type in [RecordType.TXT, RecordType.SPF] and ' ' in data:
            # Escape the quotes
            data = data.replace('"', '\\"')

            # Quote the string
            data = '"%s"' % (data)

        if record.type in [RecordType.MX, RecordType.SRV]:
            priority = str(record.extra['priority'])
            parts = [name, ttl, 'IN', record.type, priority, data]
        else:
            parts = [name, ttl, 'IN', record.type, data]

        line = '\t'.join(parts)
        return line

    def _string_to_record_type(self, string):
        """
        Return a string representation of a DNS record type to a
        libcloud RecordType ENUM.

        :rtype: ``str``
        """
        string = string.upper()
        record_type = getattr(RecordType, string)
        return record_type
|
||||
218
awx/lib/site-packages/libcloud/dns/drivers/dummy.py
Normal file
218
awx/lib/site-packages/libcloud/dns/drivers/dummy.py
Normal file
@@ -0,0 +1,218 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
from libcloud.dns.types import RecordType
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
|
||||
from libcloud.dns.types import RecordDoesNotExistError
|
||||
from libcloud.dns.types import RecordAlreadyExistsError
|
||||
|
||||
|
||||
class DummyDNSDriver(DNSDriver):
    """
    Dummy DNS driver.

    >>> from libcloud.dns.drivers.dummy import DummyDNSDriver
    >>> driver = DummyDNSDriver('key', 'secret')
    >>> driver.name
    'Dummy DNS Provider'
    """

    name = 'Dummy DNS Provider'
    website = 'http://example.com'

    def __init__(self, api_key, api_secret):
        """
        :param api_key: API key or username to used (required)
        :type api_key: ``str``

        :param api_secret: Secret password to be used (required)
        :type api_secret: ``str``

        :rtype: ``None``
        """
        # NOTE(review): DNSDriver.__init__ is deliberately not called --
        # the dummy driver keeps all state in memory and never connects.
        # Storage layout: {zone_id: {'zone': Zone, 'records': {rid: Record}}}
        self._zones = {}

    def list_record_types(self):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.list_record_types()
        ['A']

        @inherits: :class:`DNSDriver.list_record_types`
        """
        return [RecordType.A]

    def list_zones(self):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.list_zones()
        []

        @inherits: :class:`DNSDriver.list_zones`
        """

        return [zone['zone'] for zone in list(self._zones.values())]

    def list_records(self, zone):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> list(zone.list_records())
        []
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        >>> list(zone.list_records()) #doctest: +ELLIPSIS
        [<Record: zone=id-apache.org, name=libcloud, type=A...>]
        """
        # BUG FIX: return a real list -- the base class documents a ``list``
        # return value and on Python 3 dict.values() is a non-indexable view.
        return list(self._zones[zone.id]['records'].values())

    def get_zone(self, zone_id):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.get_zone(zone_id='foobar')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneDoesNotExistError:

        @inherits: :class:`DNSDriver.get_zone`
        """

        if zone_id not in self._zones:
            raise ZoneDoesNotExistError(driver=self, value=None,
                                        zone_id=zone_id)

        return self._zones[zone_id]['zone']

    def get_record(self, zone_id, record_id):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> driver.get_record(zone_id='doesnotexist', record_id='exists')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneDoesNotExistError:

        @inherits: :class:`DNSDriver.get_record`
        """

        # Raises ZoneDoesNotExistError when the zone is missing.
        self.get_zone(zone_id=zone_id)
        zone_records = self._zones[zone_id]['records']

        if record_id not in zone_records:
            raise RecordDoesNotExistError(record_id=record_id, value=None,
                                          driver=self)

        return zone_records[record_id]

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> zone
        <Zone: domain=apache.org, ttl=100, provider=Dummy DNS Provider ...>
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneAlreadyExistsError:

        @inherits: :class:`DNSDriver.create_zone`
        """

        id = 'id-%s' % (domain)

        if id in self._zones:
            raise ZoneAlreadyExistsError(zone_id=id, value=None, driver=self)

        zone = Zone(id=id, domain=domain, type=type, ttl=ttl, extra={},
                    driver=self)
        self._zones[id] = {'zone': zone,
                           'records': {}}
        return zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        >>> record #doctest: +ELLIPSIS
        <Record: zone=id-apache.org, name=libcloud, type=A, data=127.0.0.1...>
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        ... #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        RecordAlreadyExistsError:

        @inherits: :class:`DNSDriver.create_record`
        """
        id = 'id-%s' % (name)

        zone = self.get_zone(zone_id=zone.id)

        if id in self._zones[zone.id]['records']:
            raise RecordAlreadyExistsError(record_id=id, value=None,
                                           driver=self)

        record = Record(id=id, name=name, type=type, data=data, extra=extra,
                        zone=zone, driver=self)
        self._zones[zone.id]['records'][id] = record
        return record

    def delete_zone(self, zone):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> driver.delete_zone(zone)
        True
        >>> driver.delete_zone(zone) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ZoneDoesNotExistError:

        @inherits: :class:`DNSDriver.delete_zone`
        """
        # Raises ZoneDoesNotExistError when the zone is already gone.
        self.get_zone(zone_id=zone.id)

        del self._zones[zone.id]
        return True

    def delete_record(self, record):
        """
        >>> driver = DummyDNSDriver('key', 'secret')
        >>> zone = driver.create_zone(domain='apache.org', type='master',
        ...                           ttl=100)
        >>> record = driver.create_record(name='libcloud', zone=zone,
        ...                               type=RecordType.A, data='127.0.0.1')
        >>> driver.delete_record(record)
        True
        >>> driver.delete_record(record) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        RecordDoesNotExistError:

        @inherits: :class:`DNSDriver.delete_record`
        """
        # Raises when either the zone or the record is missing.
        self.get_record(zone_id=record.zone.id, record_id=record.id)

        del self._zones[record.zone.id]['records'][record.id]
        return True
|
||||
|
||||
|
||||
# Run the doctests embedded in DummyDNSDriver when this module is executed
# directly (python dummy.py).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
||||
270
awx/lib/site-packages/libcloud/dns/drivers/gandi.py
Normal file
270
awx/lib/site-packages/libcloud/dns/drivers/gandi.py
Normal file
@@ -0,0 +1,270 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import with_statement
|
||||
|
||||
__all__ = [
|
||||
'GandiDNSDriver'
|
||||
]
|
||||
|
||||
from libcloud.common.gandi import BaseGandiDriver, GandiConnection
|
||||
from libcloud.common.gandi import GandiResponse
|
||||
from libcloud.dns.types import Provider, RecordType
|
||||
from libcloud.dns.types import RecordError
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
|
||||
|
||||
TTL_MIN = 30
|
||||
TTL_MAX = 2592000 # 30 days
|
||||
|
||||
|
||||
class NewZoneVersion(object):
    """
    Context manager that wraps zone changes in a fresh Gandi zone version.

    Gandi requires edits to be made against a new, inactive version of a
    zone; the version is then activated to apply the changes - in effect,
    a transaction.  On entry a new version id is created and returned.
    On a clean exit (no exception) that version is made the active one;
    if the body raises, the new version is simply left inactive so the
    changes never take effect.
    """

    def __init__(self, driver, zone):
        self.driver = driver
        self.connection = driver.connection
        self.zone = zone

    def __enter__(self):
        numeric_id = int(self.zone.id)
        self.connection.set_context({'zone_id': self.zone.id})
        new_version = self.connection.request('domain.zone.version.new',
                                              numeric_id).object
        self.vid = new_version
        return new_version

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Only activate the new version when the body completed cleanly.
        if not exc_tb:
            numeric_id = int(self.zone.id)
            conn = self.connection
            conn.set_context({'zone_id': self.zone.id})
            conn.request('domain.zone.version.set', numeric_id,
                         self.vid).object
|
||||
|
||||
|
||||
class GandiDNSResponse(GandiResponse):
    # Map Gandi XML-RPC fault codes to libcloud DNS exception classes so
    # callers get typed errors instead of raw API faults.
    exceptions = {
        # Fault 581042: the requested zone does not exist on the Gandi side.
        581042: ZoneDoesNotExistError,
    }
|
||||
|
||||
|
||||
class GandiDNSConnection(GandiConnection):
    # Use the DNS-specific response class so Gandi fault codes are
    # translated into libcloud DNS exceptions (see GandiDNSResponse).
    responseCls = GandiDNSResponse
|
||||
|
||||
|
||||
class GandiDNSDriver(BaseGandiDriver, DNSDriver):
    """
    API reference can be found at:

    http://doc.rpc.gandi.net/domain/reference.html
    """

    type = Provider.GANDI
    name = 'Gandi DNS'
    website = 'http://www.gandi.net/domain'

    connectionCls = GandiDNSConnection

    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.LOC: 'LOC',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
        RecordType.WKS: 'WKS',
    }

    def _to_zone(self, zone):
        """Convert a Gandi zone dict into a :class:`Zone`."""
        return Zone(
            id=str(zone['id']),
            domain=zone['name'],
            type='master',
            ttl=0,
            driver=self,
            extra={}
        )

    def _to_zones(self, zones):
        """Convert a list of Gandi zone dicts into :class:`Zone` objects."""
        ret = []
        for z in zones:
            ret.append(self._to_zone(z))
        return ret

    def list_zones(self):
        zones = self.connection.request('domain.zone.list')
        return self._to_zones(zones.object)

    def get_zone(self, zone_id):
        zid = int(zone_id)
        self.connection.set_context({'zone_id': zone_id})
        zone = self.connection.request('domain.zone.info', zid)
        return self._to_zone(zone.object)

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        # NOTE: Gandi only accepts the zone name here; type, ttl and extra
        # are ignored by this provider.
        params = {
            'name': domain,
        }
        info = self.connection.request('domain.zone.create', params)
        return self._to_zone(info.object)

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        # NOTE: only the zone name can be updated; type, ttl and extra are
        # ignored by this provider.
        zid = int(zone.id)
        params = {'name': domain}
        self.connection.set_context({'zone_id': zone.id})
        zone = self.connection.request('domain.zone.update', zid, params)
        return self._to_zone(zone.object)

    def delete_zone(self, zone):
        zid = int(zone.id)
        self.connection.set_context({'zone_id': zone.id})
        res = self.connection.request('domain.zone.delete', zid)
        return res.object

    def _to_record(self, record, zone):
        """Convert a Gandi record dict into a :class:`Record`.

        Record ids are synthesised as '<type>:<name>' since Gandi does not
        expose a stable per-record id.
        """
        return Record(
            id='%s:%s' % (record['type'], record['name']),
            name=record['name'],
            type=self._string_to_record_type(record['type']),
            data=record['value'],
            zone=zone,
            driver=self,
            extra={'ttl': record['ttl']}
        )

    def _to_records(self, records, zone):
        """Convert a list of Gandi record dicts into :class:`Record` objects."""
        retval = []
        for r in records:
            retval.append(self._to_record(r, zone))
        return retval

    def list_records(self, zone):
        zid = int(zone.id)
        self.connection.set_context({'zone_id': zone.id})
        # Version 0 is the currently-active zone version.
        records = self.connection.request('domain.zone.record.list', zid, 0)
        return self._to_records(records.object, zone)

    def get_record(self, zone_id, record_id):
        zid = int(zone_id)
        record_type, name = record_id.split(':', 1)
        filter_opts = {
            'name': name,
            'type': record_type
        }
        self.connection.set_context({'zone_id': zone_id})
        records = self.connection.request('domain.zone.record.list',
                                          zid, 0, filter_opts).object

        if len(records) == 0:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

        return self._to_record(records[0], self.get_zone(zone_id))

    def _validate_record(self, record_id, name, record_type, data, extra):
        """Raise :class:`RecordError` when data or ttl violate Gandi limits."""
        if len(data) > 1024:
            raise RecordError('Record data must be <= 1024 characters',
                              driver=self, record_id=record_id)
        if extra and 'ttl' in extra:
            if extra['ttl'] < TTL_MIN:
                raise RecordError('TTL must be at least 30 seconds',
                                  driver=self, record_id=record_id)
            if extra['ttl'] > TTL_MAX:
                # BUG FIX: corrected 'excdeed' typo in the error message.
                raise RecordError('TTL must not exceed 30 days',
                                  driver=self, record_id=record_id)

    def create_record(self, name, zone, type, data, extra=None):
        self._validate_record(None, name, type, data, extra)

        zid = int(zone.id)

        create = {
            'name': name,
            'type': self.RECORD_TYPE_MAP[type],
            'value': data
        }

        # BUG FIX: guard against extra=None (the default) -- the previous
        # unguarded "'ttl' in extra" raised TypeError when no extra dict
        # was supplied.
        if extra and 'ttl' in extra:
            create['ttl'] = extra['ttl']

        with NewZoneVersion(self, zone) as vid:
            con = self.connection
            con.set_context({'zone_id': zone.id})
            rec = con.request('domain.zone.record.add',
                              zid, vid, create).object

        return self._to_record(rec, zone)

    def update_record(self, record, name, type, data, extra):
        self._validate_record(record.id, name, type, data, extra)

        filter_opts = {
            'name': record.name,
            'type': self.RECORD_TYPE_MAP[record.type]
        }

        update = {
            'name': name,
            'type': self.RECORD_TYPE_MAP[type],
            'value': data
        }

        # BUG FIX: guard against extra=None, consistent with create_record.
        if extra and 'ttl' in extra:
            update['ttl'] = extra['ttl']

        zid = int(record.zone.id)

        # Gandi has no in-place update: delete the old record and add the
        # new one inside a single new zone version (transaction).
        with NewZoneVersion(self, record.zone) as vid:
            con = self.connection
            con.set_context({'zone_id': record.zone.id})
            con.request('domain.zone.record.delete',
                        zid, vid, filter_opts)
            res = con.request('domain.zone.record.add',
                              zid, vid, update).object

        return self._to_record(res, record.zone)

    def delete_record(self, record):
        zid = int(record.zone.id)

        filter_opts = {
            'name': record.name,
            'type': self.RECORD_TYPE_MAP[record.type]
        }

        with NewZoneVersion(self, record.zone) as vid:
            con = self.connection
            con.set_context({'zone_id': record.zone.id})
            count = con.request('domain.zone.record.delete',
                                zid, vid, filter_opts).object

        if count == 1:
            return True

        raise RecordDoesNotExistError(value='No such record', driver=self,
                                      record_id=record.id)
|
||||
345
awx/lib/site-packages/libcloud/dns/drivers/google.py
Normal file
345
awx/lib/site-packages/libcloud/dns/drivers/google.py
Normal file
@@ -0,0 +1,345 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__all__ = [
|
||||
'GoogleDNSDriver'
|
||||
]
|
||||
|
||||
API_VERSION = 'v1beta1'
|
||||
|
||||
import re
|
||||
from libcloud.common.google import GoogleResponse, GoogleBaseConnection
|
||||
from libcloud.common.google import ResourceNotFoundError
|
||||
from libcloud.dns.types import Provider, RecordType
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
|
||||
|
||||
class GoogleDNSResponse(GoogleResponse):
    # No DNS-specific response handling is needed; the Google base
    # response behaviour is sufficient.
    pass
|
||||
|
||||
|
||||
class GoogleDNSConnection(GoogleBaseConnection):
    """Connection class for the Google Cloud DNS API."""

    host = "www.googleapis.com"
    responseCls = GoogleDNSResponse

    def __init__(self, user_id, key, secure, auth_type=None,
                 credential_file=None, project=None, **kwargs):
        super(GoogleDNSConnection, self).\
            __init__(user_id, key, secure=secure, auth_type=auth_type,
                     credential_file=credential_file, **kwargs)
        # Every request is scoped to a single project:
        #   /dns/<API version>/projects/<project>
        self.request_path = '/dns/%s/projects/%s' % (API_VERSION, project)
|
||||
|
||||
|
||||
class GoogleDNSDriver(DNSDriver):
|
||||
type = Provider.GOOGLE
|
||||
name = 'Google DNS'
|
||||
connectionCls = GoogleDNSConnection
|
||||
website = 'https://cloud.google.com/'
|
||||
|
||||
RECORD_TYPE_MAP = {
|
||||
RecordType.A: 'A',
|
||||
RecordType.AAAA: 'AAAA',
|
||||
RecordType.CNAME: 'CNAME',
|
||||
RecordType.MX: 'MX',
|
||||
RecordType.NS: 'NS',
|
||||
RecordType.PTR: 'PTR',
|
||||
RecordType.SOA: 'SOA',
|
||||
RecordType.SPF: 'SPF',
|
||||
RecordType.SRV: 'SRV',
|
||||
RecordType.TXT: 'TXT',
|
||||
}
|
||||
|
||||
    def __init__(self, user_id, key, project=None, auth_type=None, scopes=None,
                 **kwargs):
        """
        :param user_id: Service account email / client id.
        :type user_id: ``str``

        :param key: Key (secret or path to key file, depending on auth_type).
        :type key: ``str``

        :param project: Google Cloud project name (required).
        :type project: ``str``

        :param auth_type: Authentication mechanism passed through to the
                          connection class. (optional)

        :param scopes: OAuth scopes to request. (optional)
        """
        self.auth_type = auth_type
        self.project = project
        self.scopes = scopes
        if not self.project:
            raise ValueError('Project name must be specified using '
                             '"project" keyword.')
        # NOTE(review): scopes is passed positionally here; in
        # DNSDriver.__init__ the third positional parameter is `secure`,
        # so scopes lands in `secure` -- TODO confirm this is intended.
        super(GoogleDNSDriver, self).__init__(user_id, key, scopes, **kwargs)
|
||||
|
||||
    def iterate_zones(self):
        """
        Return a generator to iterate over available zones.

        :rtype: ``generator`` of :class:`Zone`
        """
        # 'zones' selects the managedZones listing in the shared
        # pagination helper.
        return self._get_more('zones')
|
||||
|
||||
    def iterate_records(self, zone):
        """
        Return a generator to iterate over records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :rtype: ``generator`` of :class:`Record`
        """
        # 'records' selects the rrsets listing in the shared pagination
        # helper, scoped to the given zone.
        return self._get_more('records', zone=zone)
|
||||
|
||||
def get_zone(self, zone_id):
|
||||
"""
|
||||
Return a Zone instance.
|
||||
|
||||
:param zone_id: ID of the required zone
|
||||
:type zone_id: ``str``
|
||||
|
||||
:rtype: :class:`Zone`
|
||||
"""
|
||||
request = '/managedZones/%s' % (zone_id)
|
||||
|
||||
try:
|
||||
response = self.connection.request(request, method='GET').object
|
||||
except ResourceNotFoundError:
|
||||
raise ZoneDoesNotExistError(value='',
|
||||
driver=self.connection.driver,
|
||||
zone_id=zone_id)
|
||||
|
||||
return self._to_zone(response)
|
||||
|
||||
    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the required zone
        :type zone_id: ``str``

        :param record_id: ID of the required record
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        # Record ids are synthesised as '<type>:<name>'; split them back
        # into the filter parameters the rrsets endpoint expects.
        (record_type, record_name) = record_id.split(':', 1)

        params = {
            'name': record_name,
            'type': record_type,
        }

        request = '/managedZones/%s/rrsets' % (zone_id)

        try:
            response = self.connection.request(request, method='GET',
                                               params=params).object
        except ResourceNotFoundError:
            # A 404 here means the *zone* itself is missing.
            raise ZoneDoesNotExistError(value='',
                                        driver=self.connection.driver,
                                        zone_id=zone_id)

        # Only the first matching rrset is returned.
        if len(response['rrsets']) > 0:
            zone = self.get_zone(zone_id)
            return self._to_record(response['rrsets'][0], zone)

        raise RecordDoesNotExistError(value='', driver=self.connection.driver,
                                      record_id=record_id)
|
||||
|
||||
def create_zone(self, domain, type='master', ttl=None, extra=None):
|
||||
"""
|
||||
Create a new zone.
|
||||
|
||||
:param domain: Zone domain name (e.g. example.com.) with a \'.\'
|
||||
at the end.
|
||||
:type domain: ``str``
|
||||
|
||||
:param type: Zone type (master is the only one supported).
|
||||
:type type: ``str``
|
||||
|
||||
:param ttl: TTL for new records. (unused)
|
||||
:type ttl: ``int``
|
||||
|
||||
:param extra: Extra attributes (driver specific). (optional)
|
||||
:type extra: ``dict``
|
||||
|
||||
:rtype: :class:`Zone`
|
||||
"""
|
||||
name = None
|
||||
description = ''
|
||||
|
||||
if extra:
|
||||
description = extra.get('description')
|
||||
name = extra.get('name')
|
||||
|
||||
if name is None:
|
||||
name = self._cleanup_domain(domain)
|
||||
|
||||
data = {
|
||||
'dnsName': domain,
|
||||
'name': name,
|
||||
'description': description,
|
||||
}
|
||||
|
||||
request = '/managedZones'
|
||||
response = self.connection.request(request, method='POST',
|
||||
data=data).object
|
||||
return self._to_zone(response)
|
||||
|
||||
def create_record(self, name, zone, type, data, extra=None):
    """
    Create a new record.

    :param name: Record name fully qualified, with a '.' at the end.
    :type name: ``str``

    :param zone: Zone where the requested record is created.
    :type zone: :class:`Zone`

    :param type: DNS record type (A, AAAA, ...).
    :type type: :class:`RecordType`

    :param data: Data for the record (depends on the record type).
        NOTE(review): despite the documented ``str`` type, the code
        below treats this as a ``dict`` with ``ttl`` and ``rrdatas``
        keys -- confirm against callers.
    :type data: ``str``

    :param extra: Extra attributes. (optional)
    :type extra: ``dict``

    :rtype: :class:`Record`
    """
    ttl = data.get('ttl', None)
    rrdatas = data.get('rrdatas', [])

    # Records are created through the "changes" collection; a change
    # containing only 'additions' adds the rrset to the zone.
    data = {
        'additions': [
            {
                'name': name,
                'type': type,
                # NOTE(review): int(ttl) raises TypeError when 'ttl'
                # is absent from the input dict -- confirm whether a
                # default should be supplied.
                'ttl': int(ttl),
                'rrdatas': rrdatas,
            }
        ]
    }
    request = '/managedZones/%s/changes' % (zone.id)
    response = self.connection.request(request, method='POST',
                                       data=data).object
    return self._to_record(response['additions'][0], zone)
|
||||
|
||||
def delete_zone(self, zone):
    """
    Delete a zone.

    Note: This will delete all the records belonging to this zone.

    :param zone: Zone to delete.
    :type zone: :class:`Zone`

    :rtype: ``bool``
    """
    path = '/managedZones/%s' % (zone.id)
    return self.connection.request(path, method='DELETE').success()
|
||||
|
||||
def delete_record(self, record):
    """
    Delete a record.

    :param record: Record to delete.
    :type record: :class:`Record`

    :rtype: ``bool``
    """
    # Deletion is expressed as a "change" whose 'deletions' entry must
    # exactly match the existing rrset.
    deletion = {
        'name': record.name,
        'type': record.type,
        'rrdatas': record.data['rrdatas'],
        'ttl': record.data['ttl']
    }
    path = '/managedZones/%s/changes' % (record.zone.id)
    response = self.connection.request(path, method='POST',
                                       data={'deletions': [deletion]})
    return response.success()
|
||||
|
||||
def _get_more(self, rtype, **kwargs):
|
||||
last_key = None
|
||||
exhausted = False
|
||||
while not exhausted:
|
||||
items, last_key, exhausted = self._get_data(rtype, last_key,
|
||||
**kwargs)
|
||||
for item in items:
|
||||
yield item
|
||||
|
||||
def _get_data(self, rtype, last_key, **kwargs):
    """
    Fetch a single page of zones or records.

    :param rtype: Resource type: ``'zones'`` or ``'records'``.  For
        ``'records'`` a ``zone`` keyword argument must be supplied.
    :param last_key: ``nextPageToken`` from the previous page, or
        ``None`` for the first request.

    :return: ``(items, next_page_token, exhausted)`` tuple.
    """
    params = {}

    if last_key:
        params['pageToken'] = last_key

    if rtype == 'zones':
        request = '/managedZones'
        transform_func = self._to_zones
        r_key = 'managedZones'
    elif rtype == 'records':
        zone = kwargs['zone']
        request = '/managedZones/%s/rrsets' % (zone.id)
        transform_func = self._to_records
        r_key = 'rrsets'
    # NOTE(review): any other rtype value falls through with 'request'
    # unbound and raises UnboundLocalError below.

    response = self.connection.request(request, method='GET',
                                       params=params,)

    if response.success():
        nextpage = response.object.get('nextPageToken', None)
        items = transform_func(response.object.get(r_key), **kwargs)
        # A missing nextPageToken means this was the last page.
        exhausted = False if nextpage is not None else True
        return items, nextpage, exhausted
    else:
        return [], None, True
|
||||
|
||||
def _ex_connection_class_kwargs(self):
|
||||
return {'auth_type': self.auth_type,
|
||||
'project': self.project,
|
||||
'scopes': self.scopes}
|
||||
|
||||
def _to_zones(self, response):
|
||||
zones = []
|
||||
for r in response:
|
||||
zones.append(self._to_zone(r))
|
||||
return zones
|
||||
|
||||
def _to_zone(self, r):
    # 'description' is the only optional key; the remaining metadata
    # fields are copied through with .get().
    extra = {}

    if 'description' in r:
        extra['description'] = r.get('description')

    for key in ('creationTime', 'nameServers', 'id'):
        extra[key] = r.get(key)

    # Google Cloud DNS only has master zones and no per-zone TTL,
    # hence the hard-coded type and ttl values.
    return Zone(id=r['name'], domain=r['dnsName'],
                type='master', ttl=0, driver=self, extra=extra)
|
||||
|
||||
def _to_records(self, response, zone):
|
||||
records = []
|
||||
for r in response:
|
||||
records.append(self._to_record(r, zone))
|
||||
return records
|
||||
|
||||
def _to_record(self, r, zone):
    # The API exposes no native record id, so one is synthesised as
    # "<type>:<name>", mirroring the split done in get_record().
    composite_id = '%s:%s' % (r['type'], r['name'])
    return Record(id=composite_id, name=r['name'],
                  type=r['type'], data=r, zone=zone,
                  driver=self, extra={})
|
||||
|
||||
def _cleanup_domain(self, domain):
|
||||
# name can only contain lower case alphanumeric characters and hyphens
|
||||
domain = re.sub(r'[^a-zA-Z0-9-]', '-', domain)
|
||||
if domain[-1] == '-':
|
||||
domain = domain[:-1]
|
||||
return domain
|
||||
243
awx/lib/site-packages/libcloud/dns/drivers/hostvirtual.py
Normal file
243
awx/lib/site-packages/libcloud/dns/drivers/hostvirtual.py
Normal file
@@ -0,0 +1,243 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License.You may obtain a copy of the License at
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__all__ = [
|
||||
'HostVirtualDNSDriver'
|
||||
]
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.misc import merge_valid_keys, get_new_obj
|
||||
from libcloud.common.hostvirtual import HostVirtualResponse
|
||||
from libcloud.common.hostvirtual import HostVirtualConnection
|
||||
from libcloud.compute.drivers.hostvirtual import API_ROOT
|
||||
from libcloud.dns.types import Provider, RecordType
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
|
||||
# Prefer simplejson when available, falling back to the stdlib json
# module.
try:
    import simplejson as json
except ImportError:
    # Bug fix: only swallow the import failure -- a bare "except:"
    # would also hide unrelated errors (e.g. KeyboardInterrupt).
    import json
|
||||
|
||||
VALID_RECORD_EXTRA_PARAMS = ['prio', 'ttl']
|
||||
|
||||
|
||||
class HostVirtualDNSResponse(HostVirtualResponse):
    def parse_error(self):
        """
        Translate 404 responses into typed libcloud DNS exceptions,
        using the context the driver set before issuing the request.
        """
        context = self.connection.context
        status = int(self.status)

        if status == httplib.NOT_FOUND:
            # The driver records what it was looking up ('zone' or
            # 'record') so the matching exception can be raised here.
            if context['resource'] == 'zone':
                raise ZoneDoesNotExistError(value='', driver=self,
                                            zone_id=context['id'])
            elif context['resource'] == 'record':
                raise RecordDoesNotExistError(value='', driver=self,
                                              record_id=context['id'])

        # Anything else is handled by the generic error parser.
        super(HostVirtualDNSResponse, self).parse_error()
        return self.body
|
||||
|
||||
|
||||
class HostVirtualDNSConnection(HostVirtualConnection):
    # Use the DNS-aware response class so 404s raise typed exceptions.
    responseCls = HostVirtualDNSResponse
|
||||
|
||||
|
||||
class HostVirtualDNSDriver(DNSDriver):
    """
    Host Virtual DNS driver.
    """
    type = Provider.HOSTVIRTUAL
    name = 'Host Virtual DNS'
    website = 'http://www.vr.org/'
    connectionCls = HostVirtualDNSConnection

    # Maps libcloud RecordType values to the strings the API expects.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        # Bug fix: NS was incorrectly mapped to 'SPF', which created
        # NS records with the wrong type and broke conversion of 'NS'
        # strings returned by the API.
        RecordType.NS: 'NS',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }

    def __init__(self, key, secure=True, host=None, port=None):
        super(HostVirtualDNSDriver, self).__init__(key=key, secure=secure,
                                                   host=host, port=port)

    def _to_zones(self, items):
        # Convert a list of API zone dicts into Zone objects.
        zones = []
        for item in items:
            zones.append(self._to_zone(item))
        return zones

    def _to_zone(self, item):
        extra = {}
        if 'records' in item:
            extra['records'] = item['records']
        if item['type'] == 'NATIVE':
            # Present the provider specific 'NATIVE' type as 'master'.
            item['type'] = 'master'
        zone = Zone(id=item['id'], domain=item['name'],
                    type=item['type'], ttl=item['ttl'],
                    driver=self, extra=extra)
        return zone

    def _to_records(self, items, zone=None):
        records = []

        for item in items:
            records.append(self._to_record(item=item, zone=zone))
        return records

    def _to_record(self, item, zone=None):
        extra = {'ttl': item['ttl']}
        type = self._string_to_record_type(item['type'])
        record = Record(id=item['id'], name=item['name'],
                        type=type, data=item['content'],
                        zone=zone, driver=self, extra=extra)
        return record

    def list_zones(self):
        """
        Return a list of all zones.

        :rtype: ``list`` of :class:`Zone`
        """
        result = self.connection.request(
            API_ROOT + '/dns/zones/').object
        zones = self._to_zones(result)
        return zones

    def list_records(self, zone):
        """
        Return a list of records for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :rtype: ``list`` of :class:`Record`
        """
        params = {'id': zone.id}
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        result = self.connection.request(
            API_ROOT + '/dns/records/', params=params).object
        records = self._to_records(items=result, zone=zone)
        return records

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone.
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        """
        params = {'id': zone_id}
        self.connection.set_context({'resource': 'zone', 'id': zone_id})
        result = self.connection.request(
            API_ROOT + '/dns/zone/', params=params).object
        if 'id' not in result:
            raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)
        zone = self._to_zone(result)
        return zone

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the zone the record belongs to.
        :type zone_id: ``str``

        :param record_id: ID of the required record.
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        zone = self.get_zone(zone_id=zone_id)
        params = {'id': record_id}
        self.connection.set_context({'resource': 'record', 'id': record_id})
        result = self.connection.request(
            API_ROOT + '/dns/record/', params=params).object
        if 'id' not in result:
            raise RecordDoesNotExistError(value='',
                                          driver=self, record_id=record_id)
        record = self._to_record(item=result, zone=zone)
        return record

    def delete_zone(self, zone):
        """
        Delete a zone and all of its records.

        :param zone: Zone to delete.
        :type zone: :class:`Zone`

        :rtype: ``bool``
        """
        # NOTE(review): other methods pass the zone id as 'id'; confirm
        # the API really expects 'zone_id' for deletes.
        params = {'zone_id': zone.id}
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        result = self.connection.request(
            API_ROOT + '/dns/zone/', params=params, method='DELETE').object
        return bool(result)

    def delete_record(self, record):
        """
        Delete a record.

        :param record: Record to delete.
        :type record: :class:`Record`

        :rtype: ``bool``
        """
        params = {'id': record.id}
        self.connection.set_context({'resource': 'record', 'id': record.id})
        result = self.connection.request(
            API_ROOT + '/dns/record/', params=params, method='DELETE').object

        return bool(result)

    def create_zone(self, domain, type='NATIVE', ttl=None, extra=None):
        """
        Create a new zone.

        :param domain: Zone domain name.
        :type domain: ``str``

        :param type: Zone type; libcloud's 'master'/'slave' values are
            translated to the provider's 'NATIVE'/'SLAVE'.
        :type type: ``str``

        :param ttl: TTL for new records. (optional)
        :type ttl: ``int``

        :param extra: Extra attributes. (unused)
        :type extra: ``dict``

        :rtype: :class:`Zone`
        """
        if type == 'master':
            type = 'NATIVE'
        elif type == 'slave':
            type = 'SLAVE'
        params = {'name': domain, 'type': type, 'ttl': ttl}
        result = self.connection.request(
            API_ROOT + '/dns/zone/',
            data=json.dumps(params), method='POST').object
        extra = {
            'soa': result['soa'],
            'ns': result['ns']
        }
        zone = Zone(id=result['id'], domain=domain,
                    type=type, ttl=ttl, extra=extra, driver=self)
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone.

        Only the attributes passed as non-None are sent to the API; a
        new Zone copy reflecting the changes is returned.

        :rtype: :class:`Zone`
        """
        params = {'id': zone.id}
        if domain:
            params['name'] = domain
        if type:
            params['type'] = type
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        self.connection.request(API_ROOT + '/dns/zone/',
                                data=json.dumps(params), method='PUT').object
        updated_zone = get_new_obj(
            obj=zone, klass=Zone,
            attributes={
                'domain': domain,
                'type': type,
                'ttl': ttl,
                'extra': extra
            })
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        :param name: Record name.
        :type name: ``str``

        :param zone: Zone where the record is created.
        :type zone: :class:`Zone`

        :param type: DNS record type.
        :type type: :class:`RecordType`

        :param data: Data for the record.
        :type data: ``str``

        :param extra: Extra attributes ('prio', 'ttl'). (optional)
        :type extra: ``dict``

        :rtype: :class:`Record`
        """
        params = {
            'name': name,
            'type': self.RECORD_TYPE_MAP[type],
            'domain_id': zone.id,
            'content': data
        }
        # merge_valid_keys mutates params in place and returns only
        # the keys it merged in.
        merged = merge_valid_keys(
            params=params,
            valid_keys=VALID_RECORD_EXTRA_PARAMS,
            extra=extra
        )
        self.connection.set_context({'resource': 'zone', 'id': zone.id})
        result = self.connection.request(
            API_ROOT + '/dns/record/',
            data=json.dumps(params), method='POST').object
        record = Record(id=result['id'], name=name,
                        type=type, data=data,
                        extra=merged, zone=zone, driver=self)
        return record

    def update_record(self, record, name=None, type=None,
                      data=None, extra=None):
        """
        Update an existing record.

        Only the attributes passed as non-None are sent to the API; a
        new Record copy reflecting the changes is returned.

        :rtype: :class:`Record`
        """
        params = {
            'domain_id': record.zone.id,
            'record_id': record.id
        }
        if name:
            params['name'] = name
        if data:
            params['content'] = data
        if type is not None:
            params['type'] = self.RECORD_TYPE_MAP[type]
        merged = merge_valid_keys(
            params=params,
            valid_keys=VALID_RECORD_EXTRA_PARAMS,
            extra=extra
        )
        self.connection.set_context({'resource': 'record', 'id': record.id})
        self.connection.request(API_ROOT + '/dns/record/',
                                data=json.dumps(params), method='PUT').object
        updated_record = get_new_obj(
            obj=record, klass=Record, attributes={
                'name': name, 'data': data,
                'type': type,
                'extra': merged
            })
        return updated_record
|
||||
272
awx/lib/site-packages/libcloud/dns/drivers/linode.py
Normal file
272
awx/lib/site-packages/libcloud/dns/drivers/linode.py
Normal file
@@ -0,0 +1,272 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__all__ = [
|
||||
'LinodeDNSDriver'
|
||||
]
|
||||
|
||||
from libcloud.utils.misc import merge_valid_keys, get_new_obj
|
||||
from libcloud.common.linode import (API_ROOT, LinodeException,
|
||||
LinodeConnection, LinodeResponse)
|
||||
from libcloud.dns.types import Provider, RecordType
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
|
||||
|
||||
VALID_ZONE_EXTRA_PARAMS = ['SOA_Email', 'Refresh_sec', 'Retry_sec',
|
||||
'Expire_sec', 'status', 'master_ips']
|
||||
|
||||
VALID_RECORD_EXTRA_PARAMS = ['Priority', 'Weight', 'Port', 'Protocol',
|
||||
'TTL_sec']
|
||||
|
||||
|
||||
class LinodeDNSResponse(LinodeResponse):
    def _make_excp(self, error):
        """
        Convert Linode error code 5 into the typed libcloud DNS
        exceptions, using the context set by the driver before the
        request.  (Presumably code 5 means "object not found" --
        verify against the Linode API error table.)
        """
        result = super(LinodeDNSResponse, self)._make_excp(error)
        if isinstance(result, LinodeException) and result.code == 5:
            context = self.connection.context

            if context['resource'] == 'zone':
                result = ZoneDoesNotExistError(value='',
                                               driver=self.connection.driver,
                                               zone_id=context['id'])

            elif context['resource'] == 'record':
                result = RecordDoesNotExistError(value='',
                                                 driver=self.connection.driver,
                                                 record_id=context['id'])
        return result
|
||||
|
||||
|
||||
class LinodeDNSConnection(LinodeConnection):
    # Use the DNS-aware response class so "not found" errors are
    # raised as typed DNS exceptions.
    responseCls = LinodeDNSResponse
|
||||
|
||||
|
||||
class LinodeDNSDriver(DNSDriver):
    """
    DNS driver for the Linode DNS API.
    """
    type = Provider.LINODE
    name = 'Linode DNS'
    website = 'http://www.linode.com/'
    connectionCls = LinodeDNSConnection

    # Maps libcloud RecordType values to the strings the Linode API
    # expects in the 'Type' parameter.
    RECORD_TYPE_MAP = {
        RecordType.NS: 'NS',
        RecordType.MX: 'MX',
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.TXT: 'TXT',
        RecordType.SRV: 'SRV',
    }

    def list_zones(self):
        """
        Return a list of all zones (Linode "domains").

        :rtype: ``list`` of :class:`Zone`
        """
        params = {'api_action': 'domain.list'}
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zones = self._to_zones(data)
        return zones

    def list_records(self, zone):
        """
        Return a list of records ("resources") for the provided zone.

        :param zone: Zone to list records for.
        :type zone: :class:`Zone`

        :rtype: ``list`` of :class:`Record`
        """
        params = {'api_action': 'domain.resource.list', 'DOMAINID': zone.id}

        # The context lets the response class raise
        # ZoneDoesNotExistError when the API reports the domain as
        # missing.
        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        records = self._to_records(items=data, zone=zone)
        return records

    def get_zone(self, zone_id):
        """
        Return a Zone instance.

        :param zone_id: ID of the required zone.
        :type zone_id: ``str``

        :rtype: :class:`Zone`
        """
        # NOTE(review): parameter casing differs from list_records
        # ('DomainID' vs 'DOMAINID') -- presumably the API accepts
        # both; verify.
        params = {'api_action': 'domain.list', 'DomainID': zone_id}
        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone_id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zones = self._to_zones(data)

        if len(zones) != 1:
            raise ZoneDoesNotExistError(value='', driver=self, zone_id=zone_id)

        return zones[0]

    def get_record(self, zone_id, record_id):
        """
        Return a Record instance.

        :param zone_id: ID of the zone the record belongs to.
        :type zone_id: ``str``

        :param record_id: ID of the required record.
        :type record_id: ``str``

        :rtype: :class:`Record`
        """
        zone = self.get_zone(zone_id=zone_id)
        params = {'api_action': 'domain.resource.list', 'DomainID': zone_id,
                  'ResourceID': record_id}
        self.connection.set_context(context={'resource': 'record',
                                             'id': record_id})
        data = self.connection.request(API_ROOT, params=params).objects[0]
        records = self._to_records(items=data, zone=zone)

        if len(records) != 1:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)

        return records[0]

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """
        Create a new zone.

        API docs: http://www.linode.com/api/dns/domain.create
        """
        params = {'api_action': 'domain.create', 'Type': type,
                  'Domain': domain}

        if ttl:
            params['TTL_sec'] = ttl

        # merge_valid_keys mutates params in place and returns only
        # the keys it merged in.
        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        data = self.connection.request(API_ROOT, params=params).objects[0]
        zone = Zone(id=data['DomainID'], domain=domain, type=type, ttl=ttl,
                    extra=merged, driver=self)
        return zone

    def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
        """
        Update an existing zone.

        API docs: http://www.linode.com/api/dns/domain.update
        """
        params = {'api_action': 'domain.update', 'DomainID': zone.id}

        if type:
            params['Type'] = type

        if domain:
            params['Domain'] = domain

        if ttl:
            params['TTL_sec'] = ttl

        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_ZONE_EXTRA_PARAMS,
                                  extra=extra)
        self.connection.request(API_ROOT, params=params).objects[0]
        # The original object is not mutated; a copy with the updated
        # attributes is returned instead.
        updated_zone = get_new_obj(obj=zone, klass=Zone,
                                   attributes={'domain': domain,
                                               'type': type, 'ttl': ttl,
                                               'extra': merged})
        return updated_zone

    def create_record(self, name, zone, type, data, extra=None):
        """
        Create a new record.

        API docs: http://www.linode.com/api/dns/domain.resource.create
        """
        params = {'api_action': 'domain.resource.create', 'DomainID': zone.id,
                  'Name': name, 'Target': data,
                  'Type': self.RECORD_TYPE_MAP[type]}
        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)

        result = self.connection.request(API_ROOT, params=params).objects[0]
        record = Record(id=result['ResourceID'], name=name, type=type,
                        data=data, extra=merged, zone=zone, driver=self)
        return record

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """
        Update an existing record.

        API docs: http://www.linode.com/api/dns/domain.resource.update
        """
        params = {'api_action': 'domain.resource.update',
                  'ResourceID': record.id, 'DomainID': record.zone.id}

        if name:
            params['Name'] = name

        if data:
            params['Target'] = data

        if type is not None:
            params['Type'] = self.RECORD_TYPE_MAP[type]

        merged = merge_valid_keys(params=params,
                                  valid_keys=VALID_RECORD_EXTRA_PARAMS,
                                  extra=extra)

        self.connection.request(API_ROOT, params=params).objects[0]
        updated_record = get_new_obj(obj=record, klass=Record,
                                     attributes={'name': name, 'data': data,
                                                 'type': type,
                                                 'extra': merged})
        return updated_record

    def delete_zone(self, zone):
        """
        Delete a zone and all of its records.

        :rtype: ``bool``
        """
        params = {'api_action': 'domain.delete', 'DomainID': zone.id}

        self.connection.set_context(context={'resource': 'zone',
                                             'id': zone.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]

        return 'DomainID' in data

    def delete_record(self, record):
        """
        Delete a record.

        :rtype: ``bool``
        """
        params = {'api_action': 'domain.resource.delete',
                  'DomainID': record.zone.id, 'ResourceID': record.id}

        self.connection.set_context(context={'resource': 'record',
                                             'id': record.id})
        data = self.connection.request(API_ROOT, params=params).objects[0]

        return 'ResourceID' in data

    def _to_zones(self, items):
        """
        Convert a list of items to the Zone objects.
        """
        zones = []

        for item in items:
            zones.append(self._to_zone(item))

        return zones

    def _to_zone(self, item):
        """
        Build an Zone object from the item dictionary.
        """
        extra = {'SOA_Email': item['SOA_EMAIL'], 'status': item['STATUS'],
                 'description': item['DESCRIPTION']}
        zone = Zone(id=item['DOMAINID'], domain=item['DOMAIN'],
                    type=item['TYPE'], ttl=item['TTL_SEC'], driver=self,
                    extra=extra)
        return zone

    def _to_records(self, items, zone=None):
        """
        Convert a list of items to the Record objects.
        """
        records = []

        for item in items:
            records.append(self._to_record(item=item, zone=zone))

        return records

    def _to_record(self, item, zone=None):
        """
        Build a Record object from the item dictionary.
        """
        extra = {'protocol': item['PROTOCOL'], 'ttl_sec': item['TTL_SEC'],
                 'port': item['PORT'], 'weight': item['WEIGHT']}
        type = self._string_to_record_type(item['TYPE'])
        record = Record(id=item['RESOURCEID'], name=item['NAME'], type=type,
                        data=item['TARGET'], zone=zone, driver=self,
                        extra=extra)
        return record
|
||||
450
awx/lib/site-packages/libcloud/dns/drivers/rackspace.py
Normal file
450
awx/lib/site-packages/libcloud/dns/drivers/rackspace.py
Normal file
@@ -0,0 +1,450 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from libcloud.common.openstack import OpenStackDriverMixin
|
||||
|
||||
__all__ = [
|
||||
'RackspaceUSDNSDriver',
|
||||
'RackspaceUKDNSDriver'
|
||||
]
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
import copy
|
||||
|
||||
from libcloud.common.base import PollingConnection
|
||||
from libcloud.common.types import LibcloudError
|
||||
from libcloud.utils.misc import merge_valid_keys, get_new_obj
|
||||
from libcloud.common.rackspace import AUTH_URL
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection
|
||||
from libcloud.compute.drivers.openstack import OpenStack_1_1_Response
|
||||
|
||||
from libcloud.dns.types import Provider, RecordType
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
|
||||
VALID_ZONE_EXTRA_PARAMS = ['email', 'comment', 'ns1']
|
||||
VALID_RECORD_EXTRA_PARAMS = ['ttl', 'comment', 'priority']
|
||||
|
||||
|
||||
class RackspaceDNSResponse(OpenStack_1_1_Response):
    """
    Rackspace DNS Response class.
    """

    def parse_error(self):
        """
        Raise typed DNS exceptions for 404s (using the context set by
        the driver) and build a readable message for other errors.
        """
        status = int(self.status)
        context = self.connection.context
        body = self.parse_body()

        if status == httplib.NOT_FOUND:
            if context['resource'] == 'zone':
                raise ZoneDoesNotExistError(value='', driver=self,
                                            zone_id=context['id'])
            elif context['resource'] == 'record':
                raise RecordDoesNotExistError(value='', driver=self,
                                              record_id=context['id'])
        if body:
            # Bug fix: the previous condition was
            # "'code' and 'message' in body" which, due to operator
            # precedence, only tested for the 'message' key.
            if 'code' in body and 'message' in body:
                err = '%s - %s (%s)' % (body['code'], body['message'],
                                        body['details'])
                return err
            elif 'validationErrors' in body:
                errors = [m for m in body['validationErrors']['messages']]
                err = 'Validation errors: %s' % ', '.join(errors)
                return err

        raise LibcloudError('Unexpected status code: %s' % (status))
|
||||
|
||||
|
||||
class RackspaceDNSConnection(OpenStack_1_1_Connection, PollingConnection):
    """
    Rackspace DNS Connection class.

    Rackspace DNS mutations are asynchronous jobs; PollingConnection
    polls the /status/<jobId> endpoint until the job completes.
    """

    responseCls = RackspaceDNSResponse
    XML_NAMESPACE = None
    poll_interval = 2.5
    timeout = 30

    auth_url = AUTH_URL
    _auth_version = '2.0'

    def __init__(self, *args, **kwargs):
        self.region = kwargs.pop('region', None)
        super(RackspaceDNSConnection, self).__init__(*args, **kwargs)

    def get_poll_request_kwargs(self, response, context, request_kwargs):
        # Poll the job-status endpoint returned by the initial call.
        job_id = response.object['jobId']
        kwargs = {'action': '/status/%s' % (job_id),
                  'params': {'showDetails': True}}
        return kwargs

    def has_completed(self, response):
        status = response.object['status']
        if status == 'ERROR':
            data = response.object['error']

            # Bug fix: the previous condition was
            # "'code' and 'message' in data" which, due to operator
            # precedence, only tested for the 'message' key and could
            # raise KeyError when 'code' was absent.
            if 'code' in data and 'message' in data:
                message = '%s - %s (%s)' % (data['code'], data['message'],
                                            data['details'])
            else:
                message = data['message']

            raise LibcloudError(message,
                                driver=self.driver)

        return status == 'COMPLETED'

    def get_endpoint(self):
        if '2.0' in self._auth_version:
            ep = self.service_catalog.get_endpoint(name='cloudDNS',
                                                   service_type='rax:dns',
                                                   region=None)
        else:
            raise LibcloudError("Auth version %s not supported" %
                                (self._auth_version))

        public_url = ep.get('publicURL', None)

        # This is a nasty hack, but because of how global auth and old accounts
        # work, there is no way around it.
        if self.region == 'us':
            # Old UK account, which only has us endpoint in the catalog
            public_url = public_url.replace('https://lon.dns.api',
                                            'https://dns.api')
        if self.region == 'uk':
            # Old US account, which only has uk endpoint in the catalog
            public_url = public_url.replace('https://dns.api',
                                            'https://lon.dns.api')

        return public_url
|
||||
|
||||
|
||||
class RackspaceDNSDriver(DNSDriver, OpenStackDriverMixin):
    # Rackspace Cloud DNS driver, built on the OpenStack 1.1 API
    # machinery with Rackspace specific endpoints.
    name = 'Rackspace DNS'
    website = 'http://www.rackspace.com/'
    type = Provider.RACKSPACE
    connectionCls = RackspaceDNSConnection
|
||||
|
||||
def __init__(self, key, secret=None, secure=True, host=None, port=None,
             region='us', **kwargs):
    """
    :param region: Datacenter region: 'us' or 'uk'.
    :type region: ``str``

    :raises ValueError: On an unsupported region.
    """
    if region not in ['us', 'uk']:
        raise ValueError('Invalid region: %s' % (region))

    OpenStackDriverMixin.__init__(self, **kwargs)
    # NOTE(review): 'secure' is accepted but not forwarded to the base
    # constructor -- confirm this is intentional.
    super(RackspaceDNSDriver, self).__init__(key=key, secret=secret,
                                             host=host, port=port,
                                             region=region)
|
||||
|
||||
# Maps libcloud RecordType values to the strings the Rackspace Cloud
# DNS API expects.
RECORD_TYPE_MAP = {
    RecordType.A: 'A',
    RecordType.AAAA: 'AAAA',
    RecordType.CNAME: 'CNAME',
    RecordType.MX: 'MX',
    RecordType.NS: 'NS',
    RecordType.PTR: 'PTR',
    RecordType.SRV: 'SRV',
    RecordType.TXT: 'TXT',
}
|
||||
|
||||
def iterate_zones(self):
    """
    Lazily iterate over all zones, following the API's offset/limit
    pagination (100 domains per request).

    :rtype: generator of :class:`Zone`
    """
    offset = 0
    limit = 100
    while True:
        params = {
            'limit': limit,
            'offset': offset,
        }
        response = self.connection.request(
            action='/domains', params=params).object
        zones_list = response['domains']
        for item in zones_list:
            yield self._to_zone(item)

        # Advance to the next page only while the API reports more
        # results are available.
        if _rackspace_result_has_more(response, len(zones_list), limit):
            offset += limit
        else:
            break
|
||||
|
||||
def iterate_records(self, zone):
    """
    Lazily iterate over all records of a zone, following the API's
    offset/limit pagination (100 records per request).

    :param zone: Zone to iterate records for.
    :type zone: :class:`Zone`

    :rtype: generator of :class:`Record`
    """
    # The context lets the response class raise ZoneDoesNotExistError
    # on a 404 instead of a generic error.
    self.connection.set_context({'resource': 'zone', 'id': zone.id})
    offset = 0
    limit = 100
    while True:
        params = {
            'showRecord': True,
            'limit': limit,
            'offset': offset,
        }
        response = self.connection.request(
            action='/domains/%s' % (zone.id), params=params).object
        records_list = response['recordsList']
        records = records_list['records']
        for item in records:
            record = self._to_record(data=item, zone=zone)
            yield record

        if _rackspace_result_has_more(records_list, len(records), limit):
            offset += limit
        else:
            break
|
||||
|
||||
def get_zone(self, zone_id):
    """
    Return the zone with the provided id.

    :param zone_id: ID of the required zone.
    :type zone_id: ``str``

    :rtype: :class:`Zone`
    """
    # The context lets the response class raise ZoneDoesNotExistError
    # on a 404 instead of a generic error.
    self.connection.set_context({'resource': 'zone', 'id': zone_id})
    response = self.connection.request(action='/domains/%s' % (zone_id))
    return self._to_zone(data=response.object)
|
||||
|
||||
def get_record(self, zone_id, record_id):
    """
    Return the record with the provided id.

    :param zone_id: ID of the zone the record belongs to.
    :type zone_id: ``str``

    :param record_id: ID of the required record.
    :type record_id: ``str``

    :rtype: :class:`Record`
    """
    parent_zone = self.get_zone(zone_id=zone_id)
    self.connection.set_context({'resource': 'record', 'id': record_id})
    action = '/domains/%s/records/%s' % (zone_id, record_id)
    payload = self.connection.request(action=action).object
    return self._to_record(data=payload, zone=parent_zone)
|
||||
|
||||
def create_zone(self, domain, type='master', ttl=None, extra=None):
    """
    Create a new zone.

    :param domain: Zone domain name.
    :type domain: ``str``

    :param type: Zone type. (unused by this driver)
    :type type: ``str``

    :param ttl: Default TTL. (optional)
    :type ttl: ``int``

    :param extra: Extra attributes; 'email' is required, 'comment' is
        optional.
    :type extra: ``dict``

    :raises ValueError: If no 'email' key is present in extra.

    :rtype: :class:`Zone`
    """
    extra = extra if extra else {}

    # Email address is required
    if 'email' not in extra:
        raise ValueError('"email" key must be present in extra dictionary')

    payload = {'name': domain, 'emailAddress': extra['email'],
               'recordsList': {'records': []}}

    if ttl:
        payload['ttl'] = ttl

    if 'comment' in extra:
        payload['comment'] = extra['comment']

    data = {'domains': [payload]}
    # Zone creation is an asynchronous job; async_request polls until
    # the job finishes and returns the final response.
    response = self.connection.async_request(action='/domains',
                                             method='POST', data=data)
    zone = self._to_zone(data=response.object['response']['domains'][0])
    return zone
|
||||
|
||||
def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
    """
    Update an existing zone.

    Only ttl, comment and email address can be changed; the domain name
    itself is immutable on Rackspace.
    """
    extra = extra or {}

    if domain:
        raise LibcloudError('Domain cannot be changed', driver=self)

    data = {}
    if ttl:
        data['ttl'] = int(ttl)
    if 'email' in extra:
        data['emailAddress'] = extra['email']
    if 'comment' in extra:
        data['comment'] = extra['comment']

    # Fall back to the current values for the returned object.
    type = type or zone.type
    ttl = ttl or zone.ttl

    self.connection.set_context({'resource': 'zone', 'id': zone.id})
    self.connection.async_request(action='/domains/%s' % (zone.id),
                                  method='PUT', data=data)

    merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
                              valid_keys=VALID_ZONE_EXTRA_PARAMS,
                              extra=extra)
    return get_new_obj(obj=zone, klass=Zone,
                       attributes={'type': type,
                                   'ttl': ttl,
                                   'extra': merged})
||||
def create_record(self, name, zone, type, data, extra=None):
    """
    Create a record.

    *name* is relative to the zone: the FQDN (e.g. "bar.foo.com" for
    domain "foo.com") is built automatically.
    """
    extra = extra or {}

    fqdn = self._to_full_record_name(domain=zone.domain, name=name)
    record_data = {'name': fqdn,
                   'type': self.RECORD_TYPE_MAP[type],
                   'data': data}
    if 'ttl' in extra:
        record_data['ttl'] = int(extra['ttl'])
    if 'priority' in extra:
        record_data['priority'] = int(extra['priority'])

    self.connection.set_context({'resource': 'zone', 'id': zone.id})
    response = self.connection.async_request(
        action='/domains/%s/records' % (zone.id),
        data={'records': [record_data]},
        method='POST').object
    return self._to_record(data=response['response']['records'][0],
                           zone=zone)
||||
def update_record(self, record, name=None, type=None, data=None,
                  extra=None):
    """
    Update a record.

    Only data, ttl, and comment attributes can be modified through the
    API, but the name attribute must always be present in the payload.
    The *name* / *type* arguments are only reflected on the returned
    object.
    """
    extra = extra if extra else {}

    name = self._to_full_record_name(domain=record.zone.domain,
                                     name=record.name)
    payload = {'name': name}

    if data:
        payload['data'] = data

    if 'ttl' in extra:
        # Cast to int for consistency with create_record(); previously
        # the raw value was forwarded unconverted.
        payload['ttl'] = int(extra['ttl'])

    if 'comment' in extra:
        payload['comment'] = extra['comment']

    type = type if type is not None else record.type
    data = data if data else record.data

    self.connection.set_context({'resource': 'record', 'id': record.id})
    self.connection.async_request(action='/domains/%s/records/%s' %
                                  (record.zone.id, record.id),
                                  method='PUT', data=payload)

    merged = merge_valid_keys(params=copy.deepcopy(record.extra),
                              valid_keys=VALID_RECORD_EXTRA_PARAMS,
                              extra=extra)
    updated_record = get_new_obj(obj=record, klass=Record,
                                 attributes={'type': type,
                                             'data': data,
                                             'driver': self,
                                             'extra': merged})
    return updated_record
||||
def delete_zone(self, zone):
    """Delete *zone* and return ``True`` on success."""
    self.connection.set_context({'resource': 'zone', 'id': zone.id})
    # async_request raises on failure, so reaching the return means OK.
    self.connection.async_request(action='/domains/%s' % (zone.id),
                                  method='DELETE')
    return True
||||
def delete_record(self, record):
    """Delete *record* and return ``True`` on success."""
    self.connection.set_context({'resource': 'record', 'id': record.id})
    # async_request raises on failure, so reaching the return means OK.
    self.connection.async_request(action='/domains/%s/records/%s' %
                                  (record.zone.id, record.id),
                                  method='DELETE')
    return True
||||
def _to_zone(self, data):
    """Convert an API-side domain dict into a :class:`Zone` instance."""
    extra = {}
    if 'emailAddress' in data:
        extra['email'] = data['emailAddress']
    if 'comment' in data:
        extra['comment'] = data['comment']

    # Rackspace only supports master zones; ttl may be absent.
    return Zone(id=str(data['id']), domain=data['name'], type='master',
                ttl=int(data.get('ttl', 0)), driver=self, extra=extra)
||||
def _to_record(self, data, zone):
    """Convert an API-side record dict into a :class:`Record` instance."""
    fqdn = data['name']
    extra = {'fqdn': fqdn}
    # Copy over any whitelisted extra attributes present in the payload.
    for key in VALID_RECORD_EXTRA_PARAMS:
        if key in data:
            extra[key] = data[key]

    short_name = self._to_partial_record_name(domain=zone.domain,
                                              name=fqdn)
    return Record(id=str(data['id']), name=short_name,
                  type=self._string_to_record_type(data['type']),
                  data=data['data'], zone=zone, driver=self, extra=extra)
||||
def _to_full_record_name(self, domain, name):
|
||||
"""
|
||||
Build a FQDN from a domain and record name.
|
||||
|
||||
:param domain: Domain name.
|
||||
:type domain: ``str``
|
||||
|
||||
:param name: Record name.
|
||||
:type name: ``str``
|
||||
"""
|
||||
if name:
|
||||
name = '%s.%s' % (name, domain)
|
||||
else:
|
||||
name = domain
|
||||
|
||||
return name
|
||||
|
||||
def _to_partial_record_name(self, domain, name):
|
||||
"""
|
||||
Remove domain portion from the record name.
|
||||
|
||||
:param domain: Domain name.
|
||||
:type domain: ``str``
|
||||
|
||||
:param name: Full record name (fqdn).
|
||||
:type name: ``str``
|
||||
"""
|
||||
if name == domain:
|
||||
# Map "root" record names to None to be consistent with other
|
||||
# drivers
|
||||
return None
|
||||
|
||||
# Strip domain portion
|
||||
name = name.replace('.%s' % (domain), '')
|
||||
return name
|
||||
|
||||
def _ex_connection_class_kwargs(self):
    """Extend the OpenStack connection kwargs with the driver's region."""
    return dict(self.openstack_connection_kwargs(), region=self.region)
||||
class RackspaceUSDNSDriver(RackspaceDNSDriver):
    """Rackspace DNS driver pinned to the US region."""
    name = 'Rackspace DNS (US)'
    type = Provider.RACKSPACE_US

    def __init__(self, *args, **kwargs):
        # Hard-wire the region; everything else is inherited.
        kwargs['region'] = 'us'
        super(RackspaceUSDNSDriver, self).__init__(*args, **kwargs)
||||
class RackspaceUKDNSDriver(RackspaceDNSDriver):
    """Rackspace DNS driver pinned to the UK region."""
    name = 'Rackspace DNS (UK)'
    type = Provider.RACKSPACE_UK

    def __init__(self, *args, **kwargs):
        # Hard-wire the region; everything else is inherited.
        kwargs['region'] = 'uk'
        super(RackspaceUKDNSDriver, self).__init__(*args, **kwargs)
||||
def _rackspace_result_has_more(response, result_length, limit):
|
||||
# If rackspace returns less than the limit, then we've reached the end of
|
||||
# the result set.
|
||||
if result_length < limit:
|
||||
return False
|
||||
|
||||
# Paginated results return links to the previous and next sets of data, but
|
||||
# 'next' only exists when there is more to get.
|
||||
for item in response.get('links', ()):
|
||||
if item['rel'] == 'next':
|
||||
return True
|
||||
return False
|
||||
527
awx/lib/site-packages/libcloud/dns/drivers/route53.py
Normal file
527
awx/lib/site-packages/libcloud/dns/drivers/route53.py
Normal file
@@ -0,0 +1,527 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__all__ = [
|
||||
'Route53DNSDriver'
|
||||
]
|
||||
|
||||
import base64
|
||||
import hmac
|
||||
import datetime
|
||||
import uuid
|
||||
import copy
|
||||
from libcloud.utils.py3 import httplib
|
||||
|
||||
from hashlib import sha1
|
||||
|
||||
try:
|
||||
from lxml import etree as ET
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as ET
|
||||
|
||||
from libcloud.utils.py3 import b, urlencode
|
||||
|
||||
from libcloud.utils.xml import findtext, findall, fixxpath
|
||||
from libcloud.dns.types import Provider, RecordType
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
from libcloud.common.types import LibcloudError
|
||||
from libcloud.common.aws import AWSGenericResponse
|
||||
from libcloud.common.base import ConnectionUserAndKey
|
||||
|
||||
|
||||
API_VERSION = '2012-02-29'
|
||||
API_HOST = 'route53.amazonaws.com'
|
||||
API_ROOT = '/%s/' % (API_VERSION)
|
||||
|
||||
NAMESPACE = 'https://%s/doc%s' % (API_HOST, API_ROOT)
|
||||
|
||||
|
||||
class InvalidChangeBatch(LibcloudError):
    """Raised when Route53 rejects a submitted change batch."""
||||
class Route53DNSResponse(AWSGenericResponse):
    """
    Amazon Route53 response class.
    """

    namespace = NAMESPACE
    # Error details live under the <Error> element of the response body.
    xpath = 'Error'

    # AWS error code -> libcloud exception to raise.
    exceptions = {
        'NoSuchHostedZone': ZoneDoesNotExistError,
        'InvalidChangeBatch': InvalidChangeBatch,
    }
||||
class Route53Connection(ConnectionUserAndKey):
    """Connection signing requests with the AWS3-HTTPS scheme."""

    host = API_HOST
    responseCls = Route53DNSResponse

    def pre_connect_hook(self, params, headers):
        """Attach the Date header and the AWS3-HTTPS signature header."""
        now = datetime.datetime.utcnow()
        time_string = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
        headers['Date'] = time_string

        auth_params = {
            'AWSAccessKeyId': self.user_id,
            'Signature': self._get_aws_auth_b64(self.key, time_string),
            'Algorithm': 'HmacSHA1',
        }
        pairs = ['%s=%s' % (key, value)
                 for key, value in auth_params.items()]
        headers['X-Amzn-Authorization'] = 'AWS3-HTTPS ' + ','.join(pairs)

        return params, headers

    def _get_aws_auth_b64(self, secret_key, time_string):
        """Return the base64-encoded HMAC-SHA1 of *time_string*."""
        digest = hmac.new(b(secret_key), b(time_string),
                          digestmod=sha1).digest()
        return base64.b64encode(digest).decode('utf-8')
||||
class Route53DNSDriver(DNSDriver):
    """Driver for the Amazon Route53 DNS API."""
    type = Provider.ROUTE53
    name = 'Route53 DNS'
    website = 'http://aws.amazon.com/route53/'
    connectionCls = Route53Connection

    # libcloud RecordType -> Route53 record-type string.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }
||||
def iterate_zones(self):
    """Lazily iterate over all hosted zones."""
    return self._get_more('zones')
||||
def iterate_records(self, zone):
    """Lazily iterate over all records in *zone*."""
    return self._get_more('records', zone=zone)
||||
def get_zone(self, zone_id):
    """Fetch a hosted zone by its id."""
    self.connection.set_context({'zone_id': zone_id})
    data = self.connection.request(API_ROOT + 'hostedzone/' +
                                   zone_id).object
    elem = findall(element=data, xpath='HostedZone',
                   namespace=NAMESPACE)[0]
    return self._to_zone(elem)
||||
def get_record(self, zone_id, record_id):
    """
    Fetch a single record.

    Record ids have the form "<TYPE>:<name>" with an empty name for the
    zone root.
    """
    zone = self.get_zone(zone_id=zone_id)
    record_type, name = record_id.split(':', 1)
    full_name = '.'.join((name, zone.domain)) if name else zone.domain

    self.connection.set_context({'zone_id': zone_id})
    params = urlencode({
        'name': full_name,
        'type': record_type,
        'maxitems': '1'
    })
    uri = API_ROOT + 'hostedzone/' + zone_id + '/rrset?' + params
    data = self.connection.request(uri).object

    record = self._to_records(data=data, zone=zone)[0]

    # A cute aspect of the /rrset filters is that they are more pagination
    # hints than filters!!
    # So will return a result even if its not what you asked for.
    record_type_num = self._string_to_record_type(record_type)
    if record.name != name or record.type != record_type_num:
        raise RecordDoesNotExistError(value='', driver=self,
                                      record_id=record_id)

    return record
||||
def create_zone(self, domain, type='master', ttl=None, extra=None):
    """Create a hosted zone; ``extra['Comment']`` is forwarded if set."""
    zone_elem = ET.Element('CreateHostedZoneRequest', {'xmlns': NAMESPACE})
    ET.SubElement(zone_elem, 'Name').text = domain
    # Route53 requires a unique caller reference for each create request.
    ET.SubElement(zone_elem, 'CallerReference').text = str(uuid.uuid4())

    if extra and 'Comment' in extra:
        config = ET.SubElement(zone_elem, 'HostedZoneConfig')
        ET.SubElement(config, 'Comment').text = extra['Comment']

    rsp = self.connection.request(API_ROOT + 'hostedzone', method='POST',
                                  data=ET.tostring(zone_elem)).object
    elem = findall(element=rsp, xpath='HostedZone', namespace=NAMESPACE)[0]
    return self._to_zone(elem=elem)
||||
def delete_zone(self, zone, ex_delete_records=False):
    """
    Delete *zone*. When *ex_delete_records* is True, all deletable
    records are removed first (Route53 refuses to delete non-empty
    zones).
    """
    self.connection.set_context({'zone_id': zone.id})

    if ex_delete_records:
        self.ex_delete_all_records(zone=zone)

    response = self.connection.request(API_ROOT + 'hostedzone/%s' %
                                       (zone.id), method='DELETE')
    return response.status in [httplib.OK]
||||
def create_record(self, name, zone, type, data, extra=None):
    """Create a record by posting a single-CREATE changeset."""
    extra = extra or {}
    self._post_changeset(zone, [('CREATE', name, type, data, extra)])
    record_id = ':'.join((self.RECORD_TYPE_MAP[type], name))
    return Record(id=record_id, name=name, type=type, data=data,
                  zone=zone, driver=self, extra=extra)
||||
def update_record(self, record, name=None, type=None, data=None,
                  extra=None):
    """
    Update an existing record; unspecified attributes fall back to the
    current values on *record*.
    """
    name = name or record.name
    type = type or record.type
    # `extra or record.extra` already covers None and empty dicts, so the
    # previous additional "if not extra: extra = record.extra" branch was
    # dead code and has been removed.
    extra = extra or record.extra

    # Multiple value records need to be handled specially - we need to
    # pass values for other records as well
    multiple_value_record = record.extra.get('_multi_value', False)
    other_records = record.extra.get('_other_records', [])

    if multiple_value_record and other_records:
        self._update_multi_value_record(record=record, name=name,
                                        type=type, data=data,
                                        extra=extra)
    else:
        self._update_single_value_record(record=record, name=name,
                                         type=type, data=data,
                                         extra=extra)

    id = ':'.join((self.RECORD_TYPE_MAP[type], name))
    return Record(id=id, name=name, type=type, data=data, zone=record.zone,
                  driver=self, extra=extra)
||||
def delete_record(self, record):
    """Delete *record*; raises RecordDoesNotExistError if already gone."""
    batch = [('DELETE', record.name, record.type, record.data,
              record.extra)]
    try:
        self._post_changeset(record.zone, batch)
    except InvalidChangeBatch:
        # Route53 rejects deletion batches for records that do not exist.
        raise RecordDoesNotExistError(value='', driver=self,
                                      record_id=record.id)
    return True
||||
def ex_create_multi_value_record(self, name, zone, type, data, extra=None):
    """
    Create a record with multiple values with a single call.

    Values are supplied in *data* as a multi line string, one value per
    line.

    :return: A list of created records.
    :rtype: ``list`` of :class:`libcloud.dns.base.Record`
    """
    extra = extra or {}

    attrs = {'xmlns': NAMESPACE}
    changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
    batch = ET.SubElement(changeset, 'ChangeBatch')
    changes = ET.SubElement(batch, 'Changes')

    change = ET.SubElement(changes, 'Change')
    ET.SubElement(change, 'Action').text = 'CREATE'

    rrs = ET.SubElement(change, 'ResourceRecordSet')
    ET.SubElement(rrs, 'Name').text = name + '.' + zone.domain
    ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type]
    ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

    rrecs = ET.SubElement(rrs, 'ResourceRecords')

    # Value is provided as a multi line string
    values = [value.strip() for value in data.split('\n') if
              value.strip()]

    for value in values:
        rrec = ET.SubElement(rrecs, 'ResourceRecord')
        ET.SubElement(rrec, 'Value').text = value

    uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
    # Use a separate variable so the original `data` argument is not
    # shadowed by the serialized XML payload.
    payload = ET.tostring(changeset)
    self.connection.set_context({'zone_id': zone.id})
    self.connection.request(uri, method='POST', data=payload)

    id = ':'.join((self.RECORD_TYPE_MAP[type], name))

    records = []
    for value in values:
        record = Record(id=id, name=name, type=type, data=value, zone=zone,
                        driver=self, extra=extra)
        records.append(record)

    # Bug fix: return the full list as documented; previously only the
    # last created record was returned.
    return records
||||
def ex_delete_all_records(self, zone):
    """
    Remove all the records for the provided zone.

    :param zone: Zone to delete records for.
    :type zone: :class:`Zone`
    """
    # NS and SOA records are managed by Route53 itself and cannot be
    # deleted by the caller, so they are skipped.
    deletions = [('DELETE', r.name, r.type, r.data, r.extra)
                 for r in zone.list_records()
                 if r.type not in (RecordType.NS, RecordType.SOA)]

    if deletions:
        self._post_changeset(zone, deletions)
||||
def _update_single_value_record(self, record, name=None, type=None,
                                data=None, extra=None):
    """Replace a single-value record via a DELETE + CREATE changeset."""
    changes = [
        ('DELETE', record.name, record.type, record.data, record.extra),
        ('CREATE', name, type, data, extra),
    ]
    return self._post_changeset(record.zone, changes)
||||
def _update_multi_value_record(self, record, name=None, type=None,
                               data=None, extra=None):
    """
    Update one value of a multi-value record set.

    Route53 stores all values in a single ResourceRecordSet, so the
    whole set is deleted and re-created: sibling values (stashed in
    ``record.extra['_other_records']``) are re-submitted unchanged while
    the targeted value is replaced.
    """
    other_records = record.extra.get('_other_records', [])

    root = ET.Element('ChangeResourceRecordSetsRequest',
                      {'xmlns': NAMESPACE})
    change_batch = ET.SubElement(root, 'ChangeBatch')
    changes = ET.SubElement(change_batch, 'Changes')

    # Step 1: delete the existing record set (current value + siblings).
    delete = ET.SubElement(changes, 'Change')
    ET.SubElement(delete, 'Action').text = 'DELETE'

    del_set = ET.SubElement(delete, 'ResourceRecordSet')
    ET.SubElement(del_set, 'Name').text = record.name + '.' + \
        record.zone.domain
    ET.SubElement(del_set, 'Type').text = self.RECORD_TYPE_MAP[record.type]
    ET.SubElement(del_set, 'TTL').text = str(record.extra.get('ttl', '0'))

    del_values = ET.SubElement(del_set, 'ResourceRecords')

    entry = ET.SubElement(del_values, 'ResourceRecord')
    ET.SubElement(entry, 'Value').text = record.data
    for other in other_records:
        entry = ET.SubElement(del_values, 'ResourceRecord')
        ET.SubElement(entry, 'Value').text = other['data']

    # Step 2: re-create the set with the updated value; only the target
    # value changes, the sibling values are left as is.
    create = ET.SubElement(changes, 'Change')
    ET.SubElement(create, 'Action').text = 'CREATE'

    new_set = ET.SubElement(create, 'ResourceRecordSet')
    ET.SubElement(new_set, 'Name').text = name + '.' + record.zone.domain
    ET.SubElement(new_set, 'Type').text = self.RECORD_TYPE_MAP[type]
    ET.SubElement(new_set, 'TTL').text = str(extra.get('ttl', '0'))

    new_values = ET.SubElement(new_set, 'ResourceRecords')

    entry = ET.SubElement(new_values, 'ResourceRecord')
    ET.SubElement(entry, 'Value').text = data
    for other in other_records:
        entry = ET.SubElement(new_values, 'ResourceRecord')
        ET.SubElement(entry, 'Value').text = other['data']

    uri = API_ROOT + 'hostedzone/' + record.zone.id + '/rrset'
    self.connection.set_context({'zone_id': record.zone.id})
    response = self.connection.request(uri, method='POST',
                                       data=ET.tostring(root))

    return response.status == httplib.OK
||||
def _post_changeset(self, zone, changes_list):
    """POST a batch of ``(action, name, type, data, extra)`` changes."""
    root = ET.Element('ChangeResourceRecordSetsRequest',
                      {'xmlns': NAMESPACE})
    change_batch = ET.SubElement(root, 'ChangeBatch')
    changes = ET.SubElement(change_batch, 'Changes')

    for action, name, type_, data, extra in changes_list:
        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = action

        rr_set = ET.SubElement(change, 'ResourceRecordSet')
        ET.SubElement(rr_set, 'Name').text = name + '.' + zone.domain
        ET.SubElement(rr_set, 'Type').text = self.RECORD_TYPE_MAP[type_]
        ET.SubElement(rr_set, 'TTL').text = str(extra.get('ttl', '0'))

        rr_values = ET.SubElement(rr_set, 'ResourceRecords')
        rr_value = ET.SubElement(rr_values, 'ResourceRecord')
        ET.SubElement(rr_value, 'Value').text = data

    uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
    self.connection.set_context({'zone_id': zone.id})
    response = self.connection.request(uri, method='POST',
                                       data=ET.tostring(root))

    return response.status == httplib.OK
||||
def _to_zones(self, data):
    """Convert a ListHostedZones payload into a list of Zone objects."""
    xpath = fixxpath(xpath='HostedZones/HostedZone', namespace=NAMESPACE)
    return [self._to_zone(element) for element in data.findall(xpath)]
||||
def _to_zone(self, elem):
    """Convert a HostedZone XML element into a :class:`Zone`."""
    name = findtext(element=elem, xpath='Name', namespace=NAMESPACE)
    # The API returns ids in the form "/hostedzone/<id>".
    raw_id = findtext(element=elem, xpath='Id', namespace=NAMESPACE)
    id = raw_id.replace('/hostedzone/', '')
    comment = findtext(element=elem, xpath='Config/Comment',
                       namespace=NAMESPACE)
    rr_count = int(findtext(element=elem,
                            xpath='ResourceRecordSetCount',
                            namespace=NAMESPACE))

    extra = {'Comment': comment, 'ResourceRecordSetCount': rr_count}

    return Zone(id=id, domain=name, type='master', ttl=0, driver=self,
                extra=extra)
||||
def _to_records(self, data, zone):
    """
    Convert a ListResourceRecordSets payload into Record objects.

    Multi-value record sets are expanded into one Record per value; each
    such Record carries '_multi_value' and '_other_records' entries in
    its extra dict so updates can re-submit the sibling values.
    """
    records = []
    elems = data.findall(
        fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
                 namespace=NAMESPACE))
    for elem in elems:
        value_elems = elem.findall(fixxpath(
            xpath='ResourceRecords/ResourceRecord',
            namespace=NAMESPACE))
        is_multi_value = len(value_elems) > 1

        set_records = []

        for index in range(len(value_elems)):
            # One Record per value; special handling is needed so
            # updates of multi-value sets work correctly.
            record = self._to_record(elem=elem, zone=zone, index=index)
            record.extra['_multi_value'] = is_multi_value
            if is_multi_value:
                record.extra['_other_records'] = []
            set_records.append(record)

        # Cross-link the sibling values so an update can re-create them.
        if is_multi_value:
            for index, record in enumerate(set_records):
                for other_index, other in enumerate(set_records):
                    if index == other_index:
                        # Skip current record
                        continue

                    extra = copy.deepcopy(other.extra)
                    extra.pop('_multi_value')
                    extra.pop('_other_records')

                    item = {'name': other.name,
                            'data': other.data,
                            'type': other.type,
                            'extra': extra}
                    record.extra['_other_records'].append(item)

        records.extend(set_records)

    return records
||||
def _to_record(self, elem, zone, index=0):
    """
    Build a Record from a ResourceRecordSet element, using the value at
    position *index* of its ResourceRecords list.
    """
    fqdn = findtext(element=elem, xpath='Name', namespace=NAMESPACE)
    # Strip the trailing ".<zone domain>" to get the short name.
    name = fqdn[:-len(zone.domain) - 1]

    type = self._string_to_record_type(
        findtext(element=elem, xpath='Type', namespace=NAMESPACE))
    ttl = int(findtext(element=elem, xpath='TTL', namespace=NAMESPACE))

    value_elem = elem.findall(
        fixxpath(xpath='ResourceRecords/ResourceRecord',
                 namespace=NAMESPACE))[index]
    data = findtext(element=value_elem, xpath='Value',
                    namespace=NAMESPACE)

    extra = {'ttl': ttl}

    # MX and SRV values embed priority (and weight/port) inside the data
    # string; split those fields out into extra.
    if type == 'MX':
        priority, data = data.split()
        extra['priority'] = int(priority)
    elif type == 'SRV':
        priority, weight, port, data = data.split()
        extra['priority'] = int(priority)
        extra['weight'] = int(weight)
        extra['port'] = int(port)

    id = ':'.join((self.RECORD_TYPE_MAP[type], name))
    return Record(id=id, name=name, type=type, data=data, zone=zone,
                  driver=self, extra=extra)
||||
def _get_more(self, rtype, **kwargs):
    """Generator yielding all items of *rtype* across result pages."""
    last_key = None
    exhausted = False
    while not exhausted:
        items, last_key, exhausted = self._get_data(rtype, last_key,
                                                    **kwargs)
        for item in items:
            yield item
def _get_data(self, rtype, last_key, **kwargs):
|
||||
params = {}
|
||||
if last_key:
|
||||
params['name'] = last_key
|
||||
path = API_ROOT + 'hostedzone'
|
||||
|
||||
if rtype == 'zones':
|
||||
response = self.connection.request(path, params=params)
|
||||
transform_func = self._to_zones
|
||||
elif rtype == 'records':
|
||||
zone = kwargs['zone']
|
||||
path += '/%s/rrset' % (zone.id)
|
||||
self.connection.set_context({'zone_id': zone.id})
|
||||
response = self.connection.request(path, params=params)
|
||||
transform_func = self._to_records
|
||||
|
||||
if response.status == httplib.OK:
|
||||
is_truncated = findtext(element=response.object,
|
||||
xpath='IsTruncated',
|
||||
namespace=NAMESPACE)
|
||||
exhausted = is_truncated != 'true'
|
||||
last_key = findtext(element=response.object,
|
||||
xpath='NextRecordName',
|
||||
namespace=NAMESPACE)
|
||||
items = transform_func(data=response.object, **kwargs)
|
||||
return items, last_key, exhausted
|
||||
else:
|
||||
return [], None, True
|
||||
484
awx/lib/site-packages/libcloud/dns/drivers/zerigo.py
Normal file
484
awx/lib/site-packages/libcloud/dns/drivers/zerigo.py
Normal file
@@ -0,0 +1,484 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__all__ = [
|
||||
'ZerigoDNSDriver'
|
||||
]
|
||||
|
||||
|
||||
import copy
|
||||
import base64
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
try:
|
||||
from lxml import etree as ET
|
||||
except ImportError:
|
||||
from xml.etree import ElementTree as ET
|
||||
|
||||
from libcloud.utils.misc import merge_valid_keys, get_new_obj
|
||||
from libcloud.utils.xml import findtext, findall
|
||||
from libcloud.common.base import XmlResponse, ConnectionUserAndKey
|
||||
from libcloud.common.types import InvalidCredsError, LibcloudError
|
||||
from libcloud.common.types import MalformedResponseError
|
||||
from libcloud.dns.types import Provider, RecordType
|
||||
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
|
||||
from libcloud.dns.base import DNSDriver, Zone, Record
|
||||
|
||||
API_HOST = 'ns.zerigo.com'
|
||||
API_VERSION = '1.1'
|
||||
API_ROOT = '/api/%s/' % (API_VERSION)
|
||||
|
||||
VALID_ZONE_EXTRA_PARAMS = ['notes', 'tag-list', 'ns1', 'slave-nameservers']
|
||||
VALID_RECORD_EXTRA_PARAMS = ['notes', 'ttl', 'priority']
|
||||
|
||||
# Number of items per page (maximum limit is 1000)
|
||||
ITEMS_PER_PAGE = 100
|
||||
|
||||
|
||||
class ZerigoError(LibcloudError):
    """Error raised for a failed Zerigo API request."""

    def __init__(self, code, errors):
        # HTTP status code returned by the API.
        self.code = code
        # Error strings parsed out of the XML response body.
        self.errors = errors or []

    def __str__(self):
        return 'Errors: %s' % (', '.join(self.errors))

    def __repr__(self):
        return ('<ZerigoError response code=%s, errors count=%s>' % (
                self.code, len(self.errors)))
||||
class ZerigoDNSResponse(XmlResponse):
    """Response class mapping Zerigo API errors to libcloud exceptions."""

    def success(self):
        return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]

    def parse_error(self):
        """
        Translate error responses: 401 -> InvalidCredsError, 404 ->
        Zone/RecordDoesNotExistError (based on the request context),
        anything else except 503 -> ZerigoError with the parsed messages.
        """
        status = int(self.status)

        if status == 401:
            if not self.body:
                raise InvalidCredsError(str(self.status) + ': ' + self.error)
            else:
                raise InvalidCredsError(self.body)
        elif status == 404:
            context = self.connection.context
            if context['resource'] == 'zone':
                raise ZoneDoesNotExistError(value='', driver=self,
                                            zone_id=context['id'])
            elif context['resource'] == 'record':
                raise RecordDoesNotExistError(value='', driver=self,
                                              record_id=context['id'])
        elif status != 503:
            try:
                body = ET.XML(self.body)
            except Exception:
                # Bug fix: this was a bare "except:" which also swallowed
                # SystemExit and KeyboardInterrupt.
                raise MalformedResponseError('Failed to parse XML',
                                             body=self.body)

            errors = []
            for error in findall(element=body, xpath='error'):
                errors.append(error.text)

            raise ZerigoError(code=status, errors=errors)

        return self.body
||||
class ZerigoDNSConnection(ConnectionUserAndKey):
    """HTTP Basic-auth connection to the Zerigo DNS API."""

    host = API_HOST
    secure = True
    responseCls = ZerigoDNSResponse

    def add_default_headers(self, headers):
        """Attach the HTTP Basic Authorization header."""
        auth_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
        headers['Authorization'] = 'Basic %s' % (auth_b64.decode('utf-8'))
        return headers

    def request(self, action, params=None, data='', headers=None,
                method='GET'):
        """Issue a request; POST/PUT bodies are declared as XML."""
        if not headers:
            headers = {}
        if not params:
            params = {}

        if method in ("POST", "PUT"):
            # Bug fix: previously the whole headers dict was replaced
            # here, silently dropping any caller-supplied headers.
            headers['Content-Type'] = 'application/xml; charset=UTF-8'
        return super(ZerigoDNSConnection, self).request(action=action,
                                                        params=params,
                                                        data=data,
                                                        method=method,
                                                        headers=headers)
||||
class ZerigoDNSDriver(DNSDriver):
    """Driver for the Zerigo DNS API (v1.1)."""
    type = Provider.ZERIGO
    name = 'Zerigo DNS'
    website = 'http://www.zerigo.com/'
    connectionCls = ZerigoDNSConnection

    # libcloud RecordType -> Zerigo record-type string.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.GEO: 'GEO',
        RecordType.MX: 'MX',
        RecordType.NAPTR: 'NAPTR',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.REDIRECT: 'REDIRECT',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
        RecordType.URL: 'URL',
    }
||||
def iterate_zones(self):
    """Lazily page through all zones."""
    return self._get_more('zones')
||||
def iterate_records(self, zone):
    """Lazily page through all records of *zone*."""
    return self._get_more('records', zone=zone)
||||
def get_zone(self, zone_id):
    """Fetch a zone by id."""
    # Context lets the response class raise ZoneDoesNotExistError on 404.
    self.connection.set_context({'resource': 'zone', 'id': zone_id})
    data = self.connection.request(API_ROOT + 'zones/%s.xml' %
                                   (zone_id)).object
    return self._to_zone(elem=data)
||||
def get_record(self, zone_id, record_id):
|
||||
zone = self.get_zone(zone_id=zone_id)
|
||||
self.connection.set_context({'resource': 'record', 'id': record_id})
|
||||
path = API_ROOT + 'hosts/%s.xml' % (record_id)
|
||||
data = self.connection.request(path).object
|
||||
record = self._to_record(elem=data, zone=zone)
|
||||
return record
|
||||
|
||||
def create_zone(self, domain, type='master', ttl=None, extra=None):
|
||||
"""
|
||||
Create a new zone.
|
||||
|
||||
Provider API docs:
|
||||
https://www.zerigo.com/docs/apis/dns/1.1/zones/create
|
||||
|
||||
@inherits: :class:`DNSDriver.create_zone`
|
||||
"""
|
||||
path = API_ROOT + 'zones.xml'
|
||||
zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
|
||||
extra=extra)
|
||||
data = self.connection.request(action=path,
|
||||
data=ET.tostring(zone_elem),
|
||||
method='POST').object
|
||||
zone = self._to_zone(elem=data)
|
||||
return zone
|
||||
|
||||
def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
|
||||
"""
|
||||
Update an existing zone.
|
||||
|
||||
Provider API docs:
|
||||
https://www.zerigo.com/docs/apis/dns/1.1/zones/update
|
||||
|
||||
@inherits: :class:`DNSDriver.update_zone`
|
||||
"""
|
||||
if domain:
|
||||
raise LibcloudError('Domain cannot be changed', driver=self)
|
||||
|
||||
path = API_ROOT + 'zones/%s.xml' % (zone.id)
|
||||
zone_elem = self._to_zone_elem(domain=domain, type=type, ttl=ttl,
|
||||
extra=extra)
|
||||
response = self.connection.request(action=path,
|
||||
data=ET.tostring(zone_elem),
|
||||
method='PUT')
|
||||
assert response.status == httplib.OK
|
||||
|
||||
merged = merge_valid_keys(params=copy.deepcopy(zone.extra),
|
||||
valid_keys=VALID_ZONE_EXTRA_PARAMS,
|
||||
extra=extra)
|
||||
updated_zone = get_new_obj(obj=zone, klass=Zone,
|
||||
attributes={'type': type,
|
||||
'ttl': ttl,
|
||||
'extra': merged})
|
||||
return updated_zone
|
||||
|
||||
def create_record(self, name, zone, type, data, extra=None):
|
||||
"""
|
||||
Create a new record.
|
||||
|
||||
Provider API docs:
|
||||
https://www.zerigo.com/docs/apis/dns/1.1/hosts/create
|
||||
|
||||
@inherits: :class:`DNSDriver.create_record`
|
||||
"""
|
||||
path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
|
||||
record_elem = self._to_record_elem(name=name, type=type, data=data,
|
||||
extra=extra)
|
||||
response = self.connection.request(action=path,
|
||||
data=ET.tostring(record_elem),
|
||||
method='POST')
|
||||
assert response.status == httplib.CREATED
|
||||
record = self._to_record(elem=response.object, zone=zone)
|
||||
return record
|
||||
|
||||
def update_record(self, record, name=None, type=None, data=None,
|
||||
extra=None):
|
||||
path = API_ROOT + 'hosts/%s.xml' % (record.id)
|
||||
record_elem = self._to_record_elem(name=name, type=type, data=data,
|
||||
extra=extra)
|
||||
response = self.connection.request(action=path,
|
||||
data=ET.tostring(record_elem),
|
||||
method='PUT')
|
||||
assert response.status == httplib.OK
|
||||
|
||||
merged = merge_valid_keys(params=copy.deepcopy(record.extra),
|
||||
valid_keys=VALID_RECORD_EXTRA_PARAMS,
|
||||
extra=extra)
|
||||
updated_record = get_new_obj(obj=record, klass=Record,
|
||||
attributes={'type': type,
|
||||
'data': data,
|
||||
'extra': merged})
|
||||
return updated_record
|
||||
|
||||
def delete_zone(self, zone):
|
||||
path = API_ROOT + 'zones/%s.xml' % (zone.id)
|
||||
self.connection.set_context({'resource': 'zone', 'id': zone.id})
|
||||
response = self.connection.request(action=path, method='DELETE')
|
||||
return response.status == httplib.OK
|
||||
|
||||
def delete_record(self, record):
|
||||
path = API_ROOT + 'hosts/%s.xml' % (record.id)
|
||||
self.connection.set_context({'resource': 'record', 'id': record.id})
|
||||
response = self.connection.request(action=path, method='DELETE')
|
||||
return response.status == httplib.OK
|
||||
|
||||
def ex_get_zone_by_domain(self, domain):
|
||||
"""
|
||||
Retrieve a zone object by the domain name.
|
||||
|
||||
:param domain: The domain which should be used
|
||||
:type domain: ``str``
|
||||
|
||||
:rtype: :class:`Zone`
|
||||
"""
|
||||
path = API_ROOT + 'zones/%s.xml' % (domain)
|
||||
self.connection.set_context({'resource': 'zone', 'id': domain})
|
||||
data = self.connection.request(path).object
|
||||
zone = self._to_zone(elem=data)
|
||||
return zone
|
||||
|
||||
def ex_force_slave_axfr(self, zone):
|
||||
"""
|
||||
Force a zone transfer.
|
||||
|
||||
:param zone: Zone which should be used.
|
||||
:type zone: :class:`Zone`
|
||||
|
||||
:rtype: :class:`Zone`
|
||||
"""
|
||||
path = API_ROOT + 'zones/%s/force_slave_axfr.xml' % (zone.id)
|
||||
self.connection.set_context({'resource': 'zone', 'id': zone.id})
|
||||
response = self.connection.request(path, method='POST')
|
||||
assert response.status == httplib.ACCEPTED
|
||||
return zone
|
||||
|
||||
def _to_zone_elem(self, domain=None, type=None, ttl=None, extra=None):
|
||||
zone_elem = ET.Element('zone', {})
|
||||
|
||||
if domain:
|
||||
domain_elem = ET.SubElement(zone_elem, 'domain')
|
||||
domain_elem.text = domain
|
||||
|
||||
if type:
|
||||
ns_type_elem = ET.SubElement(zone_elem, 'ns-type')
|
||||
|
||||
if type == 'master':
|
||||
ns_type_elem.text = 'pri_sec'
|
||||
elif type == 'slave':
|
||||
if not extra or 'ns1' not in extra:
|
||||
raise LibcloudError('ns1 extra attribute is required ' +
|
||||
'when zone type is slave', driver=self)
|
||||
|
||||
ns_type_elem.text = 'sec'
|
||||
ns1_elem = ET.SubElement(zone_elem, 'ns1')
|
||||
ns1_elem.text = extra['ns1']
|
||||
elif type == 'std_master':
|
||||
# TODO: Each driver should provide supported zone types
|
||||
# Slave name servers are elsewhere
|
||||
if not extra or 'slave-nameservers' not in extra:
|
||||
raise LibcloudError('slave-nameservers extra ' +
|
||||
'attribute is required whenzone ' +
|
||||
'type is std_master', driver=self)
|
||||
|
||||
ns_type_elem.text = 'pri'
|
||||
slave_nameservers_elem = ET.SubElement(zone_elem,
|
||||
'slave-nameservers')
|
||||
slave_nameservers_elem.text = extra['slave-nameservers']
|
||||
|
||||
if ttl:
|
||||
default_ttl_elem = ET.SubElement(zone_elem, 'default-ttl')
|
||||
default_ttl_elem.text = str(ttl)
|
||||
|
||||
if extra and 'tag-list' in extra:
|
||||
tags = extra['tag-list']
|
||||
|
||||
tags_elem = ET.SubElement(zone_elem, 'tag-list')
|
||||
tags_elem.text = ' '.join(tags)
|
||||
|
||||
return zone_elem
|
||||
|
||||
def _to_record_elem(self, name=None, type=None, data=None, extra=None):
|
||||
record_elem = ET.Element('host', {})
|
||||
|
||||
if name:
|
||||
name_elem = ET.SubElement(record_elem, 'hostname')
|
||||
name_elem.text = name
|
||||
|
||||
if type is not None:
|
||||
type_elem = ET.SubElement(record_elem, 'host-type')
|
||||
type_elem.text = self.RECORD_TYPE_MAP[type]
|
||||
|
||||
if data:
|
||||
data_elem = ET.SubElement(record_elem, 'data')
|
||||
data_elem.text = data
|
||||
|
||||
if extra:
|
||||
if 'ttl' in extra:
|
||||
ttl_elem = ET.SubElement(record_elem, 'ttl',
|
||||
{'type': 'integer'})
|
||||
ttl_elem.text = str(extra['ttl'])
|
||||
|
||||
if 'priority' in extra:
|
||||
# Only MX and SRV records support priority
|
||||
priority_elem = ET.SubElement(record_elem, 'priority',
|
||||
{'type': 'integer'})
|
||||
|
||||
priority_elem.text = str(extra['priority'])
|
||||
|
||||
if 'notes' in extra:
|
||||
notes_elem = ET.SubElement(record_elem, 'notes')
|
||||
notes_elem.text = extra['notes']
|
||||
|
||||
return record_elem
|
||||
|
||||
def _to_zones(self, elem):
|
||||
zones = []
|
||||
|
||||
for item in findall(element=elem, xpath='zone'):
|
||||
zone = self._to_zone(elem=item)
|
||||
zones.append(zone)
|
||||
|
||||
return zones
|
||||
|
||||
def _to_zone(self, elem):
|
||||
id = findtext(element=elem, xpath='id')
|
||||
domain = findtext(element=elem, xpath='domain')
|
||||
type = findtext(element=elem, xpath='ns-type')
|
||||
type = 'master' if type.find('pri') == 0 else 'slave'
|
||||
ttl = findtext(element=elem, xpath='default-ttl')
|
||||
|
||||
hostmaster = findtext(element=elem, xpath='hostmaster')
|
||||
custom_ns = findtext(element=elem, xpath='custom-ns')
|
||||
custom_nameservers = findtext(element=elem, xpath='custom-nameservers')
|
||||
notes = findtext(element=elem, xpath='notes')
|
||||
nx_ttl = findtext(element=elem, xpath='nx-ttl')
|
||||
slave_nameservers = findtext(element=elem, xpath='slave-nameservers')
|
||||
tags = findtext(element=elem, xpath='tag-list')
|
||||
tags = tags.split(' ') if tags else []
|
||||
|
||||
extra = {'hostmaster': hostmaster, 'custom-ns': custom_ns,
|
||||
'custom-nameservers': custom_nameservers, 'notes': notes,
|
||||
'nx-ttl': nx_ttl, 'slave-nameservers': slave_nameservers,
|
||||
'tags': tags}
|
||||
zone = Zone(id=str(id), domain=domain, type=type, ttl=int(ttl),
|
||||
driver=self, extra=extra)
|
||||
return zone
|
||||
|
||||
def _to_records(self, elem, zone):
|
||||
records = []
|
||||
|
||||
for item in findall(element=elem, xpath='host'):
|
||||
record = self._to_record(elem=item, zone=zone)
|
||||
records.append(record)
|
||||
|
||||
return records
|
||||
|
||||
def _to_record(self, elem, zone):
|
||||
id = findtext(element=elem, xpath='id')
|
||||
name = findtext(element=elem, xpath='hostname')
|
||||
type = findtext(element=elem, xpath='host-type')
|
||||
type = self._string_to_record_type(type)
|
||||
data = findtext(element=elem, xpath='data')
|
||||
|
||||
notes = findtext(element=elem, xpath='notes', no_text_value=None)
|
||||
state = findtext(element=elem, xpath='state', no_text_value=None)
|
||||
fqdn = findtext(element=elem, xpath='fqdn', no_text_value=None)
|
||||
priority = findtext(element=elem, xpath='priority', no_text_value=None)
|
||||
ttl = findtext(element=elem, xpath='ttl', no_text_value=None)
|
||||
|
||||
if not name:
|
||||
name = None
|
||||
|
||||
if ttl:
|
||||
ttl = int(ttl)
|
||||
|
||||
extra = {'notes': notes, 'state': state, 'fqdn': fqdn,
|
||||
'priority': priority, 'ttl': ttl}
|
||||
|
||||
record = Record(id=id, name=name, type=type, data=data,
|
||||
zone=zone, driver=self, extra=extra)
|
||||
return record
|
||||
|
||||
def _get_more(self, rtype, **kwargs):
|
||||
exhausted = False
|
||||
last_key = None
|
||||
|
||||
while not exhausted:
|
||||
items, last_key, exhausted = self._get_data(rtype, last_key,
|
||||
**kwargs)
|
||||
|
||||
for item in items:
|
||||
yield item
|
||||
|
||||
def _get_data(self, rtype, last_key, **kwargs):
|
||||
# Note: last_key in this case really is a "last_page".
|
||||
# TODO: Update base driver and change last_key to something more
|
||||
# generic - e.g. marker
|
||||
params = {}
|
||||
params['per_page'] = ITEMS_PER_PAGE
|
||||
params['page'] = last_key + 1 if last_key else 1
|
||||
|
||||
if rtype == 'zones':
|
||||
path = API_ROOT + 'zones.xml'
|
||||
response = self.connection.request(path)
|
||||
transform_func = self._to_zones
|
||||
elif rtype == 'records':
|
||||
zone = kwargs['zone']
|
||||
path = API_ROOT + 'zones/%s/hosts.xml' % (zone.id)
|
||||
self.connection.set_context({'resource': 'zone', 'id': zone.id})
|
||||
response = self.connection.request(path, params=params)
|
||||
transform_func = self._to_records
|
||||
|
||||
exhausted = False
|
||||
result_count = int(response.headers.get('x-query-count', 0))
|
||||
|
||||
if (params['page'] * ITEMS_PER_PAGE) >= result_count:
|
||||
exhausted = True
|
||||
|
||||
if response.status == httplib.OK:
|
||||
items = transform_func(elem=response.object, **kwargs)
|
||||
return items, params['page'], exhausted
|
||||
else:
|
||||
return [], None, True
|
||||
49
awx/lib/site-packages/libcloud/dns/providers.py
Normal file
49
awx/lib/site-packages/libcloud/dns/providers.py
Normal file
@@ -0,0 +1,49 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.utils.misc import get_driver as get_provider_driver
|
||||
from libcloud.utils.misc import set_driver as set_provider_driver
|
||||
from libcloud.dns.types import Provider
|
||||
|
||||
# Maps a Provider constant to the (module path, class name) pair that
# implements the corresponding DNS driver. The class is imported lazily by
# get_driver() via libcloud.utils.misc.get_driver.
DRIVERS = {
    Provider.DUMMY:
    ('libcloud.dns.drivers.dummy', 'DummyDNSDriver'),
    Provider.LINODE:
    ('libcloud.dns.drivers.linode', 'LinodeDNSDriver'),
    Provider.ZERIGO:
    ('libcloud.dns.drivers.zerigo', 'ZerigoDNSDriver'),
    Provider.RACKSPACE:
    ('libcloud.dns.drivers.rackspace', 'RackspaceDNSDriver'),
    Provider.HOSTVIRTUAL:
    ('libcloud.dns.drivers.hostvirtual', 'HostVirtualDNSDriver'),
    Provider.ROUTE53:
    ('libcloud.dns.drivers.route53', 'Route53DNSDriver'),
    Provider.GANDI:
    ('libcloud.dns.drivers.gandi', 'GandiDNSDriver'),
    Provider.GOOGLE: ('libcloud.dns.drivers.google', 'GoogleDNSDriver'),
    # Deprecated
    Provider.RACKSPACE_US:
    ('libcloud.dns.drivers.rackspace', 'RackspaceUSDNSDriver'),
    Provider.RACKSPACE_UK:
    ('libcloud.dns.drivers.rackspace', 'RackspaceUKDNSDriver')
}


def get_driver(provider):
    """Return the DNS driver class registered for *provider*."""
    return get_provider_driver(DRIVERS, provider)


def set_driver(provider, module, klass):
    """Register (or override) the DNS driver for *provider*."""
    return set_provider_driver(DRIVERS, provider, module, klass)
|
||||
115
awx/lib/site-packages/libcloud/dns/types.py
Normal file
115
awx/lib/site-packages/libcloud/dns/types.py
Normal file
@@ -0,0 +1,115 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.common.types import LibcloudError
|
||||
|
||||
__all__ = [
|
||||
'Provider',
|
||||
'RecordType',
|
||||
'ZoneError',
|
||||
'ZoneDoesNotExistError',
|
||||
'ZoneAlreadyExistsError',
|
||||
'RecordError',
|
||||
'RecordDoesNotExistError',
|
||||
'RecordAlreadyExistsError'
|
||||
]
|
||||
|
||||
|
||||
class Provider(object):
    """
    Constants identifying the supported DNS providers.

    The string values are the keys used in the DRIVERS registry in
    ``libcloud.dns.providers``.
    """
    DUMMY = 'dummy'
    LINODE = 'linode'
    RACKSPACE = 'rackspace'
    ZERIGO = 'zerigo'
    ROUTE53 = 'route53'
    HOSTVIRTUAL = 'hostvirtual'
    GANDI = 'gandi'
    GOOGLE = 'google'

    # Deprecated
    RACKSPACE_US = 'rackspace_us'
    RACKSPACE_UK = 'rackspace_uk'
|
||||
|
||||
|
||||
class RecordType(object):
    """
    DNS record type.
    """
    A = 'A'
    AAAA = 'AAAA'
    MX = 'MX'
    NS = 'NS'
    CNAME = 'CNAME'
    DNAME = 'DNAME'
    TXT = 'TXT'
    # Bug fix: PTR was previously declared twice; the duplicate assignment
    # has been removed (the value is unchanged).
    PTR = 'PTR'
    SOA = 'SOA'
    SPF = 'SPF'
    SRV = 'SRV'
    NAPTR = 'NAPTR'
    REDIRECT = 'REDIRECT'
    GEO = 'GEO'
    URL = 'URL'
    WKS = 'WKS'
    LOC = 'LOC'
|
||||
|
||||
|
||||
class ZoneError(LibcloudError):
    """
    Base error for zone related failures; carries the offending zone id.
    """
    error_type = 'ZoneError'
    kwargs = ('zone_id', )

    def __init__(self, value, driver, zone_id):
        self.zone_id = zone_id
        super(ZoneError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        template = '<%s in %s, zone_id=%s, value=%s>'
        fields = (self.error_type, repr(self.driver),
                  self.zone_id, self.value)
        return template % fields
|
||||
|
||||
|
||||
class ZoneDoesNotExistError(ZoneError):
    """Raised when the referenced zone does not exist."""
    error_type = 'ZoneDoesNotExistError'


class ZoneAlreadyExistsError(ZoneError):
    """Raised when a zone with the same identity already exists."""
    error_type = 'ZoneAlreadyExistsError'
|
||||
|
||||
|
||||
class RecordError(LibcloudError):
    """
    Base error for record related failures; carries the offending
    record id.

    NOTE(review): unlike ZoneError this class declares no ``kwargs``
    tuple — presumably intentional upstream; confirm against
    LibcloudError before adding one.
    """
    error_type = 'RecordError'

    def __init__(self, value, driver, record_id):
        self.record_id = record_id
        super(RecordError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        template = '<%s in %s, record_id=%s, value=%s>'
        fields = (self.error_type, repr(self.driver),
                  self.record_id, self.value)
        return template % fields
|
||||
|
||||
|
||||
class RecordDoesNotExistError(RecordError):
    """Raised when the referenced record does not exist."""
    error_type = 'RecordDoesNotExistError'


class RecordAlreadyExistsError(RecordError):
    """Raised when a record with the same identity already exists."""
    error_type = 'RecordAlreadyExistsError'
|
||||
158
awx/lib/site-packages/libcloud/httplib_ssl.py
Normal file
158
awx/lib/site-packages/libcloud/httplib_ssl.py
Normal file
@@ -0,0 +1,158 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Subclass for httplib.HTTPSConnection with optional certificate name
|
||||
verification, depending on libcloud.security settings.
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import ssl
|
||||
import warnings
|
||||
|
||||
import libcloud.security
|
||||
from libcloud.utils.py3 import httplib
|
||||
|
||||
|
||||
class LibcloudHTTPSConnection(httplib.HTTPSConnection):
    """
    LibcloudHTTPSConnection

    Subclass of HTTPSConnection which verifies certificate names
    if and only if CA certificates are available.
    """
    verify = True        # verify by default
    ca_cert = None       # no default CA Certificate

    def __init__(self, *args, **kwargs):
        """
        Constructor
        """
        self._setup_verify()
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def _setup_verify(self):
        """
        Setup Verify SSL or not

        Reads security module's VERIFY_SSL_CERT and toggles whether
        the class overrides the connect() class method or runs the
        inherited httplib.HTTPSConnection connect()
        """
        self.verify = libcloud.security.VERIFY_SSL_CERT

        if self.verify:
            self._setup_ca_cert()
        else:
            warnings.warn(libcloud.security.VERIFY_SSL_DISABLED_MSG)

    def _setup_ca_cert(self):
        """
        Setup CA Certs

        Search in CA_CERTS_PATH for valid candidates and
        return first match. Otherwise, complain about certs
        not being available.
        """
        if not self.verify:
            return

        ca_certs_available = [cert
                              for cert in libcloud.security.CA_CERTS_PATH
                              if os.path.exists(cert) and os.path.isfile(cert)]
        if ca_certs_available:
            # use first available certificate
            self.ca_cert = ca_certs_available[0]
        else:
            raise RuntimeError(
                libcloud.security.CA_CERTS_UNAVAILABLE_ERROR_MSG)

    def connect(self):
        """
        Connect

        Checks if verification is toggled; if not, just call
        httplib.HTTPSConnection's connect
        """
        if not self.verify:
            return httplib.HTTPSConnection.connect(self)

        # otherwise, create a connection and verify the hostname
        # use socket.create_connection (in 2.6+) if possible
        if getattr(socket, 'create_connection', None):
            sock = socket.create_connection((self.host, self.port),
                                            self.timeout)
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self.host, self.port))
        # NOTE(review): ssl.wrap_socket and PROTOCOL_TLSv1 are deprecated
        # in modern Python; kept as-is here because this is vendored code.
        self.sock = ssl.wrap_socket(sock,
                                    self.key_file,
                                    self.cert_file,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    ca_certs=self.ca_cert,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
        cert = self.sock.getpeercert()
        if not self._verify_hostname(self.host, cert):
            raise ssl.SSLError('Failed to verify hostname')

    def _verify_hostname(self, hostname, cert):
        """
        Verify hostname against peer cert

        Check both commonName and entries in subjectAltName, using a
        rudimentary glob to dns regex check to find matches
        """
        common_name = self._get_common_name(cert)
        alt_names = self._get_subject_alt_names(cert)

        # replace * with alphanumeric and dash
        # replace . with literal .
        # http://www.dns.net/dnsrd/trick.html#legal-hostnames
        valid_patterns = [
            re.compile('^' + pattern.replace(r".", r"\.")
                       .replace(r"*", r"[0-9A-Za-z\-]+") + '$')
            for pattern in (set(common_name) | set(alt_names))]

        return any(
            pattern.search(hostname)
            for pattern in valid_patterns
        )

    def _get_subject_alt_names(self, cert):
        """
        Get SubjectAltNames

        Retrieve 'subjectAltName' attributes from cert data structure
        """
        if 'subjectAltName' not in cert:
            values = []
        else:
            values = [value
                      for field, value in cert['subjectAltName']
                      if field == 'DNS']
        return values

    def _get_common_name(self, cert):
        """
        Get Common Name

        Retrieve 'commonName' attribute from cert data structure
        """
        if 'subject' not in cert:
            # Bug fix: this previously returned None, which made
            # _verify_hostname crash on set(None) for certs without a
            # 'subject' entry. An empty list keeps the caller working.
            return []
        values = [value[0][1]
                  for value in cert['subject']
                  if value[0][0] == 'commonName']
        return values
|
||||
25
awx/lib/site-packages/libcloud/loadbalancer/__init__.py
Normal file
25
awx/lib/site-packages/libcloud/loadbalancer/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Module for working with Load Balancers
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'base',
|
||||
'providers',
|
||||
'types',
|
||||
'drivers'
|
||||
]
|
||||
346
awx/lib/site-packages/libcloud/loadbalancer/base.py
Normal file
346
awx/lib/site-packages/libcloud/loadbalancer/base.py
Normal file
@@ -0,0 +1,346 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.common.base import ConnectionKey, BaseDriver
|
||||
from libcloud.common.types import LibcloudError
|
||||
|
||||
__all__ = [
|
||||
'Member',
|
||||
'LoadBalancer',
|
||||
'Algorithm',
|
||||
'Driver',
|
||||
'DEFAULT_ALGORITHM'
|
||||
]
|
||||
|
||||
|
||||
class Member(object):
    """
    Represents a load balancer member.
    """

    def __init__(self, id, ip, port, balancer=None, extra=None):
        """
        :param id: Member ID.
        :type id: ``str``

        :param ip: IP address of this member.
        :param ip: ``str``

        :param port: Port of this member
        :param port: ``str``

        :param balancer: Balancer this member is attached to. (optional)
        :param balancer: :class:`.LoadBalancer`

        :param extra: Provider specific attributes.
        :type extra: ``dict``
        """
        # A falsy id (None, 0, '') is normalized to None; otherwise the id
        # is always stored as a string.
        self.id = None if not id else str(id)
        self.ip = ip
        self.port = port
        self.balancer = balancer
        self.extra = extra if extra else {}

    def __repr__(self):
        return '<Member: id=%s, address=%s:%s>' % (self.id, self.ip,
                                                   self.port)
|
||||
|
||||
|
||||
class LoadBalancer(object):
    """
    Provide a common interface for handling Load Balancers.
    """

    def __init__(self, id, name, state, ip, port, driver, extra=None):
        """
        :param id: Load balancer ID.
        :type id: ``str``

        :param name: Load balancer name.
        :type name: ``str``

        :param state: State this loadbalancer is in.
        :type state: :class:`libcloud.loadbalancer.types.State`

        :param ip: IP address of this loadbalancer.
        :type ip: ``str``

        :param port: Port of this loadbalancer.
        :type port: ``int``

        :param driver: Driver this loadbalancer belongs to.
        :type driver: :class:`.Driver`

        :param extra: Provider specific attributes. (optional)
        :type extra: ``dict``
        """
        # A falsy id (None, 0, '') is normalized to None; otherwise the id
        # is always stored as a string.
        self.id = None if not id else str(id)
        self.name = name
        self.state = state
        self.ip = ip
        self.port = port
        self.driver = driver
        self.extra = extra if extra else {}

    # The following helpers simply delegate to the owning driver.

    def attach_compute_node(self, node):
        return self.driver.balancer_attach_compute_node(balancer=self,
                                                        node=node)

    def attach_member(self, member):
        return self.driver.balancer_attach_member(balancer=self,
                                                  member=member)

    def detach_member(self, member):
        return self.driver.balancer_detach_member(balancer=self,
                                                  member=member)

    def list_members(self):
        return self.driver.balancer_list_members(balancer=self)

    def destroy(self):
        return self.driver.destroy_balancer(balancer=self)

    def __repr__(self):
        return '<LoadBalancer: id=%s, name=%s, state=%s>' % (self.id,
                                                             self.name,
                                                             self.state)
|
||||
|
||||
|
||||
class Algorithm(object):
    """
    Represents a load balancing algorithm.

    Drivers translate these integer constants to provider-specific values
    via their _ALGORITHM_TO_VALUE_MAP / _VALUE_TO_ALGORITHM_MAP tables.
    """

    RANDOM = 0
    ROUND_ROBIN = 1
    LEAST_CONNECTIONS = 2
    WEIGHTED_ROUND_ROBIN = 3
    WEIGHTED_LEAST_CONNECTIONS = 4


# Algorithm used when the caller does not specify one.
DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN
|
||||
|
||||
|
||||
class Driver(BaseDriver):
|
||||
"""
|
||||
A base Driver class to derive from
|
||||
|
||||
This class is always subclassed by a specific driver.
|
||||
"""
|
||||
|
||||
name = None
|
||||
website = None
|
||||
|
||||
connectionCls = ConnectionKey
|
||||
_ALGORITHM_TO_VALUE_MAP = {}
|
||||
_VALUE_TO_ALGORITHM_MAP = {}
|
||||
|
||||
def __init__(self, key, secret=None, secure=True, host=None,
|
||||
port=None, **kwargs):
|
||||
super(Driver, self).__init__(key=key, secret=secret, secure=secure,
|
||||
host=host, port=port, **kwargs)
|
||||
|
||||
def list_protocols(self):
|
||||
"""
|
||||
Return a list of supported protocols.
|
||||
|
||||
:rtype: ``list`` of ``str``
|
||||
"""
|
||||
raise NotImplementedError(
|
||||
'list_protocols not implemented for this driver')
|
||||
|
||||
def list_balancers(self):
|
||||
"""
|
||||
List all loadbalancers
|
||||
|
||||
:rtype: ``list`` of :class:`LoadBalancer`
|
||||
"""
|
||||
raise NotImplementedError(
|
||||
'list_balancers not implemented for this driver')
|
||||
|
||||
def create_balancer(self, name, port, protocol, algorithm, members):
|
||||
"""
|
||||
Create a new load balancer instance
|
||||
|
||||
:param name: Name of the new load balancer (required)
|
||||
:type name: ``str``
|
||||
|
||||
:param port: Port the load balancer should listen on, defaults to 80
|
||||
:type port: ``str``
|
||||
|
||||
:param protocol: Loadbalancer protocol, defaults to http.
|
||||
:type protocol: ``str``
|
||||
|
||||
:param members: list of Members to attach to balancer
|
||||
:type members: ``list`` of :class:`Member`
|
||||
|
||||
:param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN.
|
||||
:type algorithm: :class:`Algorithm`
|
||||
|
||||
:rtype: :class:`LoadBalancer`
|
||||
"""
|
||||
raise NotImplementedError(
|
||||
'create_balancer not implemented for this driver')
|
||||
|
||||
def destroy_balancer(self, balancer):
|
||||
"""
|
||||
Destroy a load balancer
|
||||
|
||||
:param balancer: LoadBalancer which should be used
|
||||
:type balancer: :class:`LoadBalancer`
|
||||
|
||||
:return: ``True`` if the destroy was successful, otherwise ``False``.
|
||||
:rtype: ``bool``
|
||||
"""
|
||||
|
||||
raise NotImplementedError(
|
||||
'destroy_balancer not implemented for this driver')
|
||||
|
||||
def get_balancer(self, balancer_id):
|
||||
"""
|
||||
Return a :class:`LoadBalancer` object.
|
||||
|
||||
:param balancer_id: id of a load balancer you want to fetch
|
||||
:type balancer_id: ``str``
|
||||
|
||||
:rtype: :class:`LoadBalancer`
|
||||
"""
|
||||
|
||||
raise NotImplementedError(
|
||||
'get_balancer not implemented for this driver')
|
||||
|
||||
def update_balancer(self, balancer, **kwargs):
|
||||
"""
|
||||
Sets the name, algorithm, protocol, or port on a load balancer.
|
||||
|
||||
:param balancer: LoadBalancer which should be used
|
||||
:type balancer: :class:`LoadBalancer`
|
||||
|
||||
:param name: New load balancer name
|
||||
:type name: ``str``
|
||||
|
||||
:param algorithm: New load balancer algorithm
|
||||
:type algorithm: :class:`Algorithm`
|
||||
|
||||
:param protocol: New load balancer protocol
|
||||
:type protocol: ``str``
|
||||
|
||||
:param port: New load balancer port
|
||||
:type port: ``int``
|
||||
|
||||
:rtype: :class:`LoadBalancer`
|
||||
"""
|
||||
raise NotImplementedError(
|
||||
'update_balancer not implemented for this driver')
|
||||
|
||||
def balancer_attach_compute_node(self, balancer, node):
    """
    Join a compute node to the load balancer by wrapping it in a Member.

    The node's first public IP address is paired with the balancer's own
    port; actual attachment is delegated to
    :meth:`balancer_attach_member`.

    :param balancer: LoadBalancer which should be used
    :type balancer: :class:`LoadBalancer`

    :param node: Node to join to the balancer
    :type node: :class:`Node`

    :return: Member after joining the balancer.
    :rtype: :class:`Member`
    """
    wrapped = Member(id=None, ip=node.public_ips[0], port=balancer.port)
    return self.balancer_attach_member(balancer, wrapped)
|
||||
|
||||
def balancer_attach_member(self, balancer, member):
    """
    Add the given member to the load balancer.

    :param balancer: LoadBalancer which should be used
    :type balancer: :class:`LoadBalancer`

    :param member: Member to join to the balancer
    :type member: :class:`Member`

    :return: Member after joining the balancer.
    :rtype: :class:`Member`
    """
    raise NotImplementedError(
        'balancer_attach_member not implemented for this driver')
|
||||
|
||||
def balancer_detach_member(self, balancer, member):
    """
    Remove the given member from the load balancer.

    :param balancer: LoadBalancer which should be used
    :type balancer: :class:`LoadBalancer`

    :param member: Member which should be used
    :type member: :class:`Member`

    :return: ``True`` if member detach was successful, otherwise ``False``.
    :rtype: ``bool``
    """
    raise NotImplementedError(
        'balancer_detach_member not implemented for this driver')
|
||||
|
||||
def balancer_list_members(self, balancer):
    """
    Fetch all members currently attached to the load balancer.

    :param balancer: LoadBalancer which should be used
    :type balancer: :class:`LoadBalancer`

    :rtype: ``list`` of :class:`Member`
    """
    raise NotImplementedError(
        'balancer_list_members not implemented for this driver')
|
||||
|
||||
def list_supported_algorithms(self):
    """
    List the balancing algorithms this driver understands.

    The supported set is derived from the driver's
    ``_ALGORITHM_TO_VALUE_MAP`` lookup table.

    :rtype: ``list`` of ``str``
    """
    return [algorithm for algorithm in self._ALGORITHM_TO_VALUE_MAP]
|
||||
|
||||
def _value_to_algorithm(self, value):
|
||||
"""
|
||||
Return :class`Algorithm` based on the value.
|
||||
|
||||
:param value: Algorithm name (e.g. http, tcp, ...).
|
||||
:type value: ``str``
|
||||
|
||||
@rype :class:`Algorithm`
|
||||
"""
|
||||
try:
|
||||
return self._VALUE_TO_ALGORITHM_MAP[value]
|
||||
except KeyError:
|
||||
raise LibcloudError(value='Invalid value: %s' % (value),
|
||||
driver=self)
|
||||
|
||||
def _algorithm_to_value(self, algorithm):
|
||||
"""
|
||||
Return string value for the provided algorithm.
|
||||
|
||||
:param value: Algorithm enum.
|
||||
:type value: :class:`Algorithm`
|
||||
|
||||
@rype ``str``
|
||||
"""
|
||||
try:
|
||||
return self._ALGORITHM_TO_VALUE_MAP[algorithm]
|
||||
except KeyError:
|
||||
raise LibcloudError(value='Invalid algorithm: %s' % (algorithm),
|
||||
driver=self)
|
||||
@@ -0,0 +1,19 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Driver modules available under libcloud.loadbalancer.drivers.
__all__ = [
    'rackspace',
    'gogrid'
]
|
||||
136
awx/lib/site-packages/libcloud/loadbalancer/drivers/brightbox.py
Normal file
136
awx/lib/site-packages/libcloud/loadbalancer/drivers/brightbox.py
Normal file
@@ -0,0 +1,136 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.common.brightbox import BrightboxConnection
|
||||
from libcloud.loadbalancer.base import Driver, Algorithm, Member
|
||||
from libcloud.loadbalancer.base import LoadBalancer
|
||||
from libcloud.loadbalancer.types import State
|
||||
from libcloud.utils.misc import reverse_dict
|
||||
|
||||
# Brightbox API version; embedded in every request path.
API_VERSION = '1.0'
|
||||
|
||||
|
||||
class BrightboxLBDriver(Driver):
    """
    Brightbox Cloud load balancer driver.

    Talks to the Brightbox REST API through
    :class:`BrightboxConnection`.
    """
    connectionCls = BrightboxConnection

    name = 'Brightbox'
    website = 'http://www.brightbox.co.uk/'

    # Map Brightbox balancer status strings to libcloud states.
    # Any status missing from this map is reported as State.UNKNOWN.
    LB_STATE_MAP = {
        'creating': State.PENDING,
        'active': State.RUNNING,
        'deleting': State.UNKNOWN,
        'deleted': State.UNKNOWN,
        'failing': State.UNKNOWN,
        'failed': State.UNKNOWN,
    }

    _VALUE_TO_ALGORITHM_MAP = {
        'round-robin': Algorithm.ROUND_ROBIN,
        'least-connections': Algorithm.LEAST_CONNECTIONS
    }

    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)

    def list_protocols(self):
        """
        Return the protocols supported by Brightbox load balancers.

        :rtype: ``list`` of ``str``
        """
        return ['tcp', 'http']

    def list_balancers(self):
        """
        @inherits: :class:`Driver.list_balancers`
        """
        data = self.connection.request('/%s/load_balancers' % API_VERSION) \
                   .object

        return list(map(self._to_balancer, data))

    def create_balancer(self, name, port, protocol, algorithm, members):
        """
        @inherits: :class:`Driver.create_balancer`
        """
        # The health check reuses the listener's protocol and port.
        response = self._post(
            '/%s/load_balancers' % API_VERSION,
            {'name': name,
             'nodes': list(map(self._member_to_node, members)),
             'policy': self._algorithm_to_value(algorithm),
             'listeners': [{'in': port, 'out': port, 'protocol': protocol}],
             'healthcheck': {'type': protocol, 'port': port}}
        )

        return self._to_balancer(response.object)

    def destroy_balancer(self, balancer):
        """
        @inherits: :class:`Driver.destroy_balancer`
        """
        response = self.connection.request('/%s/load_balancers/%s' %
                                           (API_VERSION, balancer.id),
                                           method='DELETE')

        # Brightbox destroys asynchronously and answers 202 Accepted.
        return response.status == httplib.ACCEPTED

    def get_balancer(self, balancer_id):
        """
        @inherits: :class:`Driver.get_balancer`
        """
        data = self.connection.request(
            '/%s/load_balancers/%s' % (API_VERSION, balancer_id)).object
        return self._to_balancer(data)

    def balancer_attach_compute_node(self, balancer, node):
        """
        @inherits: :class:`Driver.balancer_attach_compute_node`
        """
        # Brightbox attaches whole nodes by id, so a Node can be passed
        # straight through to balancer_attach_member.
        return self.balancer_attach_member(balancer, node)

    def balancer_attach_member(self, balancer, member):
        """
        @inherits: :class:`Driver.balancer_attach_member`
        """
        path = '/%s/load_balancers/%s/add_nodes' % (API_VERSION, balancer.id)

        self._post(path, {'nodes': [self._member_to_node(member)]})

        return member

    def balancer_detach_member(self, balancer, member):
        """
        @inherits: :class:`Driver.balancer_detach_member`
        """
        path = '/%s/load_balancers/%s/remove_nodes' % (API_VERSION,
                                                       balancer.id)

        response = self._post(path, {'nodes': [self._member_to_node(member)]})

        return response.status == httplib.ACCEPTED

    def balancer_list_members(self, balancer):
        """
        @inherits: :class:`Driver.balancer_list_members`
        """
        path = '/%s/load_balancers/%s' % (API_VERSION, balancer.id)

        data = self.connection.request(path).object

        # NOTE: replaced an assigned lambda (PEP 8 E731) with a list
        # comprehension.
        return [self._node_to_member(node, balancer)
                for node in data['nodes']]

    def _post(self, path, data=None):
        """
        Issue a JSON POST request and return the raw response.

        FIX: the default used to be a shared mutable dict (``data={}``);
        a ``None`` sentinel is used instead.
        """
        if data is None:
            data = {}
        headers = {'Content-Type': 'application/json'}

        return self.connection.request(path, data=data, headers=headers,
                                       method='POST')

    def _to_balancer(self, data):
        # Convert one API load_balancer document into a LoadBalancer.
        return LoadBalancer(
            id=data['id'],
            name=data['name'],
            state=self.LB_STATE_MAP.get(data['status'], State.UNKNOWN),
            ip=self._public_ip(data),
            port=data['listeners'][0]['in'],
            driver=self.connection.driver
        )

    def _member_to_node(self, member):
        # The API only needs the node identifier.
        return {'node': member.id}

    def _node_to_member(self, data, balancer):
        # Per-node ip/port are not exposed by the balancer document.
        return Member(id=data['id'], ip=None, port=None, balancer=balancer)

    def _public_ip(self, data):
        # First mapped cloud IP, or None when no cloud IP is mapped yet.
        if len(data['cloud_ips']) > 0:
            ip = data['cloud_ips'][0]['public_ip']
        else:
            ip = None

        return ip
|
||||
@@ -0,0 +1,178 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.common.cloudstack import CloudStackDriverMixIn
|
||||
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
|
||||
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
|
||||
from libcloud.loadbalancer.types import Provider
|
||||
from libcloud.loadbalancer.types import State
|
||||
from libcloud.utils.misc import reverse_dict
|
||||
|
||||
|
||||
class CloudStackLBDriver(CloudStackDriverMixIn, Driver):
    """Driver for CloudStack load balancers."""

    api_name = 'cloudstack_lb'
    name = 'CloudStack'
    website = 'http://cloudstack.org/'
    type = Provider.CLOUDSTACK

    # Map CloudStack algorithm identifiers to libcloud Algorithm values.
    _VALUE_TO_ALGORITHM_MAP = {
        'roundrobin': Algorithm.ROUND_ROBIN,
        'leastconn': Algorithm.LEAST_CONNECTIONS
    }
    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)

    # Any rule state missing from this map is reported as State.UNKNOWN.
    LB_STATE_MAP = {
        'Active': State.RUNNING,
    }

    def __init__(self, key, secret=None, secure=True, host=None,
                 path=None, port=None, *args, **kwargs):
        """
        @inherits: :class:`Driver.__init__`
        """
        # Fall back to the class-level host/path when not passed
        # explicitly, then persist explicit values on the instance so
        # later requests use them.
        host = host if host else self.host
        path = path if path else self.path

        if path is not None:
            self.path = path

        if host is not None:
            self.host = host

        # When using the generic CloudStack provider (not a
        # preconfigured subclass), the endpoint must be fully specified
        # by the caller.
        if (self.type == Provider.CLOUDSTACK) and (not host or not path):
            raise Exception('When instantiating CloudStack driver directly ' +
                            'you also need to provide host and path argument')

        super(CloudStackLBDriver, self).__init__(key=key, secret=secret,
                                                 secure=secure,
                                                 host=host, port=port)

    def list_protocols(self):
        """
        We don't actually have any protocol awareness beyond TCP.

        :rtype: ``list`` of ``str``
        """
        return ['tcp']

    def list_balancers(self):
        """
        List all load balancer rules known to the account.

        :rtype: ``list`` of :class:`LoadBalancer`
        """
        balancers = self._sync_request(command='listLoadBalancerRules',
                                       method='GET')
        # The key is absent when no rules exist.
        balancers = balancers.get('loadbalancerrule', [])
        return [self._to_balancer(balancer) for balancer in balancers]

    def get_balancer(self, balancer_id):
        """
        Fetch a single load balancer rule by id.

        :param balancer_id: id of the rule to fetch
        :type balancer_id: ``str``

        :rtype: :class:`LoadBalancer`
        """
        balancer = self._sync_request(command='listLoadBalancerRules',
                                      params={'id': balancer_id},
                                      method='GET')
        balancer = balancer.get('loadbalancerrule', [])
        if not balancer:
            raise Exception("no such load balancer: " + str(balancer_id))
        return self._to_balancer(balancer[0])

    def create_balancer(self, name, members, protocol='http', port=80,
                        algorithm=DEFAULT_ALGORITHM, location=None,
                        private_port=None):
        """
        @inherits: :class:`Driver.create_balancer`

        :param location: Location
        :type location: :class:`NodeLocation`

        :param private_port: Private port
        :type private_port: ``int``
        """
        # Default to the first zone reported by the API when no
        # location is given.
        if location is None:
            locations = self._sync_request(command='listZones', method='GET')
            location = locations['zone'][0]['id']
        else:
            location = location.id
        # Members listen on the public port unless told otherwise.
        if private_port is None:
            private_port = port

        # A public IP must be acquired first; the balancer rule is then
        # bound to it.
        result = self._async_request(command='associateIpAddress',
                                     params={'zoneid': location},
                                     method='GET')
        public_ip = result['ipaddress']

        result = self._sync_request(
            command='createLoadBalancerRule',
            params={'algorithm': self._ALGORITHM_TO_VALUE_MAP[algorithm],
                    'name': name,
                    'privateport': private_port,
                    'publicport': port,
                    'publicipid': public_ip['id']},
            method='GET')

        balancer = self._to_balancer(result['loadbalancer'])

        for member in members:
            balancer.attach_member(member)

        return balancer

    def destroy_balancer(self, balancer):
        """
        @inherits: :class:`Driver.destroy_balancer`
        """
        # Remove the rule, then release the public IP it was bound to.
        self._async_request(command='deleteLoadBalancerRule',
                            params={'id': balancer.id},
                            method='GET')
        self._async_request(command='disassociateIpAddress',
                            params={'id': balancer.ex_public_ip_id},
                            method='GET')

    def balancer_attach_member(self, balancer, member):
        """
        @inherits: :class:`Driver.balancer_attach_member`
        """
        # Members always use the balancer's private port.
        member.port = balancer.ex_private_port
        self._async_request(command='assignToLoadBalancerRule',
                            params={'id': balancer.id,
                                    'virtualmachineids': member.id},
                            method='GET')
        return True

    def balancer_detach_member(self, balancer, member):
        """
        @inherits: :class:`Driver.balancer_detach_member`
        """
        self._async_request(command='removeFromLoadBalancerRule',
                            params={'id': balancer.id,
                                    'virtualmachineids': member.id},
                            method='GET')
        return True

    def balancer_list_members(self, balancer):
        """
        @inherits: :class:`Driver.balancer_list_members`
        """
        members = self._sync_request(command='listLoadBalancerRuleInstances',
                                     params={'id': balancer.id},
                                     method='GET')
        members = members['loadbalancerruleinstance']
        return [self._to_member(m, balancer.ex_private_port, balancer)
                for m in members]

    def _to_balancer(self, obj):
        # Convert one loadbalancerrule document to a LoadBalancer and
        # stash the CloudStack-specific fields as ex_ attributes.
        balancer = LoadBalancer(
            id=obj['id'],
            name=obj['name'],
            state=self.LB_STATE_MAP.get(obj['state'], State.UNKNOWN),
            ip=obj['publicip'],
            port=obj['publicport'],
            driver=self.connection.driver
        )
        balancer.ex_private_port = obj['privateport']
        balancer.ex_public_ip_id = obj['publicipid']
        return balancer

    def _to_member(self, obj, port, balancer):
        # The instance's first NIC supplies the member address.
        return Member(
            id=obj['id'],
            ip=obj['nic'][0]['ipaddress'],
            port=port,
            balancer=balancer
        )
|
||||
350
awx/lib/site-packages/libcloud/loadbalancer/drivers/elb.py
Normal file
350
awx/lib/site-packages/libcloud/loadbalancer/drivers/elb.py
Normal file
@@ -0,0 +1,350 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Public API of this module.
__all__ = [
    'ElasticLBDriver'
]
|
||||
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.xml import findtext, findall
|
||||
from libcloud.loadbalancer.types import State
|
||||
from libcloud.loadbalancer.base import Driver, LoadBalancer, Member
|
||||
from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection
|
||||
|
||||
|
||||
# ELB API version this driver speaks.
VERSION = '2012-06-01'
# Endpoint template; interpolated with the AWS region name.
HOST = 'elasticloadbalancing.%s.amazonaws.com'
ROOT = '/%s/' % (VERSION)
# XML namespace used by ELB API responses.
NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, )
|
||||
|
||||
|
||||
class ELBResponse(AWSGenericResponse):
    """
    Amazon ELB response class.
    """
    namespace = NS
    # No service-specific exceptions are mapped; the generic AWS error
    # handling applies.
    exceptions = {}
    # Element under which the API reports errors.
    xpath = 'Error'
|
||||
|
||||
|
||||
class ELBConnection(SignedAWSConnection):
    """
    Signed AWS connection bound to the ELB API version and response class.
    """
    version = VERSION
    host = HOST
    responseCls = ELBResponse
|
||||
|
||||
|
||||
class ElasticLBDriver(Driver):
    """
    Driver for the Amazon Elastic Load Balancing (ELB) service.
    """
    name = 'Amazon Elastic Load Balancing'
    website = 'http://aws.amazon.com/elasticloadbalancing/'
    connectionCls = ELBConnection

    def __init__(self, access_id, secret, region):
        """
        :param access_id: AWS access key id.
        :type access_id: ``str``

        :param secret: AWS secret key.
        :type secret: ``str``

        :param region: AWS region name (e.g. ``us-east-1``).
        :type region: ``str``
        """
        super(ElasticLBDriver, self).__init__(access_id, secret)
        self.region = region
        self.connection.host = HOST % (region)

    def list_protocols(self):
        """
        Return the protocols supported by ELB listeners.

        :rtype: ``list`` of ``str``
        """
        return ['tcp', 'ssl', 'http', 'https']

    def list_balancers(self):
        """
        @inherits: :class:`Driver.list_balancers`
        """
        params = {'Action': 'DescribeLoadBalancers'}
        data = self.connection.request(ROOT, params=params).object
        return self._to_balancers(data)

    def create_balancer(self, name, port, protocol, algorithm, members,
                        ex_members_availability_zones=None):
        """
        @inherits: :class:`Driver.create_balancer`

        :keyword ex_members_availability_zones: Availability zone
            suffixes (e.g. ``['a', 'b']``) appended to the region name.
            Defaults to ``['a']``.
        :type ex_members_availability_zones: ``list`` of ``str``
        """
        if ex_members_availability_zones is None:
            ex_members_availability_zones = ['a']

        params = {
            'Action': 'CreateLoadBalancer',
            'LoadBalancerName': name,
            'Listeners.member.1.InstancePort': str(port),
            'Listeners.member.1.InstanceProtocol': protocol.upper(),
            'Listeners.member.1.LoadBalancerPort': str(port),
            'Listeners.member.1.Protocol': protocol.upper(),
        }

        for i, z in enumerate(ex_members_availability_zones):
            zone = ''.join((self.region, z))
            params['AvailabilityZones.member.%d' % (i + 1)] = zone

        data = self.connection.request(ROOT, params=params).object

        balancer = LoadBalancer(
            id=name,
            name=name,
            state=State.PENDING,
            ip=findtext(element=data, xpath='DNSName', namespace=NS),
            port=port,
            driver=self.connection.driver
        )
        # Members are tracked locally; ELB identifies balancers by name.
        balancer._members = []
        return balancer

    def destroy_balancer(self, balancer):
        """
        @inherits: :class:`Driver.destroy_balancer`
        """
        params = {
            'Action': 'DeleteLoadBalancer',
            'LoadBalancerName': balancer.id
        }
        self.connection.request(ROOT, params=params)
        return True

    def get_balancer(self, balancer_id):
        """
        @inherits: :class:`Driver.get_balancer`
        """
        params = {
            'Action': 'DescribeLoadBalancers',
            'LoadBalancerNames.member.1': balancer_id
        }
        data = self.connection.request(ROOT, params=params).object
        return self._to_balancers(data)[0]

    def balancer_attach_compute_node(self, balancer, node):
        """
        @inherits: :class:`Driver.balancer_attach_compute_node`
        """
        params = {
            'Action': 'RegisterInstancesWithLoadBalancer',
            'LoadBalancerName': balancer.id,
            'Instances.member.1.InstanceId': node.id
        }
        self.connection.request(ROOT, params=params)
        # FIX: the new Member is now parented on the balancer (was
        # ``balancer=self``) and returned, per the base-class contract.
        member = Member(node.id, None, None, balancer=balancer)
        balancer._members.append(member)
        return member

    def balancer_detach_member(self, balancer, member):
        """
        @inherits: :class:`Driver.balancer_detach_member`
        """
        params = {
            'Action': 'DeregisterInstancesFromLoadBalancer',
            'LoadBalancerName': balancer.id,
            'Instances.member.1.InstanceId': member.id
        }
        self.connection.request(ROOT, params=params)
        balancer._members = [m for m in balancer._members if m.id != member.id]
        return True

    def balancer_list_members(self, balancer):
        """
        @inherits: :class:`Driver.balancer_list_members`
        """
        # Members are cached on the balancer object; no API call is made.
        return balancer._members

    def ex_list_balancer_policies(self, balancer):
        """
        Return a list of policy description string.

        :rtype: ``list`` of ``str``
        """
        params = {
            'Action': 'DescribeLoadBalancerPolicies',
            'LoadBalancerName': balancer.id
        }

        data = self.connection.request(ROOT, params=params).object
        return self._to_policies(data)

    def ex_list_balancer_policy_types(self):
        """
        Return a list of policy type description string.

        :rtype: ``list`` of ``str``
        """
        params = {'Action': 'DescribeLoadBalancerPolicyTypes'}

        data = self.connection.request(ROOT, params=params).object
        return self._to_policy_types(data)

    def ex_create_balancer_policy(self, name, policy_name, policy_type,
                                  policy_attributes=None):
        """
        Create a new load balancer policy

        :param name: Balancer name to create the policy for
        :type name: ``str``

        :param policy_name: policy to be created
        :type policy_name: ``str``

        :param policy_type: policy type being used to create policy.
        :type policy_type: ``str``

        :param policy_attributes: Mapping of attribute name to value
        :type policy_attributes: ``dict``
        """
        params = {
            'Action': 'CreateLoadBalancerPolicy',
            'LoadBalancerName': name,
            'PolicyName': policy_name,
            'PolicyTypeName': policy_type
        }

        if policy_attributes is not None:
            # FIX: was dict.iteritems() (Python 2 only), and the
            # parameter names were split by a line continuation, which
            # baked literal whitespace into the AWS query keys. The
            # loop variables also no longer shadow the "name" argument.
            for index, (attr_name, attr_value) in enumerate(
                    policy_attributes.items(), 1):
                params['PolicyAttributes.member.%d.AttributeName'
                       % (index)] = attr_name
                params['PolicyAttributes.member.%d.AttributeValue'
                       % (index)] = attr_value

        response = self.connection.request(ROOT, params=params)
        return response.status == httplib.OK

    def ex_delete_balancer_policy(self, name, policy_name):
        """
        Delete a load balancer policy

        :param name: balancer name for which policy will be deleted
        :type name: ``str``

        :param policy_name: The Mnemonic name for the policy being deleted
        :type policy_name: ``str``
        """
        params = {
            'Action': 'DeleteLoadBalancerPolicy',
            'LoadBalancerName': name,
            'PolicyName': policy_name
        }

        response = self.connection.request(ROOT, params=params)
        return response.status == httplib.OK

    def ex_set_balancer_policies_listener(self, name, port, policies):
        """
        Associates, updates, or disables a policy with a listener on
        the load balancer

        :param name: balancer name to set policies for listerner
        :type name: ``str``

        :param port: port to use
        :type port: ``str``

        :param policies: List of policies to be associated with the balancer
        :type policies: ``list`` of ``str``
        """
        params = {
            'Action': 'SetLoadBalancerPoliciesOfListener',
            'LoadBalancerName': name,
            'LoadBalancerPort': str(port)
        }

        # An empty list deliberately sends no PolicyNames members, which
        # disables all policies on the listener.
        if policies:
            params = self._create_list_params(params, policies,
                                              'PolicyNames.member.%d')

        response = self.connection.request(ROOT, params=params)
        return response.status == httplib.OK

    def ex_set_balancer_policies_backend_server(self, name, instance_port,
                                                policies):
        """
        Replaces the current set of policies associated with a port on
        which the back-end server is listening with a new set of policies

        :param name: balancer name to set policies of backend server
        :type name: ``str``

        :param instance_port: Instance Port
        :type instance_port: ``int``

        :param policies: List of policies to be associated with the balancer
        :type policies: ``list`` of ``str``
        """
        params = {
            'Action': 'SetLoadBalancerPoliciesForBackendServer',
            'LoadBalancerName': name,
            'InstancePort': str(instance_port)
        }

        if policies:
            params = self._create_list_params(params, policies,
                                              'PolicyNames.member.%d')

        response = self.connection.request(ROOT, params=params)
        return response.status == httplib.OK

    def ex_create_balancer_listeners(self, name, listeners=None):
        """
        Creates one or more listeners on a load balancer for the specified port

        :param name: The mnemonic name associated with the load balancer
        :type name: ``str``

        :param listeners: Each tuple contain values, (LoadBalancerPortNumber,
                          InstancePortNumber, Protocol,[SSLCertificateId])
        :type listeners: ``list`` of ``tuple``
        """
        params = {
            'Action': 'CreateLoadBalancerListeners',
            'LoadBalancerName': name
        }

        # FIX: the default listeners=None previously crashed in
        # enumerate(); an empty/missing list is now a no-op request.
        for index, listener in enumerate(listeners or []):
            i = index + 1
            protocol = listener[2].upper()
            params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
            params['Listeners.member.%d.InstancePort' % i] = listener[1]
            params['Listeners.member.%d.Protocol' % i] = listener[2]
            # FIX: only secure listeners carry a certificate id.
            # Previously any non-HTTPS/SSL listener aborted the whole
            # call with ``return False``, and the parameter name was
            # split by a line continuation, embedding literal
            # whitespace into the AWS query key.
            if protocol in ('HTTPS', 'SSL'):
                params['Listeners.member.%d.SSLCertificateId' % i] = \
                    listener[3]

        response = self.connection.request(ROOT, params=params)
        return response.status == httplib.OK

    def _to_policies(self, data):
        xpath = 'DescribeLoadBalancerPoliciesResult/PolicyDescriptions/member'
        return [findtext(element=el, xpath='PolicyName', namespace=NS)
                for el in findall(element=data, xpath=xpath, namespace=NS)]

    def _to_policy_types(self, data):
        xpath = 'DescribeLoadBalancerPolicyTypesResult/'
        xpath += 'PolicyTypeDescriptions/member'
        return [findtext(element=el, xpath='PolicyTypeName', namespace=NS)
                for el in findall(element=data, xpath=xpath, namespace=NS)]

    def _to_balancers(self, data):
        xpath = 'DescribeLoadBalancersResult/LoadBalancerDescriptions/member'
        return [self._to_balancer(el)
                for el in findall(element=data, xpath=xpath, namespace=NS)]

    def _to_balancer(self, el):
        name = findtext(element=el, xpath='LoadBalancerName', namespace=NS)
        dns_name = findtext(el, xpath='DNSName', namespace=NS)
        port = findtext(el, xpath='LoadBalancerPort', namespace=NS)

        balancer = LoadBalancer(
            id=name,
            name=name,
            state=State.UNKNOWN,
            ip=dns_name,
            port=port,
            driver=self.connection.driver
        )

        xpath = 'Instances/member/InstanceId'
        members = findall(element=el, xpath=xpath, namespace=NS)
        balancer._members = []

        for m in members:
            balancer._members.append(Member(m.text, None, None,
                                            balancer=balancer))

        return balancer

    def _create_list_params(self, params, items, label):
        """
        return parameter list
        """
        if isinstance(items, str):
            items = [items]
        for index, item in enumerate(items):
            params[label % (index + 1)] = item
        return params
|
||||
362
awx/lib/site-packages/libcloud/loadbalancer/drivers/gce.py
Normal file
362
awx/lib/site-packages/libcloud/loadbalancer/drivers/gce.py
Normal file
@@ -0,0 +1,362 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json # NOQA
|
||||
|
||||
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
|
||||
from libcloud.compute.drivers.gce import GCEConnection, GCENodeDriver
|
||||
|
||||
# GCE doesn't actually give you an algorithm choice, but this is here simply
# as the closest match. The actual algorithm is described here:
# https://developers.google.com/compute/docs/load-balancing/#overview
DEFAULT_ALGORITHM = Algorithm.RANDOM
|
||||
|
||||
|
||||
class GCELBDriver(Driver):
    """
    Load balancer driver for Google Compute Engine.

    GCE has no standalone load-balancer resource: a "balancer" here is a
    forwarding rule paired with a target pool.  All API work is delegated
    to a wrapped :class:`GCENodeDriver` instance (``self.gce``).
    """
    connectionCls = GCEConnection
    apiname = 'googleapis'
    name = 'Google Compute Engine Load Balancer'
    website = 'https://cloud.google.com/'

    # GCE balances with a hashing-based scheme; RANDOM is the closest
    # libcloud Algorithm constant and the only value accepted.
    _VALUE_TO_ALGORITHM_MAP = {
        'RANDOM': Algorithm.RANDOM
    }

    def __init__(self, *args, **kwargs):
        # Reuse an already-authenticated GCENodeDriver when one is passed
        # in via 'gce_driver'; otherwise construct one from the given
        # credentials.
        if kwargs.get('gce_driver'):
            self.gce = kwargs['gce_driver']
        else:
            self.gce = GCENodeDriver(*args, **kwargs)

        # Share the node driver's connection so both drivers use one
        # authenticated session.
        self.connection = self.gce.connection

    def _get_node_from_ip(self, ip):
        """
        Return the node object that matches a given public IP address.

        :param ip: Public IP address to search for
        :type ip: ``str``

        :return: Node object that has the given IP, or None if not found.
        :rtype: :class:`Node` or None
        """
        # Linear scan over every node in every zone; no server-side
        # IP filter is used here.
        all_nodes = self.gce.list_nodes(ex_zone='all')
        for node in all_nodes:
            if ip in node.public_ips:
                return node
        return None

    def list_protocols(self):
        """
        Return a list of supported protocols.

        For GCE, this is simply a hardcoded list.

        :rtype: ``list`` of ``str``
        """
        return ['TCP', 'UDP']

    def list_balancers(self, ex_region=None):
        """
        List all loadbalancers

        :keyword ex_region: The region to return balancers from. If None,
                            will default to self.region. If 'all', will
                            return all balancers.
        :type ex_region: ``str`` or :class:`GCERegion` or ``None``

        :rtype: ``list`` of :class:`LoadBalancer`
        """
        # One LoadBalancer per forwarding rule in the region.
        balancers = []
        for fwr in self.gce.ex_list_forwarding_rules(region=ex_region):
            balancers.append(self._forwarding_rule_to_loadbalancer(fwr))
        return balancers

    def create_balancer(self, name, port, protocol, algorithm, members,
                        ex_region=None, ex_healthchecks=None,
                        ex_address=None):
        """
        Create a new load balancer instance.

        For GCE, this means creating a forwarding rule and a matching target
        pool, then adding the members to the target pool.

        :param name: Name of the new load balancer (required)
        :type name: ``str``

        :param port: Port or range of ports the load balancer should listen
                     on, defaults to all ports.  Examples: '80', '5000-5999'
        :type port: ``str``

        :param protocol: Load balancer protocol.  Should be 'tcp' or 'udp',
                         defaults to 'tcp'.
        :type protocol: ``str``

        :param members: List of Members to attach to balancer.  Can be Member
                        objects or Node objects.  Node objects are preferred
                        for GCE, but Member objects are accepted to comply
                        with the established libcloud API.  Note that the
                        'port' attribute of the members is ignored.
        :type members: ``list`` of :class:`Member` or :class:`Node`

        :param algorithm: Load balancing algorithm.  Ignored for GCE which
                          uses a hashing-based algorithm.
        :type algorithm: :class:`Algorithm` or ``None``

        :keyword ex_region: Optional region to create the load balancer in.
                            Defaults to the default region of the GCE Node
                            Driver.
        :type ex_region: C{GCERegion} or ``str``

        :keyword ex_healthchecks: Optional list of healthcheck objects or
                                  names to add to the load balancer.
        :type ex_healthchecks: ``list`` of :class:`GCEHealthCheck` or
                               ``str``

        :keyword ex_address: Optional static address object to be assigned to
                             the load balancer.
        :type ex_address: C{GCEAddress}

        :return: LoadBalancer object
        :rtype: :class:`LoadBalancer`
        """
        # Normalize the heterogeneous 'members' list into Node objects.
        node_list = []
        for member in members:
            # Member object
            if hasattr(member, 'ip'):
                if member.extra.get('node'):
                    node_list.append(member.extra['node'])
                else:
                    # Resolve the member's IP back to a Node (zone scan).
                    node_list.append(self._get_node_from_ip(member.ip))
            # Node object
            elif hasattr(member, 'name'):
                node_list.append(member)
            # Assume it's a node name otherwise
            else:
                node_list.append(self.gce.ex_get_node(member, 'all'))

        # Create Target Pool
        tp_name = '%s-tp' % name
        targetpool = self.gce.ex_create_targetpool(
            tp_name, region=ex_region, healthchecks=ex_healthchecks,
            nodes=node_list)

        # Create the Forwarding rule, but if it fails, delete the target pool.
        try:
            forwarding_rule = self.gce.ex_create_forwarding_rule(
                name, targetpool, region=ex_region, protocol=protocol,
                port_range=port, address=ex_address)
        # NOTE(review): bare 'except' also intercepts KeyboardInterrupt /
        # SystemExit, but since it re-raises after the cleanup the target
        # pool is never leaked.  Consider 'except BaseException' for
        # clarity without changing behavior.
        except:
            targetpool.destroy()
            raise

        # Reformat forwarding rule to LoadBalancer object
        return self._forwarding_rule_to_loadbalancer(forwarding_rule)

    def destroy_balancer(self, balancer):
        """
        Destroy a load balancer.

        For GCE, this means destroying the associated forwarding rule, then
        destroying the target pool that was attached to the forwarding rule.

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :return: True if successful
        :rtype: ``bool``
        """
        # Destroy order matters: the forwarding rule references the target
        # pool, so the rule goes first.  If the rule fails to delete, the
        # pool is left intact.
        destroy = balancer.extra['forwarding_rule'].destroy()
        if destroy:
            tp_destroy = balancer.extra['targetpool'].destroy()
            return tp_destroy
        else:
            return destroy

    def get_balancer(self, balancer_id):
        """
        Return a :class:`LoadBalancer` object.

        :param balancer_id: Name of load balancer you wish to fetch. For GCE,
                            this is the name of the associated forwarding
                            rule.
        :param balancer_id: ``str``

        :rtype: :class:`LoadBalancer`
        """
        fwr = self.gce.ex_get_forwarding_rule(balancer_id)
        return self._forwarding_rule_to_loadbalancer(fwr)

    def balancer_attach_compute_node(self, balancer, node):
        """
        Attach a compute node as a member to the load balancer.

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param node: Node to join to the balancer
        :type node: :class:`Node`

        :return: Member after joining the balancer.
        :rtype: :class:`Member`
        """
        # Implicitly returns None if the add fails (add_node is falsy).
        add_node = balancer.extra['targetpool'].add_node(node)
        if add_node:
            return self._node_to_member(node, balancer)

    def balancer_attach_member(self, balancer, member):
        """
        Attach a member to balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param member: Member to join to the balancer
        :type member: :class:`Member`

        :return: Member after joining the balancer.
        :rtype: :class:`Member`
        """
        # Prefer a cached Node in member.extra; fall back to resolving the
        # member's IP across all zones.
        node = member.extra.get('node') or self._get_node_from_ip(member.ip)
        add_node = balancer.extra['targetpool'].add_node(node)
        if add_node:
            return self._node_to_member(node, balancer)

    def balancer_detach_member(self, balancer, member):
        """
        Detach member from balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param member: Member which should be used
        :type member: :class:`Member`

        :return: True if member detach was successful, otherwise False
        :rtype: ``bool``
        """
        node = member.extra.get('node') or self._get_node_from_ip(member.ip)
        remove_node = balancer.extra['targetpool'].remove_node(node)
        return remove_node

    def balancer_list_members(self, balancer):
        """
        Return list of members attached to balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :rtype: ``list`` of :class:`Member`
        """
        return [self._node_to_member(n, balancer) for n in
                balancer.extra['targetpool'].nodes]

    def ex_create_healthcheck(self, *args, **kwargs):
        """
        Create a healthcheck.  Delegates directly to
        :meth:`GCENodeDriver.ex_create_healthcheck`.
        """
        return self.gce.ex_create_healthcheck(*args, **kwargs)

    def ex_list_healthchecks(self):
        """
        Return all healthchecks.  Delegates directly to
        :meth:`GCENodeDriver.ex_list_healthchecks`.
        """
        return self.gce.ex_list_healthchecks()

    def ex_balancer_attach_healthcheck(self, balancer, healthcheck):
        """
        Attach a healthcheck to balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param healthcheck: Healthcheck to add
        :type healthcheck: :class:`GCEHealthCheck`

        :return: True if successful
        :rtype: ``bool``
        """
        return balancer.extra['targetpool'].add_healthcheck(healthcheck)

    def ex_balancer_detach_healthcheck(self, balancer, healthcheck):
        """
        Detach healthcheck from balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :param healthcheck: Healthcheck to remove
        :type healthcheck: :class:`GCEHealthCheck`

        :return: True if successful
        :rtype: ``bool``
        """
        return balancer.extra['targetpool'].remove_healthcheck(healthcheck)

    def ex_balancer_list_healthchecks(self, balancer):
        """
        Return list of healthchecks attached to balancer

        :param balancer: LoadBalancer which should be used
        :type balancer: :class:`LoadBalancer`

        :rtype: ``list`` of :class:`HealthChecks`
        """
        # Served from the extra dict populated at balancer construction
        # time; no API call is made here.
        return balancer.extra['healthchecks']

    def _node_to_member(self, node, balancer):
        """
        Return a Member object based on a Node.

        :param node: Node object
        :type node: :class:`Node`

        :keyword balancer: The balancer the member is attached to.
        :type balancer: :class:`LoadBalancer`

        :return: Member object
        :rtype: :class:`Member`
        """
        # A balancer can have a node as a member, even if the node doesn't
        # exist. In this case, 'node' is simply a string to where the resource
        # would be found if it was there.
        if hasattr(node, 'name'):
            member_id = node.name
            member_ip = node.public_ips[0]
        else:
            member_id = node
            member_ip = None

        extra = {'node': node}
        return Member(id=member_id, ip=member_ip, port=balancer.port,
                      balancer=balancer, extra=extra)

    def _forwarding_rule_to_loadbalancer(self, forwarding_rule):
        """
        Return a Load Balancer object based on a GCEForwardingRule object.

        :param forwarding_rule: ForwardingRule object
        :type forwarding_rule: :class:`GCEForwardingRule`

        :return: LoadBalancer object
        :rtype: :class:`LoadBalancer`
        """
        # Stash the underlying GCE resources so the destroy/attach/detach
        # methods above can reach them without another lookup.
        extra = {}
        extra['forwarding_rule'] = forwarding_rule
        extra['targetpool'] = forwarding_rule.targetpool
        extra['healthchecks'] = forwarding_rule.targetpool.healthchecks

        return LoadBalancer(id=forwarding_rule.id,
                            name=forwarding_rule.name, state=None,
                            ip=forwarding_rule.address,
                            port=forwarding_rule.extra['portRange'],
                            driver=self, extra=extra)
|
||||
239
awx/lib/site-packages/libcloud/loadbalancer/drivers/gogrid.py
Normal file
239
awx/lib/site-packages/libcloud/loadbalancer/drivers/gogrid.py
Normal file
@@ -0,0 +1,239 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.utils.misc import reverse_dict
|
||||
from libcloud.common.types import LibcloudError
|
||||
from libcloud.common.gogrid import GoGridConnection, GoGridResponse,\
|
||||
BaseGoGridDriver
|
||||
from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm
|
||||
from libcloud.loadbalancer.base import DEFAULT_ALGORITHM
|
||||
from libcloud.loadbalancer.types import State, LibcloudLBImmutableError
|
||||
|
||||
|
||||
class GoGridLBResponse(GoGridResponse):
    """
    Response class for the GoGrid load-balancer driver.

    Translates one particularly unhelpful server-side failure (adding a
    member with a foreign IP) into an actionable client-side error before
    the generic success check runs.
    """

    def success(self):
        """
        Return whether the request succeeded.

        :raises LibcloudError: If the server reported the known
            "unexpected server error" for ``/grid/loadbalancer/add``.
        :rtype: ``bool``
        """
        if self.status == httplib.INTERNAL_SERVER_ERROR:
            # Hack, but at least this error message is more useful than
            # "unexpected server error"
            body = json.loads(self.body)
            if body['method'] == '/grid/loadbalancer/add' and \
               len(body['list']) >= 1 and \
               body['list'][0]['message'].find(
                   'unexpected server error') != -1:
                # Typo fix: original message read "mostly likely".
                raise LibcloudError(
                    value='You most likely tried to add a member with an IP'
                          ' address not assigned to your account', driver=self)
        return super(GoGridLBResponse, self).success()
|
||||
|
||||
|
||||
class GoGridLBConnection(GoGridConnection):
    """
    Connection class for the GoGrid load-balancer driver.
    """
    # Use the LB-specific response class so 500s from the balancer API are
    # translated into a useful LibcloudError (see GoGridLBResponse).
    responseCls = GoGridLBResponse
|
||||
|
||||
|
||||
class GoGridLBDriver(BaseGoGridDriver, Driver):
    """
    GoGrid load-balancer driver.

    Balancers are managed through GoGrid's ``/api/grid/loadbalancer/*``
    REST endpoints; members are plain IP/port pairs ("real IPs" in GoGrid
    terms).
    """
    connectionCls = GoGridLBConnection
    api_name = 'gogrid_lb'
    name = 'GoGrid LB'
    website = 'http://www.gogrid.com/'

    # States not listed here map to State.UNKNOWN via dict.get().
    LB_STATE_MAP = {'On': State.RUNNING,
                    'Unknown': State.UNKNOWN}
    _VALUE_TO_ALGORITHM_MAP = {
        'round robin': Algorithm.ROUND_ROBIN,
        'least connect': Algorithm.LEAST_CONNECTIONS
    }
    _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP)

    def __init__(self, *args, **kwargs):
        """
        @inherits: :class:`Driver.__init__`
        """
        super(GoGridLBDriver, self).__init__(*args, **kwargs)

    def list_protocols(self):
        # GoGrid only supports http
        return ['http']

    def list_balancers(self):
        """
        Return all load balancers on the account.

        :rtype: ``list`` of :class:`LoadBalancer`
        """
        return self._to_balancers(
            self.connection.request('/api/grid/loadbalancer/list').object)

    def ex_create_balancer_nowait(self, name, members, protocol='http',
                                  port=80, algorithm=DEFAULT_ALGORITHM):
        """
        Create a balancer without waiting for GoGrid to assign it an id.

        ``protocol`` is accepted for API parity but is not sent to GoGrid
        (only HTTP is supported - see :meth:`list_protocols`).

        @inherits: :class:`Driver.create_balancer`
        """
        algorithm = self._algorithm_to_value(algorithm)

        params = {'name': name,
                  'loadbalancer.type': algorithm,
                  'virtualip.ip': self._get_first_ip(),
                  'virtualip.port': port}
        params.update(self._members_to_params(members))

        resp = self.connection.request('/api/grid/loadbalancer/add',
                                       method='GET',
                                       params=params)
        return self._to_balancers(resp.object)[0]

    def create_balancer(self, name, members, protocol='http', port=80,
                        algorithm=DEFAULT_ALGORITHM):
        """
        Create a balancer and poll (up to 20 minutes, every 30 seconds)
        until GoGrid assigns it an id.

        @inherits: :class:`Driver.create_balancer`
        """
        balancer = self.ex_create_balancer_nowait(name, members, protocol,
                                                  port, algorithm)

        timeout = 60 * 20
        waittime = 0
        interval = 2 * 15

        if balancer.id is not None:
            return balancer
        else:
            # The add call returned before the balancer got an id; poll the
            # list endpoint until a balancer with our name has one.
            while waittime < timeout:
                balancers = self.list_balancers()

                for candidate in balancers:
                    if candidate.name == balancer.name and \
                       candidate.id is not None:
                        return candidate

                waittime += interval
                time.sleep(interval)

        raise Exception('Failed to get id')

    def destroy_balancer(self, balancer):
        """
        Destroy the given balancer.

        :raises LibcloudLBImmutableError: If the balancer has a pending
            update and cannot be deleted yet.
        :rtype: ``bool``
        """
        try:
            resp = self.connection.request(
                '/api/grid/loadbalancer/delete', method='POST',
                params={'id': balancer.id})
        except Exception:
            # sys.exc_info() keeps this compatible with the py2.5-era
            # exception style used throughout this vendored module.
            e = sys.exc_info()[1]
            if "Update request for LoadBalancer" in str(e):
                raise LibcloudLBImmutableError(
                    "Cannot delete immutable object", GoGridLBDriver)
            else:
                raise

        return resp.status == 200

    def get_balancer(self, **kwargs):
        """
        Fetch a single balancer by ``ex_balancer_name`` or ``balancer_id``.

        :rtype: :class:`LoadBalancer`
        """
        params = {}

        try:
            params['name'] = kwargs['ex_balancer_name']
        except KeyError:
            balancer_id = kwargs['balancer_id']
            params['id'] = balancer_id

        resp = self.connection.request('/api/grid/loadbalancer/get',
                                       params=params)

        return self._to_balancers(resp.object)[0]

    def balancer_attach_member(self, balancer, member):
        """
        Attach ``member`` by re-submitting the full member list.

        :return: The attached member, matched back by IP.
        :rtype: :class:`Member`
        """
        members = self.balancer_list_members(balancer)
        members.append(member)

        params = {"id": balancer.id}

        params.update(self._members_to_params(members))

        resp = self._update_balancer(params)
        return [m for m in
                self._to_members(resp.object["list"][0]["realiplist"],
                                 balancer)
                if m.ip == member.ip][0]

    def balancer_detach_member(self, balancer, member):
        """
        Detach ``member`` by re-submitting the member list without it.

        :rtype: ``bool``
        """
        members = self.balancer_list_members(balancer)

        remaining_members = [n for n in members if n.id != member.id]

        params = {"id": balancer.id}
        params.update(self._members_to_params(remaining_members))

        resp = self._update_balancer(params)

        return resp.status == 200

    def balancer_list_members(self, balancer):
        """
        Return the members ("real IPs") attached to ``balancer``.

        :rtype: ``list`` of :class:`Member`
        """
        resp = self.connection.request('/api/grid/loadbalancer/get',
                                       params={'id': balancer.id})
        return self._to_members(resp.object["list"][0]["realiplist"], balancer)

    def _update_balancer(self, params):
        """
        POST an edit request, translating known error strings into
        libcloud exceptions.
        """
        try:
            return self.connection.request('/api/grid/loadbalancer/edit',
                                           method='POST',
                                           params=params)
        except Exception:
            e = sys.exc_info()[1]
            if "Update already pending" in str(e):
                raise LibcloudLBImmutableError(
                    "Balancer is immutable", GoGridLBDriver)

            raise LibcloudError(value='Exception: %s' % str(e), driver=self)

    def _members_to_params(self, members):
        """
        Helper method to convert list of :class:`Member` objects
        to GET params.
        """
        params = {}

        # enumerate() replaces the original manual counter.
        for i, member in enumerate(members):
            params["realiplist.%s.ip" % i] = member.ip
            params["realiplist.%s.port" % i] = member.port

        return params

    def _to_balancers(self, data):
        # 'data' used to be named 'object', shadowing the builtin.
        return [self._to_balancer(el) for el in data["list"]]

    def _to_balancer(self, el):
        lb = LoadBalancer(id=el.get("id"),
                          name=el["name"],
                          state=self.LB_STATE_MAP.get(
                              el["state"]["name"], State.UNKNOWN),
                          ip=el["virtualip"]["ip"]["ip"],
                          port=el["virtualip"]["port"],
                          driver=self.connection.driver)
        return lb

    def _to_members(self, data, balancer=None):
        # 'data' used to be named 'object', shadowing the builtin.
        return [self._to_member(el, balancer) for el in data]

    def _to_member(self, el, balancer=None):
        member = Member(id=el["ip"]["id"],
                        ip=el["ip"]["ip"],
                        port=el["port"],
                        balancer=balancer)
        return member
|
||||
@@ -0,0 +1,29 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.loadbalancer.providers import Provider
|
||||
|
||||
from libcloud.loadbalancer.drivers.cloudstack import CloudStackLBDriver
|
||||
|
||||
|
||||
class NinefoldLBDriver(CloudStackLBDriver):
    """Driver for load balancers on Ninefold's Compute platform."""

    # Ninefold exposes a CloudStack API at a fixed endpoint, so this
    # driver only pins the endpoint and identity attributes; all behavior
    # comes from CloudStackLBDriver.
    host = 'api.ninefold.com'
    path = '/compute/v1.0/'

    type = Provider.NINEFOLD
    name = 'Ninefold LB'
    website = 'http://ninefold.com/'
|
||||
1530
awx/lib/site-packages/libcloud/loadbalancer/drivers/rackspace.py
Normal file
1530
awx/lib/site-packages/libcloud/loadbalancer/drivers/rackspace.py
Normal file
File diff suppressed because it is too large
Load Diff
55
awx/lib/site-packages/libcloud/loadbalancer/providers.py
Normal file
55
awx/lib/site-packages/libcloud/loadbalancer/providers.py
Normal file
@@ -0,0 +1,55 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from libcloud.utils.misc import get_driver as get_provider_driver
|
||||
from libcloud.utils.misc import set_driver as set_provider_driver
|
||||
from libcloud.loadbalancer.types import Provider
|
||||
|
||||
__all__ = [
|
||||
"Provider",
|
||||
"DRIVERS",
|
||||
"get_driver",
|
||||
]
|
||||
|
||||
# Maps each Provider constant to a (module path, class name) pair; the
# driver class is imported lazily by get_driver(), so listing a provider
# here does not pull in its dependencies until it is requested.
DRIVERS = {
    Provider.RACKSPACE:
    ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'),
    Provider.GOGRID:
    ('libcloud.loadbalancer.drivers.gogrid', 'GoGridLBDriver'),
    Provider.NINEFOLD:
    ('libcloud.loadbalancer.drivers.ninefold', 'NinefoldLBDriver'),
    Provider.BRIGHTBOX:
    ('libcloud.loadbalancer.drivers.brightbox', 'BrightboxLBDriver'),
    Provider.ELB:
    ('libcloud.loadbalancer.drivers.elb', 'ElasticLBDriver'),
    Provider.CLOUDSTACK:
    ('libcloud.loadbalancer.drivers.cloudstack', 'CloudStackLBDriver'),
    Provider.GCE:
    ('libcloud.loadbalancer.drivers.gce', 'GCELBDriver'),

    # Deprecated
    Provider.RACKSPACE_US:
    ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'),
    Provider.RACKSPACE_UK:
    ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceUKLBDriver'),
}
|
||||
|
||||
|
||||
def get_driver(provider):
    """
    Return the load-balancer driver class registered for ``provider``.

    :param provider: One of the :class:`Provider` constants.
    """
    return get_provider_driver(DRIVERS, provider)
|
||||
|
||||
|
||||
def set_driver(provider, module, klass):
    """
    Register a custom driver class for ``provider`` in the DRIVERS table.

    :param provider: Provider constant to register under.
    :param module: Module path containing the driver class.
    :param klass: Name of the driver class.
    """
    return set_provider_driver(DRIVERS, provider, module, klass)
|
||||
70
awx/lib/site-packages/libcloud/loadbalancer/types.py
Normal file
70
awx/lib/site-packages/libcloud/loadbalancer/types.py
Normal file
@@ -0,0 +1,70 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__all__ = [
|
||||
"Provider",
|
||||
"State",
|
||||
"LibcloudLBError",
|
||||
"LibcloudLBImmutableError",
|
||||
]
|
||||
|
||||
from libcloud.common.types import LibcloudError
|
||||
|
||||
|
||||
class LibcloudLBError(LibcloudError):
    """Base exception for all load-balancer related errors."""
    pass
|
||||
|
||||
|
||||
class LibcloudLBImmutableError(LibcloudLBError):
    """Raised when a balancer cannot be modified or deleted right now
    (e.g. an update is already pending on the provider side)."""
    pass
|
||||
|
||||
|
||||
class Provider(object):
    """
    Constants identifying the supported load-balancer providers.

    These values are the keys of
    ``libcloud.loadbalancer.providers.DRIVERS``.
    """
    RACKSPACE = 'rackspace'
    GOGRID = 'gogrid'
    NINEFOLD = 'ninefold'
    BRIGHTBOX = 'brightbox'
    ELB = 'elb'
    CLOUDSTACK = 'cloudstack'
    GCE = 'gce'

    # Deprecated
    RACKSPACE_US = 'rackspace_us'
    RACKSPACE_UK = 'rackspace_uk'
|
||||
|
||||
|
||||
class State(object):
    """
    Standard states for a loadbalancer

    :cvar RUNNING: loadbalancer is running and ready to use
    :cvar PENDING: loadbalancer is being provisioned or updated
    :cvar UNKNOWN: loadbalancer state is unknown
    :cvar ERROR: loadbalancer is in an error state
    :cvar DELETED: loadbalancer has been deleted
    """

    RUNNING = 0
    PENDING = 1
    UNKNOWN = 2
    ERROR = 3
    DELETED = 4
|
||||
|
||||
|
||||
class MemberCondition(object):
    """
    Each member of a load balancer can have an associated condition
    which determines its role within the load balancer.

    NOTE(review): the exact semantics of each condition are
    driver-specific; the names suggest ENABLED receives traffic,
    DISABLED does not, and DRAINING finishes existing connections only -
    verify against each driver's documentation.
    """
    ENABLED = 0
    DISABLED = 1
    DRAINING = 2
|
||||
216
awx/lib/site-packages/libcloud/pricing.py
Normal file
216
awx/lib/site-packages/libcloud/pricing.py
Normal file
@@ -0,0 +1,216 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import with_statement
|
||||
|
||||
"""
|
||||
A class which handles loading the pricing files.
|
||||
"""
|
||||
|
||||
import os.path
|
||||
from os.path import join as pjoin
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from libcloud.utils.connection import get_response_object
|
||||
|
||||
__all__ = [
|
||||
'get_pricing',
|
||||
'get_size_price',
|
||||
'set_pricing',
|
||||
'clear_pricing_data',
|
||||
'download_pricing_file'
|
||||
]
|
||||
|
||||
# Default URL to the pricing file
DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json'  # NOQA

# Pricing file bundled with the package, and the per-user override that
# takes precedence over it when present.
CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json')
CUSTOM_PRICING_FILE_PATH = os.path.expanduser('~/.libcloud/pricing.json')

# Pricing data cache
# Process-wide, module-level cache: {driver_type: {driver_name: pricing}}.
PRICING_DATA = {
    'compute': {},
    'storage': {}
}

# Only these driver types may be looked up through get_pricing().
VALID_PRICING_DRIVER_TYPES = ['compute', 'storage']
|
||||
|
||||
|
||||
def get_pricing_file_path(file_path=None):
    """
    Return the path of the pricing file to load.

    :type file_path: ``str`` or ``None``
    :param file_path: Explicit path to a pricing file.  If provided it is
                      returned as-is; otherwise the user's custom file
                      (``~/.libcloud/pricing.json``) is preferred over the
                      bundled default.

    :rtype: ``str``
    """
    # Bug fix: the ``file_path`` argument used to be accepted but
    # silently ignored.
    if file_path:
        return file_path

    if os.path.exists(CUSTOM_PRICING_FILE_PATH) and \
       os.path.isfile(CUSTOM_PRICING_FILE_PATH):
        # Custom pricing file is available, use it
        return CUSTOM_PRICING_FILE_PATH

    return DEFAULT_PRICING_FILE_PATH
|
||||
|
||||
|
||||
def get_pricing(driver_type, driver_name, pricing_file_path=None):
    """
    Return pricing for the provided driver.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name

    :type pricing_file_path: ``str``
    :param pricing_file_path: Custom path to a price file. If not provided
                              it uses a default path.

    :rtype: ``dict``
    :return: Dictionary with pricing where a key name is size ID and
             the value is a price.
    """
    if driver_type not in VALID_PRICING_DRIVER_TYPES:
        # Bug fix: the original passed driver_type as a second positional
        # argument instead of %-interpolating it, so the message was never
        # formatted.
        raise AttributeError('Invalid driver type: %s' % (driver_type))

    if driver_name in PRICING_DATA[driver_type]:
        # Cache hit - pricing for this driver was already loaded.
        return PRICING_DATA[driver_type][driver_name]

    if not pricing_file_path:
        pricing_file_path = get_pricing_file_path(file_path=pricing_file_path)

    with open(pricing_file_path) as fp:
        content = fp.read()

    pricing_data = json.loads(content)
    size_pricing = pricing_data[driver_type][driver_name]

    # Cache pricing for every driver type found in the file, not just the
    # requested one, so later lookups avoid re-reading the file.
    # (Distinct loop variable - the original shadowed the ``driver_type``
    # parameter.)
    for cached_type in VALID_PRICING_DRIVER_TYPES:
        pricing = pricing_data.get(cached_type, None)
        if pricing:
            PRICING_DATA[cached_type] = pricing

    return size_pricing
|
||||
|
||||
|
||||
def set_pricing(driver_type, driver_name, pricing):
    """
    Populate the driver pricing dictionary.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name

    :type pricing: ``dict``
    :param pricing: Dictionary where a key is a size ID and a value is a price.
    """

    # Overwrites any previously cached pricing for this driver.
    PRICING_DATA[driver_type][driver_name] = pricing
|
||||
|
||||
|
||||
def get_size_price(driver_type, driver_name, size_id):
    """
    Return price for the provided size.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name

    :type size_id: ``str`` or ``int``
    :param size_id: Unique size ID (can be an integer or a string - depends on
                    the driver)

    :rtype: ``float``
    :return: Size price.
    """
    # Look up the driver's full pricing table, then coerce the entry for
    # this size to a float.
    size_map = get_pricing(driver_type=driver_type, driver_name=driver_name)
    return float(size_map[size_id])
|
||||
|
||||
|
||||
def invalidate_pricing_cache():
    """
    Invalidate pricing cache for all the drivers.
    """
    # Reset each driver-type bucket to an empty dict.
    for driver_type in ('compute', 'storage'):
        PRICING_DATA[driver_type] = {}
|
||||
|
||||
|
||||
def clear_pricing_data():
    """
    Invalidate pricing cache for all the drivers.

    Note: This method does the same thing as invalidate_pricing_cache and is
    here for backward compatibility reasons.
    """
    invalidate_pricing_cache()
|
||||
|
||||
|
||||
def invalidate_module_pricing_cache(driver_type, driver_name):
    """
    Invalidate the cache for the specified driver.

    :type driver_type: ``str``
    :param driver_type: Driver type ('compute' or 'storage')

    :type driver_name: ``str``
    :param driver_name: Driver name
    """
    # pop() with a default is a no-op when this driver is not cached.
    PRICING_DATA[driver_type].pop(driver_name, None)
|
||||
|
||||
|
||||
def download_pricing_file(file_url=DEFAULT_FILE_URL,
                          file_path=CUSTOM_PRICING_FILE_PATH):
    """
    Download pricing file from the file_url and save it to file_path.

    :type file_url: ``str``
    :param file_url: URL pointing to the pricing file.

    :type file_path: ``str``
    :param file_path: Path where a download pricing file will be saved.

    :raises ValueError: If ``file_path`` is in a missing directory or is
                        itself a directory.
    :raises Exception: If the downloaded body is not valid pricing JSON.
    """
    dir_name = os.path.dirname(file_path)

    if not os.path.exists(dir_name):
        # Verify a valid path is provided
        msg = ('Can\'t write to %s, directory %s, doesn\'t exist' %
               (file_path, dir_name))
        raise ValueError(msg)

    if os.path.exists(file_path) and os.path.isdir(file_path):
        msg = ('Can\'t write to %s file path because it\'s a'
               ' directory' % (file_path))
        raise ValueError(msg)

    response = get_response_object(file_url)
    body = response.body

    # Verify pricing file is valid.
    # Bug fix: catch ValueError instead of json.decoder.JSONDecodeError.
    # The stdlib json module raises plain ValueError and, on older
    # versions, has no JSONDecodeError attribute at all - referencing it
    # inside the except clause blew up with AttributeError at exception
    # time.  Where JSONDecodeError exists it subclasses ValueError, so
    # this catches both stdlib json and simplejson failures.
    try:
        data = json.loads(body)
    except ValueError:
        msg = 'Provided URL doesn\'t contain valid pricing data'
        raise Exception(msg)

    if not data.get('updated', None):
        msg = 'Provided URL doesn\'t contain valid pricing data'
        raise Exception(msg)

    # No need to stream it since file is small
    with open(file_path, 'w') as file_handle:
        file_handle.write(body)
|
||||
80
awx/lib/site-packages/libcloud/security.py
Normal file
80
awx/lib/site-packages/libcloud/security.py
Normal file
@@ -0,0 +1,80 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Security (SSL) Settings
|
||||
|
||||
Usage:
|
||||
import libcloud.security
|
||||
libcloud.security.VERIFY_SSL_CERT = True
|
||||
|
||||
# Optional.
|
||||
libcloud.security.CA_CERTS_PATH.append('/path/to/cacert.txt')
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
VERIFY_SSL_CERT = True
|
||||
|
||||
# File containing one or more PEM-encoded CA certificates
|
||||
# concatenated together.
|
||||
CA_CERTS_PATH = [
|
||||
# centos/fedora: openssl
|
||||
'/etc/pki/tls/certs/ca-bundle.crt',
|
||||
|
||||
# debian/ubuntu/arch/gentoo: ca-certificates
|
||||
'/etc/ssl/certs/ca-certificates.crt',
|
||||
|
||||
# freebsd: ca_root_nss
|
||||
'/usr/local/share/certs/ca-root-nss.crt',
|
||||
|
||||
# macports: curl-ca-bundle
|
||||
'/opt/local/share/curl/curl-ca-bundle.crt',
|
||||
|
||||
# homebrew: openssl
|
||||
'/usr/local/etc/openssl/cert.pem',
|
||||
|
||||
# homebrew: curl-ca-bundle (backward compatibility)
|
||||
'/usr/local/opt/curl-ca-bundle/share/ca-bundle.crt',
|
||||
]
|
||||
|
||||
# Allow user to explicitly specify which CA bundle to use, using an environment
|
||||
# variable
|
||||
environment_cert_file = os.getenv('SSL_CERT_FILE', None)
|
||||
if environment_cert_file is not None:
|
||||
# Make sure the file exists
|
||||
if not os.path.exists(environment_cert_file):
|
||||
raise ValueError('Certificate file %s doesn\'t exist' %
|
||||
(environment_cert_file))
|
||||
|
||||
if not os.path.isfile(environment_cert_file):
|
||||
raise ValueError('Certificate file can\'t be a directory')
|
||||
|
||||
# If a provided file exists we ignore other common paths because we
|
||||
# don't want to fall-back to a potentially less restrictive bundle
|
||||
CA_CERTS_PATH = [environment_cert_file]
|
||||
|
||||
CA_CERTS_UNAVAILABLE_ERROR_MSG = (
|
||||
'No CA Certificates were found in CA_CERTS_PATH. For information on '
|
||||
'how to get required certificate files, please visit '
|
||||
'https://libcloud.readthedocs.org/en/latest/other/'
|
||||
'ssl-certificate-validation.html'
|
||||
)
|
||||
|
||||
VERIFY_SSL_DISABLED_MSG = (
|
||||
'SSL certificate verification is disabled, this can pose a '
|
||||
'security risk. For more information how to enable the SSL '
|
||||
'certificate verification, please visit the libcloud '
|
||||
'documentation.'
|
||||
)
|
||||
3
awx/lib/site-packages/libcloud/storage/__init__.py
Normal file
3
awx/lib/site-packages/libcloud/storage/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""
|
||||
Module for working with Storage
|
||||
"""
|
||||
825
awx/lib/site-packages/libcloud/storage/base.py
Normal file
825
awx/lib/site-packages/libcloud/storage/base.py
Normal file
@@ -0,0 +1,825 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Provides base classes for working with storage
|
||||
"""
|
||||
|
||||
# Backward compatibility for Python 2.5
|
||||
from __future__ import with_statement
|
||||
|
||||
import os.path # pylint: disable-msg=W0404
|
||||
import hashlib
|
||||
from os.path import join as pjoin
|
||||
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import next
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
import libcloud.utils.files
|
||||
from libcloud.common.types import LibcloudError
|
||||
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
|
||||
from libcloud.storage.types import ObjectDoesNotExistError
|
||||
|
||||
# Public API of this module.
__all__ = [
    'Object',
    'Container',
    'StorageDriver',

    'CHUNK_SIZE',
    'DEFAULT_CONTENT_TYPE'
]

# Buffer size (in bytes) used when streaming object data to / from the
# provider.
CHUNK_SIZE = 8096

# Default Content-Type which is sent when uploading an object if one is not
# supplied and can't be detected when using non-strict mode.
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
|
||||
|
||||
|
||||
class Object(object):
    """
    Represents a single object (BLOB) stored inside a container.

    Instances are thin value holders; all operations delegate to the
    :class:`StorageDriver` that produced them.
    """

    def __init__(self, name, size, hash, extra, meta_data, container,
                 driver):
        """
        :param name: Object name (must be unique per container).
        :type name: ``str``

        :param size: Object size in bytes.
        :type size: ``int``

        :param hash: Object hash.
        :type hash: ``str``

        :param container: Object container.
        :type container: :class:`Container`

        :param extra: Extra attributes.
        :type extra: ``dict``

        :param meta_data: Optional object meta data.
        :type meta_data: ``dict``

        :param driver: StorageDriver instance.
        :type driver: :class:`StorageDriver`
        """
        self.name = name
        self.size = size
        self.hash = hash
        self.container = container
        self.driver = driver
        # Normalize falsy values (None) to empty dicts so callers can
        # always treat these as mappings.
        self.extra = {} if not extra else extra
        self.meta_data = {} if not meta_data else meta_data

    def get_cdn_url(self):
        # Delegates to the owning driver.
        return self.driver.get_object_cdn_url(obj=self)

    def enable_cdn(self, **kwargs):
        # Delegates to the owning driver.
        return self.driver.enable_object_cdn(obj=self, **kwargs)

    def download(self, destination_path, overwrite_existing=False,
                 delete_on_failure=True):
        # Delegates to the owning driver.
        return self.driver.download_object(
            obj=self,
            destination_path=destination_path,
            overwrite_existing=overwrite_existing,
            delete_on_failure=delete_on_failure)

    def as_stream(self, chunk_size=None):
        # Delegates to the owning driver.
        return self.driver.download_object_as_stream(self, chunk_size)

    def delete(self):
        # Delegates to the owning driver.
        return self.driver.delete_object(self)

    def __repr__(self):
        template = '<Object: name=%s, size=%s, hash=%s, provider=%s ...>'
        return template % (self.name, self.size, self.hash,
                           self.driver.name)
|
||||
|
||||
|
||||
class Container(object):
    """
    Represents a container (bucket) which can hold multiple objects.

    Instances are thin value holders; all operations delegate to the
    :class:`StorageDriver` that produced them.
    """

    def __init__(self, name, extra, driver):
        """
        :param name: Container name (must be unique).
        :type name: ``str``

        :param extra: Extra attributes.
        :type extra: ``dict``

        :param driver: StorageDriver instance.
        :type driver: :class:`StorageDriver`
        """
        self.name = name
        self.driver = driver
        # Normalize a falsy value (None) to an empty dict.
        self.extra = {} if not extra else extra

    def iterate_objects(self):
        # Delegates to the owning driver.
        return self.driver.iterate_container_objects(container=self)

    def list_objects(self):
        # Delegates to the owning driver.
        return self.driver.list_container_objects(container=self)

    def get_cdn_url(self):
        # Delegates to the owning driver.
        return self.driver.get_container_cdn_url(container=self)

    def enable_cdn(self, **kwargs):
        # Delegates to the owning driver.
        return self.driver.enable_container_cdn(container=self, **kwargs)

    def get_object(self, object_name):
        # Look the object up by this container's name.
        return self.driver.get_object(container_name=self.name,
                                      object_name=object_name)

    def upload_object(self, file_path, object_name, extra=None, **kwargs):
        # Delegates to the owning driver.
        return self.driver.upload_object(
            file_path, self, object_name, extra=extra, **kwargs)

    def upload_object_via_stream(self, iterator, object_name, extra=None,
                                 **kwargs):
        # Delegates to the owning driver.
        return self.driver.upload_object_via_stream(
            iterator, self, object_name, extra=extra, **kwargs)

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        # Delegates to the owning driver.
        return self.driver.download_object(
            obj, destination_path, overwrite_existing=overwrite_existing,
            delete_on_failure=delete_on_failure)

    def download_object_as_stream(self, obj, chunk_size=None):
        # Delegates to the owning driver.
        return self.driver.download_object_as_stream(obj, chunk_size)

    def delete_object(self, obj):
        # Delegates to the owning driver.
        return self.driver.delete_object(obj)

    def delete(self):
        # Delete this container itself.
        return self.driver.delete_container(self)

    def __repr__(self):
        template = '<Container: name=%s, provider=%s>'
        return template % (self.name, self.driver.name)
|
||||
|
||||
|
||||
class StorageDriver(BaseDriver):
    """
    A base StorageDriver to derive from.
    """

    connectionCls = ConnectionUserAndKey
    name = None
    hash_type = 'md5'
    supports_chunked_encoding = False

    # When strict mode is used, exception will be thrown if no content type is
    # provided and none can be detected when uploading an object
    strict_mode = False

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 **kwargs):
        super(StorageDriver, self).__init__(key=key, secret=secret,
                                            secure=secure, host=host,
                                            port=port, **kwargs)

    def iterate_containers(self):
        """
        Return a generator of containers for the given account

        :return: A generator of Container instances.
        :rtype: ``generator`` of :class:`Container`
        """
        raise NotImplementedError(
            'iterate_containers not implemented for this driver')

    def list_containers(self):
        """
        Return a list of containers.

        :return: A list of Container instances.
        :rtype: ``list`` of :class:`Container`
        """
        return list(self.iterate_containers())

    def iterate_container_objects(self, container):
        """
        Return a generator of objects for the given container.

        :param container: Container instance
        :type container: :class:`Container`

        :return: A generator of Object instances.
        :rtype: ``generator`` of :class:`Object`
        """
        raise NotImplementedError(
            'iterate_container_objects not implemented for this driver')

    def list_container_objects(self, container):
        """
        Return a list of objects for the given container.

        :param container: Container instance.
        :type container: :class:`Container`

        :return: A list of Object instances.
        :rtype: ``list`` of :class:`Object`
        """
        return list(self.iterate_container_objects(container))

    def get_container(self, container_name):
        """
        Return a container instance.

        :param container_name: Container name.
        :type container_name: ``str``

        :return: :class:`Container` instance.
        :rtype: :class:`Container`
        """
        # FIX: the message previously said 'get_object', which made the
        # error misleading when a subclass forgot to implement this method.
        raise NotImplementedError(
            'get_container not implemented for this driver')

    def get_container_cdn_url(self, container):
        """
        Return a container CDN URL.

        :param container: Container instance
        :type container: :class:`Container`

        :return: A CDN URL for this container.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'get_container_cdn_url not implemented for this driver')

    def get_object(self, container_name, object_name):
        """
        Return an object instance.

        :param container_name: Container name.
        :type container_name: ``str``

        :param object_name: Object name.
        :type object_name: ``str``

        :return: :class:`Object` instance.
        :rtype: :class:`Object`
        """
        raise NotImplementedError(
            'get_object not implemented for this driver')

    def get_object_cdn_url(self, obj):
        """
        Return a object CDN URL.

        :param obj: Object instance
        :type obj: :class:`Object`

        :return: A CDN URL for this object.
        :rtype: ``str``
        """
        raise NotImplementedError(
            'get_object_cdn_url not implemented for this driver')

    def enable_container_cdn(self, container):
        """
        Enable container CDN.

        :param container: Container instance
        :type container: :class:`Container`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'enable_container_cdn not implemented for this driver')

    def enable_object_cdn(self, obj):
        """
        Enable object CDN.

        :param obj: Object instance
        :type obj: :class:`Object`

        :rtype: ``bool``
        """
        raise NotImplementedError(
            'enable_object_cdn not implemented for this driver')

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        """
        Download an object to the specified destination path.

        :param obj: Object instance.
        :type obj: :class:`Object`

        :param destination_path: Full path to a file or a directory where the
                                 incoming file will be saved.
        :type destination_path: ``str``

        :param overwrite_existing: True to overwrite an existing file,
                                   defaults to False.
        :type overwrite_existing: ``bool``

        :param delete_on_failure: True to delete a partially downloaded file if
                                  the download was not successful (hash
                                  mismatch / file size).
        :type delete_on_failure: ``bool``

        :return: True if an object has been successfully downloaded, False
                 otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'download_object not implemented for this driver')

    def download_object_as_stream(self, obj, chunk_size=None):
        """
        Return a generator which yields object data.

        :param obj: Object instance
        :type obj: :class:`Object`

        :param chunk_size: Optional chunk size (in bytes).
        :type chunk_size: ``int``
        """
        raise NotImplementedError(
            'download_object_as_stream not implemented for this driver')

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True):
        """
        Upload an object currently located on a disk.

        :param file_path: Path to the object on disk.
        :type file_path: ``str``

        :param container: Destination container.
        :type container: :class:`Container`

        :param object_name: Object name.
        :type object_name: ``str``

        :param verify_hash: Verify hash
        :type verify_hash: ``bool``

        :param extra: Extra attributes (driver specific). (optional)
        :type extra: ``dict``

        :rtype: :class:`Object`
        """
        raise NotImplementedError(
            'upload_object not implemented for this driver')

    def upload_object_via_stream(self, iterator, container,
                                 object_name,
                                 extra=None):
        """
        Upload an object using an iterator.

        If a provider supports it, chunked transfer encoding is used and you
        don't need to know in advance the amount of data to be uploaded.

        Otherwise if a provider doesn't support it, iterator will be exhausted
        so a total size for data to be uploaded can be determined.

        Note: Exhausting the iterator means that the whole data must be
        buffered in memory which might result in memory exhausting when
        uploading a very large object.

        If a file is located on a disk you are advised to use upload_object
        function which uses fs.stat function to determine the file size and it
        doesn't need to buffer whole object in the memory.

        :type iterator: :class:`object`
        :param iterator: An object which implements the iterator interface.

        :type container: :class:`Container`
        :param container: Destination container.

        :type object_name: ``str``
        :param object_name: Object name.

        :type extra: ``dict``
        :param extra: (optional) Extra attributes (driver specific). Note:
            This dictionary must contain a 'content_type' key which represents
            a content type of the stored object.

        :rtype: ``object``
        """
        raise NotImplementedError(
            'upload_object_via_stream not implemented for this driver')

    def delete_object(self, obj):
        """
        Delete an object.

        :type obj: :class:`Object`
        :param obj: Object instance.

        :return: ``bool`` True on success.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_object not implemented for this driver')

    def create_container(self, container_name):
        """
        Create a new container.

        :type container_name: ``str``
        :param container_name: Container name.

        :return: Container instance on success.
        :rtype: :class:`Container`
        """
        raise NotImplementedError(
            'create_container not implemented for this driver')

    def delete_container(self, container):
        """
        Delete a container.

        :type container: :class:`Container`
        :param container: Container instance

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        raise NotImplementedError(
            'delete_container not implemented for this driver')

    def _get_object(self, obj, callback, callback_kwargs, response,
                    success_status_code=None):
        """
        Call passed callback and start transfer of the object'

        :type obj: :class:`Object`
        :param obj: Object instance.

        :type callback: :class:`function`
        :param callback: Function which is called with the passed
            callback_kwargs

        :type callback_kwargs: ``dict``
        :param callback_kwargs: Keyword arguments which are passed to the
             callback.

        :type response: :class:`Response`
        :param response: Response instance.

        :type success_status_code: ``int``
        :param success_status_code: Status code which represents a successful
                                    transfer (defaults to httplib.OK)

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        success_status_code = success_status_code or httplib.OK

        if response.status == success_status_code:
            return callback(**callback_kwargs)
        elif response.status == httplib.NOT_FOUND:
            raise ObjectDoesNotExistError(object_name=obj.name,
                                          value='', driver=self)

        raise LibcloudError(value='Unexpected status code: %s' %
                                  (response.status),
                            driver=self)

    def _save_object(self, response, obj, destination_path,
                     overwrite_existing=False, delete_on_failure=True,
                     chunk_size=None):
        """
        Save object to the provided path.

        :type response: :class:`RawResponse`
        :param response: RawResponse instance.

        :type obj: :class:`Object`
        :param obj: Object instance.

        :type destination_path: ``str``
        :param destination_path: Destination directory.

        :type delete_on_failure: ``bool``
        :param delete_on_failure: True to delete partially downloaded object if
                                  the download fails.

        :type overwrite_existing: ``bool``
        :param overwrite_existing: True to overwrite a local path if it already
                                   exists.

        :type chunk_size: ``int``
        :param chunk_size: Optional chunk size
            (defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb)

        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        chunk_size = chunk_size or CHUNK_SIZE

        base_name = os.path.basename(destination_path)

        if not base_name and not os.path.exists(destination_path):
            raise LibcloudError(
                value='Path %s does not exist' % (destination_path),
                driver=self)

        # No base name means destination_path is a directory; save under
        # the object's own name inside of it.
        if not base_name:
            file_path = pjoin(destination_path, obj.name)
        else:
            file_path = destination_path

        if os.path.exists(file_path) and not overwrite_existing:
            raise LibcloudError(
                value='File %s already exists, but ' % (file_path) +
                'overwrite_existing=False',
                driver=self)

        stream = libcloud.utils.files.read_in_chunks(response, chunk_size)

        try:
            data_read = next(stream)
        except StopIteration:
            # Empty response?
            return False

        bytes_transferred = 0

        with open(file_path, 'wb') as file_handle:
            while len(data_read) > 0:
                file_handle.write(b(data_read))
                bytes_transferred += len(data_read)

                try:
                    data_read = next(stream)
                except StopIteration:
                    data_read = ''

        if int(obj.size) != int(bytes_transferred):
            # Transfer failed, support retry?
            if delete_on_failure:
                try:
                    os.unlink(file_path)
                except Exception:
                    pass

            return False

        return True

    def _upload_object(self, object_name, content_type, upload_func,
                       upload_func_kwargs, request_path, request_method='PUT',
                       headers=None, file_path=None, iterator=None):
        """
        Helper function for setting common request headers and calling the
        passed in callback which uploads an object.
        """
        headers = headers or {}

        if file_path and not os.path.exists(file_path):
            raise OSError('File %s does not exist' % (file_path))

        if iterator is not None and not hasattr(iterator, 'next') and not \
                hasattr(iterator, '__next__'):
            raise AttributeError('iterator object must implement next() ' +
                                 'method.')

        if not content_type:
            if file_path:
                name = file_path
            else:
                name = object_name
            content_type, _ = libcloud.utils.files.guess_file_mime_type(name)

            if not content_type:
                if self.strict_mode:
                    raise AttributeError('File content-type could not be '
                                         'guessed and no content_type value '
                                         'is provided')
                else:
                    # Fallback to a content-type
                    content_type = DEFAULT_CONTENT_TYPE

        file_size = None

        if iterator:
            if self.supports_chunked_encoding:
                headers['Transfer-Encoding'] = 'chunked'
                upload_func_kwargs['chunked'] = True
            else:
                # Chunked transfer encoding is not supported. Need to buffer
                # all the data in memory so we can determine file size.
                iterator = libcloud.utils.files.read_in_chunks(
                    iterator=iterator)
                data = libcloud.utils.files.exhaust_iterator(iterator=iterator)

                file_size = len(data)
                upload_func_kwargs['data'] = data
        else:
            file_size = os.path.getsize(file_path)
            upload_func_kwargs['chunked'] = False

        if file_size is not None and 'Content-Length' not in headers:
            headers['Content-Length'] = file_size

        headers['Content-Type'] = content_type
        response = self.connection.request(request_path,
                                           method=request_method, data=None,
                                           headers=headers, raw=True)

        upload_func_kwargs['response'] = response
        success, data_hash, bytes_transferred = upload_func(
            **upload_func_kwargs)

        if not success:
            raise LibcloudError(
                value='Object upload failed, Perhaps a timeout?', driver=self)

        result_dict = {'response': response, 'data_hash': data_hash,
                       'bytes_transferred': bytes_transferred}
        return result_dict

    def _upload_data(self, response, data, calculate_hash=True):
        """
        Upload data stored in a string.

        :type response: :class:`RawResponse`
        :param response: RawResponse object.

        :type data: ``str``
        :param data: Data to upload.

        :type calculate_hash: ``bool``
        :param calculate_hash: True to calculate hash of the transferred data.
                               (defauls to True).

        :rtype: ``tuple``
        :return: First item is a boolean indicator of success, second
                 one is the uploaded data MD5 hash and the third one
                 is the number of transferred bytes.
        """
        bytes_transferred = 0
        data_hash = None

        if calculate_hash:
            data_hash = self._get_hash_function()
            data_hash.update(b(data))

        try:
            response.connection.connection.send(b(data))
        except Exception:
            # TODO: let this exception propagate
            # Timeout, etc.
            return False, None, bytes_transferred

        bytes_transferred = len(data)

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        return True, data_hash, bytes_transferred

    def _stream_data(self, response, iterator, chunked=False,
                     calculate_hash=True, chunk_size=None, data=None):
        """
        Stream a data over an http connection.

        :type response: :class:`RawResponse`
        :param response: RawResponse object.

        :type iterator: :class:`object`
        :param iterator: An object which implements an iterator interface
                         or a File like object with read method.

        :type chunked: ``bool``
        :param chunked: True if the chunked transfer encoding should be used
                        (defauls to False).

        :type calculate_hash: ``bool``
        :param calculate_hash: True to calculate hash of the transferred data.
                               (defauls to True).

        :type chunk_size: ``int``
        :param chunk_size: Optional chunk size (defaults to ``CHUNK_SIZE``)

        :rtype: ``tuple``
        :return: First item is a boolean indicator of success, second
                 one is the uploaded data MD5 hash and the third one
                 is the number of transferred bytes.
        """
        chunk_size = chunk_size or CHUNK_SIZE

        data_hash = None
        if calculate_hash:
            data_hash = self._get_hash_function()

        generator = libcloud.utils.files.read_in_chunks(iterator, chunk_size)

        bytes_transferred = 0
        try:
            chunk = next(generator)
        except StopIteration:
            # Special case when StopIteration is thrown on the first iteration
            # - create a 0-byte long object.
            # FIX: this branch previously (a) called data_hash.hexdigest()
            # even when calculate_hash=False (AttributeError on None),
            # (b) sent the raw str chunk without b() and (c) in the chunked
            # case fell through and emitted the chunked terminator more
            # than once. It now sends the terminator exactly once and
            # returns immediately.
            chunk = ''
            if chunked:
                # A zero-length chunk is the chunked transfer-encoding
                # terminator.
                response.connection.connection.send(b('0\r\n\r\n'))
            else:
                response.connection.connection.send(b(chunk))

            if calculate_hash:
                data_hash = data_hash.hexdigest()
            return True, data_hash, bytes_transferred

        while len(chunk) > 0:
            try:
                if chunked:
                    response.connection.connection.send(b('%X\r\n' %
                                                          (len(chunk))))
                    response.connection.connection.send(b(chunk))
                    response.connection.connection.send(b('\r\n'))
                else:
                    response.connection.connection.send(b(chunk))
            except Exception:
                # TODO: let this exception propagate
                # Timeout, etc.
                return False, None, bytes_transferred

            bytes_transferred += len(chunk)
            if calculate_hash:
                data_hash.update(b(chunk))

            try:
                chunk = next(generator)
            except StopIteration:
                chunk = ''

        if chunked:
            response.connection.connection.send(b('0\r\n\r\n'))

        if calculate_hash:
            data_hash = data_hash.hexdigest()

        return True, data_hash, bytes_transferred

    def _upload_file(self, response, file_path, chunked=False,
                     calculate_hash=True):
        """
        Upload a file to the server.

        :type response: :class:`RawResponse`
        :param response: RawResponse object.

        :type file_path: ``str``
        :param file_path: Path to a local file.

        :type chunked: ``bool``
        :param chunked: True to use chunked transfer encoding.

        :type calculate_hash: ``bool``
        :param calculate_hash: True to calculate hash of the transferred data.

        :rtype: ``tuple``
        :return: First item is a boolean indicator of success, second
                 one is the uploaded data MD5 hash and the third one
                 is the number of transferred bytes.
        """
        with open(file_path, 'rb') as file_handle:
            success, data_hash, bytes_transferred = (
                self._stream_data(
                    response=response,
                    iterator=iter(file_handle),
                    chunked=chunked,
                    calculate_hash=calculate_hash))

        return success, data_hash, bytes_transferred

    def _get_hash_function(self):
        """
        Return instantiated hash function for the hash type supported by
        the provider.
        """
        try:
            func = getattr(hashlib, self.hash_type)()
        except AttributeError:
            raise RuntimeError('Invalid or unsupported hash type: %s' %
                               (self.hash_type))

        return func
|
||||
23
awx/lib/site-packages/libcloud/storage/drivers/__init__.py
Normal file
23
awx/lib/site-packages/libcloud/storage/drivers/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Drivers for working with different providers
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'dummy',
|
||||
'cloudfiles'
|
||||
]
|
||||
472
awx/lib/site-packages/libcloud/storage/drivers/atmos.py
Normal file
472
awx/lib/site-packages/libcloud/storage/drivers/atmos.py
Normal file
@@ -0,0 +1,472 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
import time
|
||||
|
||||
from libcloud.utils.py3 import PY3
|
||||
from libcloud.utils.py3 import b
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import next
|
||||
from libcloud.utils.py3 import urlparse
|
||||
from libcloud.utils.py3 import urlencode
|
||||
from libcloud.utils.py3 import urlquote
|
||||
from libcloud.utils.py3 import urlunquote
|
||||
|
||||
if PY3:
|
||||
from io import FileIO as file
|
||||
|
||||
from libcloud.utils.files import read_in_chunks, guess_file_mime_type
|
||||
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
|
||||
from libcloud.common.types import LibcloudError
|
||||
|
||||
from libcloud.storage.base import Object, Container, StorageDriver, CHUNK_SIZE
|
||||
from libcloud.storage.types import ContainerAlreadyExistsError, \
|
||||
ContainerDoesNotExistError, ContainerIsNotEmptyError, \
|
||||
ObjectDoesNotExistError
|
||||
|
||||
|
||||
def collapse(s):
    """Collapse runs of consecutive spaces in ``s`` into single spaces.

    Note: only the space character is collapsed; tabs and newlines are
    preserved verbatim (matches ``s.split(' ')`` semantics).
    """
    fragments = s.split(' ')
    return ' '.join(fragment for fragment in fragments if fragment)
|
||||
|
||||
|
||||
class AtmosError(LibcloudError):
    """
    Exception raised for error responses returned by the EMC Atmos API.
    """
    def __init__(self, code, message, driver=None):
        # code: numeric Atmos error code (e.g. 1003 = path not found,
        # 1016 = resource already exists, 1023 = directory not empty --
        # see the checks in AtmosDriver).
        super(AtmosError, self).__init__(value=message, driver=driver)
        self.code = code
|
||||
|
||||
|
||||
class AtmosResponse(XmlResponse):
    """
    Response class for the Atmos API; converts API level failures into
    :class:`AtmosError` exceptions.
    """
    def success(self):
        # 200/201/204 for regular calls, 206 for ranged (chunked)
        # transfers.
        return self.status in (httplib.OK, httplib.CREATED, httplib.NO_CONTENT,
                               httplib.PARTIAL_CONTENT)

    def parse_error(self):
        """
        Parse an <Error><Code/><Message/></Error> body and raise
        :class:`AtmosError` carrying the numeric code.
        """
        tree = self.parse_body()

        # An empty body yields no error information.
        if tree is None:
            return None

        code = int(tree.find('Code').text)
        message = tree.find('Message').text
        raise AtmosError(code=code, message=message,
                         driver=self.connection.driver)
|
||||
|
||||
|
||||
class AtmosConnection(ConnectionUserAndKey):
    """
    Connection class which signs every request with the Atmos shared
    secret scheme (HMAC-SHA1 over selected headers and the request path).
    """
    responseCls = AtmosResponse

    def add_default_headers(self, headers):
        # x-emc-uid / Date / x-emc-date participate in the signature
        # computed in pre_connect_hook(), so they must always be present.
        headers['x-emc-uid'] = self.user_id
        headers['Date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                        time.gmtime())
        headers['x-emc-date'] = headers['Date']

        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/octet-stream'
        if 'Accept' not in headers:
            headers['Accept'] = '*/*'

        return headers

    def pre_connect_hook(self, params, headers):
        # Sign at the last moment, once all headers and query
        # parameters for the request are known.
        headers['x-emc-signature'] = self._calculate_signature(params, headers)

        return params, headers

    def _calculate_signature(self, params, headers):
        """
        Build the request signature: HTTP method, Content-Type, Range and
        Date headers, the lower-cased request path (with query string)
        and all ``x-emc-*`` headers (sorted, whitespace-collapsed) are
        newline-joined and HMAC-SHA1 signed with the base64-decoded
        shared secret.
        """
        pathstring = urlunquote(self.action)
        # The path is signed relative to the driver's base path.
        if pathstring.startswith(self.driver.path):
            pathstring = pathstring[len(self.driver.path):]
        if params:
            if type(params) is dict:
                params = list(params.items())
            pathstring += '?' + urlencode(params)
        pathstring = pathstring.lower()

        xhdrs = [(k, v) for k, v in list(headers.items()) if
                 k.startswith('x-emc-')]
        # Custom headers must be signed in sorted order.
        xhdrs.sort(key=lambda x: x[0])

        signature = [
            self.method,
            headers.get('Content-Type', ''),
            headers.get('Range', ''),
            headers.get('Date', ''),
            pathstring,
        ]
        # Header values have runs of spaces collapsed before signing.
        signature.extend([k + ':' + collapse(v) for k, v in xhdrs])
        signature = '\n'.join(signature)
        key = base64.b64decode(self.key)
        signature = hmac.new(b(key), b(signature), hashlib.sha1).digest()
        return base64.b64encode(b(signature)).decode('utf-8')
|
||||
|
||||
|
||||
class AtmosDriver(StorageDriver):
    """
    EMC Atmos storage driver.

    Containers map to top level directories in the Atmos namespace and
    objects map to files inside them.
    """
    connectionCls = AtmosConnection

    host = None
    path = None
    api_name = 'atmos'
    supports_chunked_encoding = True
    website = 'http://atmosonline.com/'
    name = 'atmos'

    # Default validity period for generated shareable (CDN) URLs.
    DEFAULT_CDN_TTL = 60 * 60 * 24 * 7  # 1 week

    def __init__(self, key, secret=None, secure=True, host=None, port=None):
        # Fall back to the class level host (set by deployment specific
        # subclasses) when none is supplied.
        host = host or self.host
        super(AtmosDriver, self).__init__(key, secret, secure, host, port)

    def iterate_containers(self):
        """
        Yield a :class:`Container` for every directory in the namespace
        root.
        """
        result = self.connection.request(self._namespace_path(''))
        entries = self._list_objects(result.object, object_type='directory')
        for entry in entries:
            extra = {
                'object_id': entry['id']
            }
            yield Container(entry['name'], extra, self)

    def get_container(self, container_name):
        """
        Return the container with the given name.

        :raises: ContainerDoesNotExistError when Atmos reports error
                 code 1003 (path not found).
        """
        path = self._namespace_path(container_name) + '/?metadata/system'
        try:
            result = self.connection.request(path)
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            raise ContainerDoesNotExistError(e, self, container_name)
        meta = self._emc_meta(result)
        extra = {
            'object_id': meta['objectid']
        }
        return Container(container_name, extra, self)

    def create_container(self, container_name):
        """
        Create a new container (top level directory).

        :raises: ContainerAlreadyExistsError when Atmos reports error
                 code 1016 (resource already exists).
        """
        path = self._namespace_path(container_name) + '/'
        try:
            self.connection.request(path, method='POST')
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1016:
                raise
            raise ContainerAlreadyExistsError(e, self, container_name)
        return self.get_container(container_name)

    def delete_container(self, container):
        """
        Delete an (empty) container.

        :raises: ContainerDoesNotExistError (code 1003) or
                 ContainerIsNotEmptyError (code 1023).
        """
        try:
            self.connection.request(self._namespace_path(container.name) + '/',
                                    method='DELETE')
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code == 1003:
                raise ContainerDoesNotExistError(e, self, container.name)
            elif e.code == 1023:
                raise ContainerIsNotEmptyError(e, self, container.name)
            else:
                # Previously any other AtmosError was silently swallowed
                # and True was returned; propagate unexpected failures.
                raise
        return True

    def get_object(self, container_name, object_name):
        """
        Return the named object; fetches both system and user metadata.

        :raises: ObjectDoesNotExistError when Atmos reports error code
                 1003 (path not found).
        """
        container = self.get_container(container_name)
        object_name_cleaned = self._clean_object_name(object_name)
        path = self._namespace_path(container_name) + '/' + object_name_cleaned

        try:
            result = self.connection.request(path + '?metadata/system')
            system_meta = self._emc_meta(result)

            result = self.connection.request(path + '?metadata/user')
            user_meta = self._emc_meta(result)
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            raise ObjectDoesNotExistError(e, self, object_name)

        # Convert Atmos' ISO-8601 mtime into an RFC 1123 style timestamp.
        last_modified = time.strptime(system_meta['mtime'],
                                      '%Y-%m-%dT%H:%M:%SZ')
        last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                      last_modified)
        extra = {
            'object_id': system_meta['objectid'],
            'last_modified': last_modified
        }
        # The MD5 is stored as user metadata by upload_object(); surface
        # it as the object hash instead of a metadata entry.
        data_hash = user_meta.pop('md5', '')
        return Object(object_name, int(system_meta['size']), data_hash, extra,
                      user_meta, container, self)

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True):
        """
        Upload the file at ``file_path`` as ``object_name`` and attach the
        computed MD5 plus any user supplied metadata to it.
        """
        upload_func = self._upload_file
        upload_func_kwargs = {'file_path': file_path}
        method = 'PUT'

        extra = extra or {}
        object_name_cleaned = self._clean_object_name(object_name)
        request_path = self._namespace_path(container.name) + '/' +\
            object_name_cleaned
        content_type = extra.get('content_type', None)

        try:
            self.connection.request(request_path + '?metadata/system')
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            # Object does not exist yet - create (POST) instead of
            # overwrite (PUT).
            method = 'POST'

        result_dict = self._upload_object(
            object_name=object_name,
            content_type=content_type,
            upload_func=upload_func,
            upload_func_kwargs=upload_func_kwargs,
            request_path=request_path,
            request_method=method,
            headers={}, file_path=file_path)

        bytes_transferred = result_dict['bytes_transferred']

        # ``extra`` is always a dict at this point (normalized above), so
        # the original ``if extra is None`` fallback was unreachable.
        meta_data = extra.get('meta_data', {})
        meta_data['md5'] = result_dict['data_hash']
        user_meta = ', '.join([k + '=' + str(v) for k, v in
                               list(meta_data.items())])
        self.connection.request(request_path + '?metadata/user', method='POST',
                                headers={'x-emc-meta': user_meta})
        result = self.connection.request(request_path + '?metadata/system')
        meta = self._emc_meta(result)
        # The md5 entry is stored remotely but not exposed as metadata.
        del meta_data['md5']
        extra = {
            'object_id': meta['objectid'],
            'meta_data': meta_data,
        }

        return Object(object_name, bytes_transferred, result_dict['data_hash'],
                      extra, meta_data, container, self)

    def upload_object_via_stream(self, iterator, container, object_name,
                                 extra=None):
        """
        Upload an object from an iterator, sending it in CHUNK_SIZE
        ranged PUTs and updating the stored MD5 as chunks arrive.
        """
        if isinstance(iterator, file):
            iterator = iter(iterator)

        data_hash = hashlib.md5()
        generator = read_in_chunks(iterator, CHUNK_SIZE, True)
        bytes_transferred = 0
        try:
            chunk = next(generator)
        except StopIteration:
            # Empty stream: still create a zero byte object below.
            chunk = ''

        path = self._namespace_path(container.name + '/' + object_name)
        method = 'PUT'

        if extra is not None:
            content_type = extra.get('content_type', None)
        else:
            content_type = None
        if not content_type:
            content_type, _ = guess_file_mime_type(object_name)

            if not content_type:
                raise AttributeError(
                    'File content-type could not be guessed and' +
                    ' no content_type value provided')

        try:
            self.connection.request(path + '?metadata/system')
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            # Object does not exist yet - the first request creates it.
            method = 'POST'

        while True:
            end = bytes_transferred + len(chunk) - 1
            data_hash.update(b(chunk))
            headers = {
                'x-emc-meta': 'md5=' + data_hash.hexdigest(),
                'Content-Type': content_type,
            }

            if len(chunk) > 0 and bytes_transferred > 0:
                # Subsequent chunks are appended via ranged PUTs.
                headers['Range'] = 'Bytes=%d-%d' % (bytes_transferred, end)
                method = 'PUT'

            result = self.connection.request(path, method=method, data=chunk,
                                             headers=headers)
            bytes_transferred += len(chunk)

            try:
                chunk = next(generator)
            except StopIteration:
                break
            if len(chunk) == 0:
                break

        data_hash = data_hash.hexdigest()

        if extra is None:
            meta_data = {}
        else:
            meta_data = extra.get('meta_data', {})
        meta_data['md5'] = data_hash
        user_meta = ', '.join([k + '=' + str(v) for k, v in
                               list(meta_data.items())])
        self.connection.request(path + '?metadata/user', method='POST',
                                headers={'x-emc-meta': user_meta})

        result = self.connection.request(path + '?metadata/system')

        meta = self._emc_meta(result)
        extra = {
            'object_id': meta['objectid'],
            'meta_data': meta_data,
        }

        return Object(object_name, bytes_transferred, data_hash, extra,
                      meta_data, container, self)

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        """
        Download ``obj`` to ``destination_path`` on the local filesystem.
        """
        path = self._namespace_path(obj.container.name + '/' + obj.name)
        response = self.connection.request(path, method='GET', raw=True)

        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={
                                    'obj': obj,
                                    'response': response.response,
                                    'destination_path': destination_path,
                                    'overwrite_existing': overwrite_existing,
                                    'delete_on_failure': delete_on_failure
                                },
                                success_status_code=httplib.OK)

    def download_object_as_stream(self, obj, chunk_size=None):
        """
        Return an iterator yielding ``obj``'s data in chunks.
        """
        path = self._namespace_path(obj.container.name + '/' + obj.name)
        response = self.connection.request(path, method='GET', raw=True)

        return self._get_object(obj=obj, callback=read_in_chunks,
                                response=response,
                                callback_kwargs={
                                    'iterator': response.response,
                                    'chunk_size': chunk_size
                                },
                                success_status_code=httplib.OK)

    def delete_object(self, obj):
        """
        Delete an object.

        :raises: ObjectDoesNotExistError when Atmos reports error code
                 1003 (path not found).
        """
        path = self._namespace_path(obj.container.name) + '/' +\
            self._clean_object_name(obj.name)
        try:
            self.connection.request(path, method='DELETE')
        except AtmosError:
            e = sys.exc_info()[1]
            if e.code != 1003:
                raise
            raise ObjectDoesNotExistError(e, self, obj.name)
        return True

    def enable_object_cdn(self, obj):
        # Atmos objects are addressable via signed URLs without any
        # explicit enablement step, so this is a no-op.
        return True

    def get_object_cdn_url(self, obj, expiry=None, use_object=False):
        """
        Return a object CDN URL.

        :param obj: Object instance
        :type obj: :class:`Object`

        :param expiry: Expiry
        :type expiry: ``str``

        :param use_object: Use object
        :type use_object: ``bool``

        :rtype: ``str``
        """
        if use_object:
            # NOTE(review): this reads the id from obj.meta_data, but the
            # driver stores 'object_id' under obj.extra -- confirm which
            # is intended before relying on use_object=True.
            path = '/rest/objects' + obj.meta_data['object_id']
        else:
            path = '/rest/namespace/' + obj.container.name + '/' + obj.name

        if self.secure:
            protocol = 'https'
        else:
            protocol = 'http'

        expiry = str(expiry or int(time.time()) + self.DEFAULT_CDN_TTL)
        params = [
            ('uid', self.key),
            ('expires', expiry),
        ]
        params.append(('signature', self._cdn_signature(path, params, expiry)))

        params = urlencode(params)
        path = self.path + path
        return urlparse.urlunparse((protocol, self.host, path, '', params, ''))

    def _cdn_signature(self, path, params, expiry):
        """
        Compute the HMAC-SHA1 signature for a shareable URL.
        """
        key = base64.b64decode(self.secret)
        signature = '\n'.join(['GET', path.lower(), self.key, expiry])
        # hmac.new() requires bytes on Python 3; wrap the key and message
        # with b() and decode the result, matching _calculate_signature()
        # in AtmosConnection. Previously this raised TypeError under PY3.
        signature = hmac.new(b(key), b(signature), hashlib.sha1).digest()
        return base64.b64encode(b(signature)).decode('utf-8')

    def _list_objects(self, tree, object_type=None):
        """
        Parse a DirectoryList response into a list of dicts with the
        ``id``, ``type`` and ``name`` of each entry, optionally filtered
        by file type ('directory' or 'regular').
        """
        listing = tree.find(self._emc_tag('DirectoryList'))
        entries = []
        for entry in listing.findall(self._emc_tag('DirectoryEntry')):
            file_type = entry.find(self._emc_tag('FileType')).text
            if object_type is not None and object_type != file_type:
                continue
            entries.append({
                'id': entry.find(self._emc_tag('ObjectID')).text,
                'type': file_type,
                'name': entry.find(self._emc_tag('Filename')).text
            })
        return entries

    def _clean_object_name(self, name):
        # Object names must be ASCII and URL safe.
        return urlquote(name.encode('ascii'))

    def _namespace_path(self, path):
        # Namespace paths address objects by directory/file name.
        return self.path + '/rest/namespace/' + urlquote(path.encode('ascii'))

    def _object_path(self, object_id):
        # The encode/decode round-trip enforces that the id is ASCII
        # while keeping the result a str -- ``str + bytes`` raised
        # TypeError on Python 3 with the previous bare encode().
        return self.path + '/rest/objects/' + \
            object_id.encode('ascii').decode('ascii')

    @staticmethod
    def _emc_tag(tag):
        # Qualify a tag name with the EMC XML namespace.
        return '{http://www.emc.com/cos/}' + tag

    def _emc_meta(self, response):
        """
        Parse the comma separated ``x-emc-meta`` response header into a
        dict; returns an empty dict when the header is absent or empty.
        """
        meta = response.headers.get('x-emc-meta', '')
        if len(meta) == 0:
            return {}
        meta = meta.split(', ')
        return dict([x.split('=', 1) for x in meta])

    def iterate_container_objects(self, container):
        """
        Yield an :class:`Object` for every regular file in the container.
        Size and hash are not included in the directory listing, so they
        are reported as 0 / ''.
        """
        headers = {'x-emc-include-meta': '1'}
        path = self._namespace_path(container.name) + '/'
        result = self.connection.request(path, headers=headers)
        entries = self._list_objects(result.object, object_type='regular')
        for entry in entries:
            metadata = {'object_id': entry['id']}
            yield Object(entry['name'], 0, '', {}, metadata, container, self)
|
||||
986
awx/lib/site-packages/libcloud/storage/drivers/azure_blobs.py
Normal file
986
awx/lib/site-packages/libcloud/storage/drivers/azure_blobs.py
Normal file
@@ -0,0 +1,986 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import with_statement
|
||||
|
||||
import base64
|
||||
import os
|
||||
import binascii
|
||||
|
||||
from xml.etree.ElementTree import Element, SubElement
|
||||
|
||||
from libcloud.utils.py3 import PY3
|
||||
from libcloud.utils.py3 import httplib
|
||||
from libcloud.utils.py3 import urlquote
|
||||
from libcloud.utils.py3 import tostring
|
||||
from libcloud.utils.py3 import b
|
||||
|
||||
from libcloud.utils.xml import fixxpath
|
||||
from libcloud.utils.files import read_in_chunks
|
||||
from libcloud.common.types import LibcloudError
|
||||
from libcloud.common.azure import AzureConnection
|
||||
|
||||
from libcloud.storage.base import Object, Container, StorageDriver
|
||||
from libcloud.storage.types import ContainerIsNotEmptyError
|
||||
from libcloud.storage.types import ContainerAlreadyExistsError
|
||||
from libcloud.storage.types import InvalidContainerNameError
|
||||
from libcloud.storage.types import ContainerDoesNotExistError
|
||||
from libcloud.storage.types import ObjectDoesNotExistError
|
||||
from libcloud.storage.types import ObjectHashMismatchError
|
||||
|
||||
if PY3:
|
||||
from io import FileIO as file
|
||||
|
||||
# Desired number of items in each response inside a paginated request
|
||||
RESPONSES_PER_REQUEST = 100
|
||||
|
||||
# As per the Azure documentation, if the upload file size is less than
|
||||
# 64MB, we can upload it in a single request. However, in real life azure
|
||||
# servers seem to disconnect randomly after around 5 MB or 200s of upload.
|
||||
# So, it is better that for file sizes greater than 4MB, we upload it in
|
||||
# chunks.
|
||||
# Also, with large sizes, if we use a lease, the lease will timeout after
|
||||
# 60 seconds, but the upload might still be in progress. This can be
|
||||
# handled in code, but if we use chunked uploads, the lease renewal will
|
||||
# happen automatically.
|
||||
AZURE_BLOCK_MAX_SIZE = 4 * 1024 * 1024
|
||||
|
||||
# Azure block blocks must be maximum 4MB
|
||||
# Azure page blobs must be aligned in 512 byte boundaries (4MB fits that)
|
||||
AZURE_CHUNK_SIZE = 4 * 1024 * 1024
|
||||
|
||||
# Azure page blob must be aligned in 512 byte boundaries
|
||||
AZURE_PAGE_CHUNK_SIZE = 512
|
||||
|
||||
# The time period (in seconds) for which a lease must be obtained.
|
||||
# If set as -1, we get an infinite lease, but that is a bad idea. If
|
||||
# after getting an infinite lease, there was an issue in releasing the
|
||||
# lease, the object will remain 'locked' forever, unless the lease is
|
||||
# released using the lease_id (which is not exposed to the user)
|
||||
AZURE_LEASE_PERIOD = 60
|
||||
|
||||
AZURE_STORAGE_HOST_SUFFIX = 'blob.core.windows.net'
|
||||
|
||||
|
||||
class AzureBlobLease(object):
    """
    A class to help in leasing an azure blob and renewing the lease
    """
    def __init__(self, driver, object_path, use_lease):
        """
        :param driver: The Azure storage driver that is being used
        :type driver: :class:`AzureStorageDriver`

        :param object_path: The path of the object we need to lease
        :type object_path: ``str``

        :param use_lease: Indicates if we must take a lease or not
        :type use_lease: ``bool``
        """
        self.object_path = object_path
        self.driver = driver
        self.use_lease = use_lease
        # Set by __enter__() once a lease was actually acquired.
        self.lease_id = None
        self.params = {'comp': 'lease'}

    def renew(self):
        """
        Renew the lease if it is older than a predefined time period
        """
        # No-op when no lease was acquired (use_lease=False or the blob
        # did not exist when entering the context).
        if self.lease_id is None:
            return

        headers = {'x-ms-lease-action': 'renew',
                   'x-ms-lease-id': self.lease_id,
                   'x-ms-lease-duration': '60'}

        response = self.driver.connection.request(self.object_path,
                                                  headers=headers,
                                                  params=self.params,
                                                  method='PUT')

        if response.status != httplib.OK:
            raise LibcloudError('Unable to obtain lease', driver=self)

    def update_headers(self, headers):
        """
        Update the lease id in the headers
        """
        if self.lease_id:
            headers['x-ms-lease-id'] = self.lease_id

    def __enter__(self):
        # Acquire a 60 second lease on the blob, unless leasing was
        # disabled for this context.
        if not self.use_lease:
            return self

        headers = {'x-ms-lease-action': 'acquire',
                   'x-ms-lease-duration': '60'}

        response = self.driver.connection.request(self.object_path,
                                                  headers=headers,
                                                  params=self.params,
                                                  method='PUT')

        if response.status == httplib.NOT_FOUND:
            # Blob does not exist yet, so there is nothing to lease.
            return self
        elif response.status != httplib.CREATED:
            raise LibcloudError('Unable to obtain lease', driver=self)

        self.lease_id = response.headers['x-ms-lease-id']
        return self

    def __exit__(self, type, value, traceback):
        # Release the lease (if any) when leaving the context.
        if self.lease_id is None:
            return

        headers = {'x-ms-lease-action': 'release',
                   'x-ms-lease-id': self.lease_id}
        response = self.driver.connection.request(self.object_path,
                                                  headers=headers,
                                                  params=self.params,
                                                  method='PUT')

        if response.status != httplib.OK:
            raise LibcloudError('Unable to release lease', driver=self)
|
||||
|
||||
|
||||
class AzureBlobsConnection(AzureConnection):
    """
    Represents a single connection to Azure Blobs
    """
    # The class body is intentionally empty: all request signing and
    # transport behaviour is inherited from AzureConnection.
|
||||
|
||||
|
||||
class AzureBlobsStorageDriver(StorageDriver):
|
||||
name = 'Microsoft Azure (blobs)'
|
||||
website = 'http://windows.azure.com/'
|
||||
connectionCls = AzureBlobsConnection
|
||||
hash_type = 'md5'
|
||||
supports_chunked_encoding = False
|
||||
ex_blob_type = 'BlockBlob'
|
||||
|
||||
def __init__(self, key, secret=None, secure=True, host=None, port=None,
             **kwargs):
    """
    :param key: Storage account name; also used to derive the default
                host unless an explicit ``host`` is given.
    :param secret: Base64 encoded storage account access key.
    """
    # Remember whether the caller supplied an explicit host so that
    # _ex_connection_class_kwargs() knows whether to generate one.
    self._host_argument_set = bool(host)

    # B64decode() this key and keep it, so that we don't have to do
    # so for every request. Minor performance improvement
    secret = base64.b64decode(b(secret))

    super(AzureBlobsStorageDriver, self).__init__(key=key, secret=secret,
                                                  secure=secure, host=host,
                                                  port=port, **kwargs)
|
||||
|
||||
def _ex_connection_class_kwargs(self):
    """
    Return extra kwargs for the connection class. Generates the
    account-specific ``<account>.blob.core.windows.net`` host name
    unless an explicit ``host`` argument was given to the driver.
    """
    kwargs = {}

    # host argument has precedence
    if not self._host_argument_set:
        kwargs['host'] = '%s.%s' % (self.key, AZURE_STORAGE_HOST_SUFFIX)

    return kwargs
|
||||
|
||||
def _xml_to_container(self, node):
    """
    Converts a container XML node to a container instance

    :param node: XML info of the container
    :type node: :class:`xml.etree.ElementTree.Element`

    :return: A container instance
    :rtype: :class:`Container`
    """

    name = node.findtext(fixxpath(xpath='Name'))
    props = node.find(fixxpath(xpath='Properties'))
    metadata = node.find(fixxpath(xpath='Metadata'))

    extra = {
        'url': node.findtext(fixxpath(xpath='Url')),
        # NOTE(review): 'Last-Modified' normally lives under
        # <Properties>; confirm the node-level lookup is intended.
        'last_modified': node.findtext(fixxpath(xpath='Last-Modified')),
        'etag': props.findtext(fixxpath(xpath='Etag')),
        'lease': {
            'status': props.findtext(fixxpath(xpath='LeaseStatus')),
            'state': props.findtext(fixxpath(xpath='LeaseState')),
            'duration': props.findtext(fixxpath(xpath='LeaseDuration')),
        },
        'meta_data': {}
    }

    # Element.getchildren() was deprecated and removed in Python 3.9;
    # iterating the element directly is equivalent.
    for meta in list(metadata):
        extra['meta_data'][meta.tag] = meta.text

    return Container(name=name, extra=extra, driver=self)
|
||||
|
||||
def _response_to_container(self, container_name, response):
    """
    Converts a HTTP response to a container instance

    :param container_name: Name of the container
    :type container_name: ``str``

    :param response: HTTP Response
    :type response: L{}

    :return: A container instance
    :rtype: :class:`Container`
    """

    headers = response.headers
    extra = {
        'url': 'http://%s%s' % (response.connection.host,
                                response.connection.action),
        'etag': headers['etag'],
        'last_modified': headers['last-modified'],
        'lease': {
            'status': headers.get('x-ms-lease-status', None),
            'state': headers.get('x-ms-lease-state', None),
            'duration': headers.get('x-ms-lease-duration', None),
        },
        'meta_data': {}
    }

    # Custom container metadata is returned as 'x-ms-meta-<name>'
    # headers; strip the prefix and collect the values.
    for key, value in response.headers.items():
        if key.startswith('x-ms-meta-'):
            key = key.split('x-ms-meta-')[1]
            extra['meta_data'][key] = value

    return Container(name=container_name, extra=extra, driver=self)
|
||||
|
||||
def _xml_to_object(self, container, blob):
    """
    Converts a BLOB XML node to an object instance

    :param container: Instance of the container holding the blob
    :type: :class:`Container`

    :param blob: XML info of the blob
    :type blob: L{}

    :return: An object instance
    :rtype: :class:`Object`
    """

    name = blob.findtext(fixxpath(xpath='Name'))
    props = blob.find(fixxpath(xpath='Properties'))
    metadata = blob.find(fixxpath(xpath='Metadata'))
    etag = props.findtext(fixxpath(xpath='Etag'))
    size = int(props.findtext(fixxpath(xpath='Content-Length')))

    extra = {
        'content_type': props.findtext(fixxpath(xpath='Content-Type')),
        'etag': etag,
        'md5_hash': props.findtext(fixxpath(xpath='Content-MD5')),
        'last_modified': props.findtext(fixxpath(xpath='Last-Modified')),
        'url': blob.findtext(fixxpath(xpath='Url')),
        'hash': props.findtext(fixxpath(xpath='Etag')),
        'lease': {
            'status': props.findtext(fixxpath(xpath='LeaseStatus')),
            'state': props.findtext(fixxpath(xpath='LeaseState')),
            'duration': props.findtext(fixxpath(xpath='LeaseDuration')),
        },
        'content_encoding': props.findtext(fixxpath(
            xpath='Content-Encoding')),
        'content_language': props.findtext(fixxpath(
            xpath='Content-Language')),
        'blob_type': props.findtext(fixxpath(xpath='BlobType'))
    }

    if extra['md5_hash']:
        # Azure returns the MD5 base64 encoded; convert it to the hex
        # digest form used elsewhere in libcloud.
        value = binascii.hexlify(base64.b64decode(b(extra['md5_hash'])))
        value = value.decode('ascii')
        extra['md5_hash'] = value

    meta_data = {}
    # Element.getchildren() was deprecated and removed in Python 3.9;
    # iterating the element directly is equivalent.
    for meta in list(metadata):
        meta_data[meta.tag] = meta.text

    return Object(name=name, size=size, hash=etag, meta_data=meta_data,
                  extra=extra, container=container, driver=self)
|
||||
|
||||
def _response_to_object(self, object_name, container, response):
    """
    Converts a HTTP response to an object (from headers)

    :param object_name: Name of the object
    :type object_name: ``str``

    :param container: Instance of the container holding the blob
    :type: :class:`Container`

    :param response: HTTP Response
    :type response: L{}

    :return: An object instance
    :rtype: :class:`Object`
    """

    headers = response.headers
    size = int(headers['content-length'])
    etag = headers['etag']

    extra = {
        'url': 'http://%s%s' % (response.connection.host,
                                response.connection.action),
        'etag': etag,
        'md5_hash': headers.get('content-md5', None),
        'content_type': headers.get('content-type', None),
        'content_language': headers.get('content-language', None),
        'content_encoding': headers.get('content-encoding', None),
        'last_modified': headers['last-modified'],
        'lease': {
            'status': headers.get('x-ms-lease-status', None),
            'state': headers.get('x-ms-lease-state', None),
            'duration': headers.get('x-ms-lease-duration', None),
        },
        'blob_type': headers['x-ms-blob-type']
    }

    if extra['md5_hash']:
        # Azure returns the MD5 base64 encoded; convert it to the hex
        # digest form used elsewhere in libcloud.
        value = binascii.hexlify(base64.b64decode(b(extra['md5_hash'])))
        value = value.decode('ascii')
        extra['md5_hash'] = value

    meta_data = {}
    # Custom blob metadata is returned as 'x-ms-meta-<name>' headers.
    for key, value in response.headers.items():
        if key.startswith('x-ms-meta-'):
            key = key.split('x-ms-meta-')[1]
            meta_data[key] = value

    return Object(name=object_name, size=size, hash=etag, extra=extra,
                  meta_data=meta_data, container=container, driver=self)
|
||||
|
||||
def iterate_containers(self):
    """
    @inherits: :class:`StorageDriver.iterate_containers`

    Pages through the account's container list, yielding one
    :class:`Container` at a time.
    """
    params = {'comp': 'list',
              'maxresults': RESPONSES_PER_REQUEST,
              'include': 'metadata'}

    while True:
        response = self.connection.request('/', params)
        if response.status != httplib.OK:
            raise LibcloudError('Unexpected status code: %s' %
                                (response.status), driver=self)

        body = response.parse_body()
        containers = body.find(fixxpath(xpath='Containers'))
        containers = containers.findall(fixxpath(xpath='Container'))

        for container in containers:
            yield self._xml_to_container(container)

        # The response elements are namespaced, so the marker must be
        # looked up via fixxpath() -- a bare 'NextMarker' never matches
        # and pagination would silently stop after the first page.
        params['marker'] = body.findtext(fixxpath(xpath='NextMarker'))
        if not params['marker']:
            break
|
||||
|
||||
def iterate_container_objects(self, container):
    """
    @inherits: :class:`StorageDriver.iterate_container_objects`

    Pages through the container's blob list, yielding one
    :class:`Object` at a time.
    """
    params = {'restype': 'container',
              'comp': 'list',
              'maxresults': RESPONSES_PER_REQUEST,
              'include': 'metadata'}

    container_path = self._get_container_path(container)

    while True:
        response = self.connection.request(container_path,
                                           params=params)

        if response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value=None,
                                             driver=self,
                                             container_name=container.name)

        elif response.status != httplib.OK:
            raise LibcloudError('Unexpected status code: %s' %
                                (response.status), driver=self)

        body = response.parse_body()
        blobs = body.find(fixxpath(xpath='Blobs'))
        blobs = blobs.findall(fixxpath(xpath='Blob'))

        for blob in blobs:
            yield self._xml_to_object(container, blob)

        # The response elements are namespaced, so the marker must be
        # looked up via fixxpath() -- a bare 'NextMarker' never matches
        # and pagination would silently stop after the first page.
        params['marker'] = body.findtext(fixxpath(xpath='NextMarker'))
        if not params['marker']:
            break
|
||||
|
||||
def get_container(self, container_name):
    """
    @inherits: :class:`StorageDriver.get_container`

    Issues a HEAD request and builds the container from the response
    headers.

    :raises: ContainerDoesNotExistError on 404, LibcloudError on any
             other non-200 status.
    """
    params = {'restype': 'container'}

    container_path = '/%s' % (container_name)

    response = self.connection.request(container_path, params=params,
                                       method='HEAD')

    if response.status == httplib.NOT_FOUND:
        raise ContainerDoesNotExistError('Container %s does not exist' %
                                         (container_name), driver=self,
                                         container_name=container_name)
    elif response.status != httplib.OK:
        raise LibcloudError('Unexpected status code: %s' %
                            (response.status), driver=self)

    return self._response_to_container(container_name, response)
|
||||
|
||||
def get_object(self, container_name, object_name):
    """
    @inherits: :class:`StorageDriver.get_object`
    """
    # Resolving the container first also validates that it exists.
    container = self.get_container(container_name=container_name)
    path = self._get_object_path(container, object_name)

    response = self.connection.request(path, method='HEAD')

    # Any non-OK status is treated as the object being absent.
    if response.status != httplib.OK:
        raise ObjectDoesNotExistError(value=None, driver=self,
                                      object_name=object_name)

    return self._response_to_object(object_name, container, response)
|
||||
|
||||
def _get_container_path(self, container):
    """
    Build the request path for a container.

    :param container: Container instance
    :type container: :class:`Container`

    :return: A path for this container.
    :rtype: ``str``
    """
    return '/' + container.name
|
||||
|
||||
def _get_object_path(self, container, object_name):
    """
    Build the server-side request path for an object in a container.

    :param container: Container instance
    :type container: :class:`Container`

    :param object_name: Object name
    :type object_name: :class:`str`

    :return: A path for this object.
    :rtype: ``str``
    """
    # The object name may contain characters that are not URL-safe,
    # so it is percent-encoded before being joined onto the path.
    return '%s/%s' % (self._get_container_path(container),
                      urlquote(object_name))
|
||||
|
||||
def create_container(self, container_name):
    """
    @inherits: :class:`StorageDriver.create_container`
    """
    response = self.connection.request('/%s' % (container_name),
                                       params={'restype': 'container'},
                                       method='PUT')
    status = response.status

    if status == httplib.CREATED:
        return self._response_to_container(container_name, response)

    if status == httplib.CONFLICT:
        # Azure returns 409 when a container with this name exists.
        raise ContainerAlreadyExistsError(
            value='Container with this name already exists. The name must '
            'be unique among all the containers in the system',
            container_name=container_name, driver=self)

    if status == httplib.BAD_REQUEST:
        # Azure returns 400 when the name violates its naming rules.
        raise InvalidContainerNameError(value='Container name contains ' +
                                        'invalid characters.',
                                        container_name=container_name,
                                        driver=self)

    raise LibcloudError('Unexpected status code: %s' % (status),
                        driver=self)
|
||||
|
||||
def delete_container(self, container):
    """
    @inherits: :class:`StorageDriver.delete_container`
    """
    # Azure itself will happily delete a non-empty container; to keep
    # the behaviour consistent with the other storage drivers we
    # refuse as soon as a single object is found inside it.
    for _ in container.iterate_objects():
        raise ContainerIsNotEmptyError(
            value='Container must be empty before it can be deleted.',
            container_name=container.name, driver=self)

    path = self._get_container_path(container)

    response = self.connection.request(path,
                                       params={'restype': 'container'},
                                       method='DELETE')

    if response.status == httplib.NOT_FOUND:
        raise ContainerDoesNotExistError(value=None,
                                         driver=self,
                                         container_name=container.name)

    # Azure answers 202 Accepted on a successful delete.
    return response.status == httplib.ACCEPTED
|
||||
|
||||
def download_object(self, obj, destination_path, overwrite_existing=False,
                    delete_on_failure=True):
    """
    @inherits: :class:`StorageDriver.download_object`
    """
    path = self._get_object_path(obj.container, obj.name)
    response = self.connection.request(path, raw=True, data=None)

    # Arguments forwarded to the shared _save_object helper, which
    # streams the raw response body to destination_path.
    save_kwargs = {
        'obj': obj,
        'response': response.response,
        'destination_path': destination_path,
        'overwrite_existing': overwrite_existing,
        'delete_on_failure': delete_on_failure,
    }

    return self._get_object(obj=obj, callback=self._save_object,
                            response=response,
                            callback_kwargs=save_kwargs,
                            success_status_code=httplib.OK)
|
||||
|
||||
def download_object_as_stream(self, obj, chunk_size=None):
    """
    @inherits: :class:`StorageDriver.download_object_as_stream`
    """
    path = self._get_object_path(obj.container, obj.name)
    response = self.connection.request(path, raw=True, data=None)

    # Hand the raw response off to read_in_chunks, which yields the
    # body lazily in chunk_size pieces.
    stream_kwargs = {'iterator': response.response,
                     'chunk_size': chunk_size}

    return self._get_object(obj=obj, callback=read_in_chunks,
                            response=response,
                            callback_kwargs=stream_kwargs,
                            success_status_code=httplib.OK)
|
||||
|
||||
def _upload_in_chunks(self, response, data, iterator, object_path,
                      blob_type, lease, calculate_hash=True):
    """
    Uploads data from an iterator in fixed sized chunks to Azure Blobs.

    Block blobs are uploaded as a sequence of blocks which are
    committed at the end via :meth:`_commit_blocks`; page blobs are
    written page range by page range.

    :param response: Response object from the initial POST request
    :type response: :class:`RawResponse`

    :param data: Any data from the initial POST request
    :type data: ``str``

    :param iterator: The generator for fetching the upload data
    :type iterator: ``generator``

    :param object_path: The path of the object to which we are uploading
    :type object_path: ``str``

    :param blob_type: The blob type being uploaded
    :type blob_type: ``str``

    :param lease: The lease object to be used for renewal
    :type lease: :class:`AzureBlobLease`

    :keyword calculate_hash: Indicates if we must calculate the data hash
    :type calculate_hash: ``bool``

    :return: A tuple of (status, checksum, bytes transferred)
    :rtype: ``tuple``
    """

    # The initial request must have created the (empty) blob before
    # any chunks can be uploaded to it.
    if response.status != httplib.CREATED:
        raise LibcloudError('Error initializing upload. Code: %d' %
                            (response.status), driver=self)

    data_hash = None
    if calculate_hash:
        data_hash = self._get_hash_function()

    bytes_transferred = 0
    count = 1
    chunks = []
    headers = {}

    lease.update_headers(headers)

    if blob_type == 'BlockBlob':
        params = {'comp': 'block'}
    else:
        params = {'comp': 'page'}

    # Read the input data in chunk sizes suitable for Azure
    for data in read_in_chunks(iterator, AZURE_CHUNK_SIZE):
        data = b(data)
        content_length = len(data)
        offset = bytes_transferred
        bytes_transferred += content_length

        # Running hash over the full payload (verified by the caller).
        if calculate_hash:
            data_hash.update(data)

        # Azure additionally wants a per-chunk MD5 in Content-MD5.
        chunk_hash = self._get_hash_function()
        chunk_hash.update(data)
        chunk_hash = base64.b64encode(b(chunk_hash.digest()))

        headers['Content-MD5'] = chunk_hash.decode('utf-8')
        headers['Content-Length'] = content_length

        if blob_type == 'BlockBlob':
            # Block id can be any unique string that is base64 encoded
            # A 10 digit number can hold the max value of 50000 blocks
            # that are allowed for azure
            block_id = base64.b64encode(b('%10d' % (count)))
            block_id = block_id.decode('utf-8')
            params['blockid'] = block_id

            # Keep this data for a later commit
            chunks.append(block_id)
        else:
            # Page blobs write an explicit byte range per request.
            headers['x-ms-page-write'] = 'update'
            headers['x-ms-range'] = 'bytes=%d-%d' % \
                (offset, bytes_transferred-1)

        # Renew lease before updating
        lease.renew()

        resp = self.connection.request(object_path, method='PUT',
                                       data=data, headers=headers,
                                       params=params)

        if resp.status != httplib.CREATED:
            resp.parse_error()
            raise LibcloudError('Error uploading chunk %d. Code: %d' %
                                (count, resp.status), driver=self)

        count += 1

    if calculate_hash:
        data_hash = data_hash.hexdigest()

    if blob_type == 'BlockBlob':
        self._commit_blocks(object_path, chunks, lease)

    # The Azure service does not return a hash immediately for
    # chunked uploads. It takes some time for the data to get synced
    response.headers['content-md5'] = None

    return (True, data_hash, bytes_transferred)
|
||||
|
||||
def _commit_blocks(self, object_path, chunks, lease):
    """
    Issue the final block-list commit that assembles the previously
    uploaded blocks into the blob.

    :param object_path: Server side object path.
    :type object_path: ``str``

    :param chunks: Ordered list of uncommitted block ids.
    :type chunks: ``list``

    :param lease: The lease used during the upload; renewed before
                  the commit request.
    :type lease: :class:`AzureBlobLease`
    """
    # Azure expects an XML <BlockList> naming each block in order.
    root = Element('BlockList')
    for block_id in chunks:
        SubElement(root, 'Uncommitted').text = str(block_id)

    headers = {}
    lease.update_headers(headers)
    lease.renew()

    response = self.connection.request(object_path,
                                       data=tostring(root),
                                       params={'comp': 'blocklist'},
                                       headers=headers,
                                       method='PUT')

    if response.status != httplib.CREATED:
        raise LibcloudError('Error in blocklist commit', driver=self)
|
||||
|
||||
def _check_values(self, blob_type, object_size):
    """
    Validate the extension arguments supplied for an upload.

    :param blob_type: The blob type that is being uploaded
    :type blob_type: ``str``

    :param object_size: The (max) size of the object being uploaded
    :type object_size: ``int``
    """
    if blob_type not in ('BlockBlob', 'PageBlob'):
        raise LibcloudError('Invalid blob type', driver=self)

    if blob_type != 'PageBlob':
        return

    # Page blobs must declare a maximum size up front, and that size
    # has to fall on a page boundary.
    if not object_size:
        raise LibcloudError('Max blob size is mandatory for page blob',
                            driver=self)

    if object_size % AZURE_PAGE_CHUNK_SIZE:
        raise LibcloudError('Max blob size is not aligned to '
                            'page boundary', driver=self)
|
||||
|
||||
def upload_object(self, file_path, container, object_name, extra=None,
                  verify_hash=True, ex_blob_type=None, ex_use_lease=False):
    """
    Upload an object currently located on a disk.

    @inherits: :class:`StorageDriver.upload_object`

    :param ex_blob_type: Storage class
    :type ex_blob_type: ``str``

    :param ex_use_lease: Indicates if we must take a lease before upload
    :type ex_use_lease: ``bool``
    """

    if ex_blob_type is None:
        ex_blob_type = self.ex_blob_type

    # Get the size of the file
    file_size = os.stat(file_path).st_size

    # The presumed size of the object
    object_size = file_size

    self._check_values(ex_blob_type, file_size)

    # FIX: use open() instead of the Python-2-only builtin file();
    # open() behaves identically for binary reads and also works on
    # Python 3. The context manager guarantees the handle is closed.
    with open(file_path, 'rb') as file_handle:
        iterator = iter(file_handle)

        # If size is greater than 64MB or type is Page, upload in chunks
        if ex_blob_type == 'PageBlob' or file_size > AZURE_BLOCK_MAX_SIZE:
            # For chunked upload of block blobs, the initial size must
            # be 0.
            if ex_blob_type == 'BlockBlob':
                object_size = None

            object_path = self._get_object_path(container, object_name)

            upload_func = self._upload_in_chunks
            upload_func_kwargs = {'iterator': iterator,
                                  'object_path': object_path,
                                  'blob_type': ex_blob_type,
                                  'lease': None}
        else:
            upload_func = self._stream_data
            upload_func_kwargs = {'iterator': iterator,
                                  'chunked': False,
                                  'calculate_hash': verify_hash}

        return self._put_object(container=container,
                                object_name=object_name,
                                object_size=object_size,
                                upload_func=upload_func,
                                upload_func_kwargs=upload_func_kwargs,
                                file_path=file_path, extra=extra,
                                verify_hash=verify_hash,
                                blob_type=ex_blob_type,
                                use_lease=ex_use_lease)
|
||||
|
||||
def upload_object_via_stream(self, iterator, container, object_name,
                             verify_hash=False, extra=None,
                             ex_use_lease=False, ex_blob_type=None,
                             ex_page_blob_size=None):
    """
    @inherits: :class:`StorageDriver.upload_object_via_stream`

    :param ex_blob_type: Storage class
    :type ex_blob_type: ``str``

    :param ex_page_blob_size: The maximum size to which the
        page blob can grow to
    :type ex_page_blob_size: ``int``

    :param ex_use_lease: Indicates if we must take a lease before upload
    :type ex_use_lease: ``bool``
    """

    if ex_blob_type is None:
        ex_blob_type = self.ex_blob_type

    self._check_values(ex_blob_type, ex_page_blob_size)

    object_path = self._get_object_path(container, object_name)

    # Streams are always uploaded chunk by chunk since the total
    # length is not known up front.
    chunk_kwargs = {'iterator': iterator,
                    'object_path': object_path,
                    'blob_type': ex_blob_type,
                    'lease': None}

    return self._put_object(container=container,
                            object_name=object_name,
                            object_size=ex_page_blob_size,
                            upload_func=self._upload_in_chunks,
                            upload_func_kwargs=chunk_kwargs,
                            extra=extra, verify_hash=verify_hash,
                            blob_type=ex_blob_type,
                            use_lease=ex_use_lease)
|
||||
|
||||
def delete_object(self, obj):
    """
    @inherits: :class:`StorageDriver.delete_object`
    """
    path = self._get_object_path(obj.container, obj.name)
    response = self.connection.request(path, method='DELETE')
    status = response.status

    if status == httplib.NOT_FOUND:
        raise ObjectDoesNotExistError(value=None, driver=self,
                                      object_name=obj.name)

    # Azure answers 202 Accepted on a successful delete.
    return status == httplib.ACCEPTED
|
||||
|
||||
def _update_metadata(self, headers, meta_data):
    """
    Fold the given metadata key/value pairs into the request headers.

    :param headers: The headers dictionary to be updated
    :type headers: ``dict``

    :param meta_data: Metadata key value pairs
    :type meta_data: ``dict``
    """
    # Azure carries each metadata entry as an 'x-ms-meta-<name>' header.
    for name, value in list(meta_data.items()):
        headers['x-ms-meta-%s' % (name)] = value
|
||||
|
||||
def _prepare_upload_headers(self, object_name, object_size,
                            extra, meta_data, blob_type):
    """
    Build the header set used when creating a blob.

    :param object_name: The full name of the object being updated
    :type object_name: ``str``

    :param object_size: The size of the object. In case of PageBlobs,
        this indicates the maximum size the blob can grow to
    :type object_size: ``int``

    :param extra: Extra control data for the upload
    :type extra: ``dict``

    :param meta_data: Metadata key value pairs
    :type meta_data: ``dict``

    :param blob_type: Page or Block blob type
    :type blob_type: ``str``
    """
    if blob_type is None:
        blob_type = self.ex_blob_type

    headers = {'x-ms-blob-type': blob_type}

    self._update_metadata(headers, meta_data)

    if object_size is not None:
        headers['Content-Length'] = object_size

    if blob_type == 'PageBlob':
        # Page blobs are created empty: the request body carries no
        # data (Content-Length 0) while the blob's maximum size is
        # declared via x-ms-blob-content-length.
        headers['Content-Length'] = 0
        headers['x-ms-blob-content-length'] = object_size

    return headers
|
||||
|
||||
def _put_object(self, container, object_name, object_size, upload_func,
                upload_func_kwargs, file_path=None, extra=None,
                verify_hash=True, blob_type=None, use_lease=False):
    """
    Control function that does the real job of uploading data to a blob.

    Prepares the request headers, optionally acquires a lease for the
    duration of the upload, delegates the data transfer to
    ``upload_func`` and finally verifies the server-side MD5 hash
    (when available) before building the resulting :class:`Object`.
    """
    extra = extra or {}
    meta_data = extra.get('meta_data', {})
    content_type = extra.get('content_type', None)

    headers = self._prepare_upload_headers(object_name, object_size,
                                           extra, meta_data, blob_type)

    object_path = self._get_object_path(container, object_name)

    # Get a lease if required and do the operations
    with AzureBlobLease(self, object_path, use_lease) as lease:
        # Chunked uploaders accept a 'lease' kwarg so they can renew
        # it between chunks; inject the one we just acquired.
        if 'lease' in upload_func_kwargs:
            upload_func_kwargs['lease'] = lease

        lease.update_headers(headers)

        iterator = iter('')
        result_dict = self._upload_object(object_name, content_type,
                                          upload_func, upload_func_kwargs,
                                          object_path, headers=headers,
                                          file_path=file_path,
                                          iterator=iterator)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        data_hash = result_dict['data_hash']
        headers = response.headers
        response = response.response

    if response.status != httplib.CREATED:
        raise LibcloudError(
            'Unexpected status code, status_code=%s' % (response.status),
            driver=self)

    server_hash = headers['content-md5']

    if server_hash:
        # Azure returns the MD5 base64-encoded; convert it to the hex
        # digest form produced by data_hash for comparison below.
        server_hash = binascii.hexlify(base64.b64decode(b(server_hash)))
        server_hash = server_hash.decode('utf-8')
    else:
        # TODO: HACK - We could poll the object for a while and get
        # the hash
        pass

    # Hash verification is skipped when the service did not return a
    # hash (e.g. after chunked uploads).
    if (verify_hash and server_hash and data_hash != server_hash):
        raise ObjectHashMismatchError(
            value='MD5 hash checksum does not match',
            object_name=object_name, driver=self)

    return Object(name=object_name, size=bytes_transferred,
                  hash=headers['etag'], extra=None,
                  meta_data=meta_data, container=container,
                  driver=self)
|
||||
|
||||
def ex_set_object_metadata(self, obj, meta_data):
    """
    Set metadata for an object

    :param obj: The blob object
    :type obj: :class:`Object`

    :param meta_data: Metadata key value pairs
    :type meta_data: ``dict``
    """
    headers = {}
    self._update_metadata(headers, meta_data)

    path = self._get_object_path(obj.container, obj.name)

    response = self.connection.request(path, method='PUT',
                                       params={'comp': 'metadata'},
                                       headers=headers)

    if response.status != httplib.OK:
        response.parse_error('Setting metadata')
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user