diff --git a/awx/lib/site-packages/README b/awx/lib/site-packages/README
index 544fddd707..89ef6bbb25 100644
--- a/awx/lib/site-packages/README
+++ b/awx/lib/site-packages/README
@@ -43,7 +43,7 @@ pexpect==3.3 (pexpect/*, excluded pxssh.py, fdpexpect.py, FSM.py, screen.py,
ANSI.py)
pip==1.5.4 (pip/*, excluded bin/pip*)
prettytable==0.7.2 (prettytable.py)
-pyrax==1.7.2 (pyrax/*)
+pyrax==1.9.0 (pyrax/*)
python-dateutil==2.2 (dateutil/*)
python-novaclient==2.17.0 (novaclient/*, excluded bin/nova)
python-swiftclient==2.0.3 (swiftclient/*, excluded bin/swift)
diff --git a/awx/lib/site-packages/pyrax/__init__.py b/awx/lib/site-packages/pyrax/__init__.py
index 57e37bee29..b63526e5c0 100644
--- a/awx/lib/site-packages/pyrax/__init__.py
+++ b/awx/lib/site-packages/pyrax/__init__.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -27,22 +27,17 @@ built on the Rackspace / OpenStack Cloud.
The source code for pyrax can be found at:
http://github.com/rackspace/pyrax
-
-\package cf_wrapper
-
-This module wraps swiftclient, the Python client for OpenStack / Swift,
-providing an object-oriented interface to the Swift object store.
-
-It also adds in CDN functionality that is Rackspace-specific.
"""
+
+from __future__ import absolute_import
from functools import wraps
import inspect
import logging
import os
+import re
+import six.moves.configparser as ConfigParser
import warnings
-from six.moves import configparser
-
# keyring is an optional import
try:
import keyring
@@ -59,21 +54,22 @@ try:
from . import http
from . import version
- import cf_wrapper.client as _cf
from novaclient import exceptions as _cs_exceptions
from novaclient import auth_plugin as _cs_auth_plugin
+ from novaclient.shell import OpenStackComputeShell as _cs_shell
from novaclient.v1_1 import client as _cs_client
from novaclient.v1_1.servers import Server as CloudServer
- from autoscale import AutoScaleClient
- from clouddatabases import CloudDatabaseClient
- from cloudloadbalancers import CloudLoadBalancerClient
- from cloudblockstorage import CloudBlockStorageClient
- from clouddns import CloudDNSClient
- from cloudnetworks import CloudNetworkClient
- from cloudmonitoring import CloudMonitorClient
- from image import ImageClient
- from queueing import QueueClient
+ from .autoscale import AutoScaleClient
+ from .clouddatabases import CloudDatabaseClient
+ from .cloudloadbalancers import CloudLoadBalancerClient
+ from .cloudblockstorage import CloudBlockStorageClient
+ from .clouddns import CloudDNSClient
+ from .cloudnetworks import CloudNetworkClient
+ from .cloudmonitoring import CloudMonitorClient
+ from .image import ImageClient
+ from .object_storage import StorageClient
+ from .queueing import QueueClient
except ImportError:
# See if this is the result of the importing of version.py in setup.py
callstack = inspect.stack()
@@ -118,6 +114,8 @@ regions = tuple()
services = tuple()
_client_classes = {
+ "compute": _cs_client.Client,
+ "object_store": StorageClient,
"database": CloudDatabaseClient,
"load_balancer": CloudLoadBalancerClient,
"volume": CloudBlockStorageClient,
@@ -168,7 +166,7 @@ class Settings(object):
"verify_ssl": "CLOUD_VERIFY_SSL",
"use_servicenet": "USE_SERVICENET",
}
- _settings = {"default": dict.fromkeys(env_dct.keys())}
+ _settings = {"default": dict.fromkeys(list(env_dct.keys()))}
_default_set = False
@@ -181,8 +179,10 @@ class Settings(object):
if env is None:
env = self.environment
try:
- return self._settings[env][key]
+ ret = self._settings[env][key]
except KeyError:
+ ret = None
+ if ret is None:
# See if it's set in the environment
if key == "identity_class":
# This is defined via the identity_type
@@ -193,9 +193,10 @@ class Settings(object):
else:
env_var = self.env_dct.get(key)
try:
- return os.environ[env_var]
+ ret = os.environ[env_var]
except KeyError:
- return None
+ ret = None
+ return ret
def set(self, key, val, env=None):
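A brief sketch of the fallback implemented in the get() method above: when a key has no value in the current environment's settings, the mapped OS environment variable (from env_dct) is consulted. The value below is a placeholder.

    import os
    import pyrax

    # "verify_ssl" maps to CLOUD_VERIFY_SSL in env_dct, so with no config-file
    # value present the environment variable is returned (as a string).
    os.environ["CLOUD_VERIFY_SSL"] = "False"
    print(pyrax.get_setting("verify_ssl"))   # -> "False"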
@@ -210,7 +211,7 @@ class Settings(object):
else:
if env not in self._settings:
raise exc.EnvironmentNotFound("There is no environment named "
- "'%s'." % env)
+ "'%s'." % env)
dct = self._settings[env]
if key not in dct:
raise exc.InvalidSetting("The setting '%s' is not defined." % key)
@@ -257,7 +258,7 @@ class Settings(object):
@property
def environments(self):
- return self._settings.keys()
+ return list(self._settings.keys())
def read_config(self, config_file):
@@ -265,17 +266,17 @@ class Settings(object):
Parses the specified configuration file and stores the values. Raises
an InvalidConfigurationFile exception if the file is not well-formed.
"""
- cfg = configparser.SafeConfigParser()
+ cfg = ConfigParser.SafeConfigParser()
try:
cfg.read(config_file)
- except configparser.MissingSectionHeaderError as e:
+ except ConfigParser.MissingSectionHeaderError as e:
# The file exists, but doesn't have the correct format.
raise exc.InvalidConfigurationFile(e)
def safe_get(section, option, default=None):
try:
return cfg.get(section, option)
- except (configparser.NoSectionError, configparser.NoOptionError):
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
return default
# A common mistake is including credentials in the config file. If any
@@ -297,8 +298,9 @@ class Settings(object):
dct = self._settings[section_name] = {}
dct["region"] = safe_get(section, "region", default_region)
ityp = safe_get(section, "identity_type")
- dct["identity_type"] = _id_type(ityp)
- dct["identity_class"] = _import_identity(ityp)
+ if ityp:
+ dct["identity_type"] = _id_type(ityp)
+ dct["identity_class"] = _import_identity(ityp)
# Handle both the old and new names for this setting.
debug = safe_get(section, "debug")
if debug is None:
@@ -377,18 +379,46 @@ def set_default_region(region):
default_region = region
-def _create_identity():
+def create_context(id_type=None, env=None, username=None, password=None,
+ tenant_id=None, tenant_name=None, api_key=None, verify_ssl=None):
+ """
+ Returns an instance of the specified identity class, or if none is
+ specified, an instance of the current setting for 'identity_class'.
+
+ You may optionally set the environment by passing the name of that
+ environment in the 'env' parameter.
+ """
+ if env:
+ set_environment(env)
+ return _create_identity(id_type=id_type, username=username,
+ password=password, tenant_id=tenant_id, tenant_name=tenant_name,
+ api_key=api_key, verify_ssl=verify_ssl, return_context=True)
+
+
+def _create_identity(id_type=None, username=None, password=None, tenant_id=None,
+ tenant_name=None, api_key=None, verify_ssl=None,
+ return_context=False):
"""
Creates an instance of the current identity_class and assigns it to the
- module-level name 'identity'.
+ module-level name 'identity' by default. If 'return_context' is True, the
+ module-level 'identity' is untouched, and instead the instance is returned.
"""
- global identity
- cls = settings.get("identity_class")
+ if id_type:
+ cls = _import_identity(id_type)
+ else:
+ cls = settings.get("identity_class")
if not cls:
raise exc.IdentityClassNotDefined("No identity class has "
"been defined for the current environment.")
- verify_ssl = get_setting("verify_ssl")
- identity = cls(verify_ssl=verify_ssl)
+ if verify_ssl is None:
+ verify_ssl = get_setting("verify_ssl")
+ context = cls(username=username, password=password, tenant_id=tenant_id,
+ tenant_name=tenant_name, api_key=api_key, verify_ssl=verify_ssl)
+ if return_context:
+ return context
+ else:
+ global identity
+ identity = context
def _assure_identity(fnc):
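A minimal sketch of the new create_context() entry point added above; the credentials and the "rackspace" identity type are illustrative placeholders, not values taken from this patch.

    import pyrax

    # Builds an identity instance without touching the module-level
    # pyrax.identity, so several contexts can coexist side by side.
    ctx = pyrax.create_context(id_type="rackspace", username="myuser",
            password="top-secret")
    ctx.authenticate()
    print(ctx.regions)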
@@ -412,13 +442,16 @@ def _require_auth(fnc):
return _wrapped
-@_assure_identity
-def _safe_region(region=None):
+def _safe_region(region=None, context=None):
"""Value to use when no region is specified."""
ret = region or settings.get("region")
+ context = context or identity
if not ret:
# Nothing specified; get the default from the identity object.
- ret = identity.get_default_region()
+ if not context:
+ _create_identity()
+ context = identity
+ ret = context.get_default_region()
if not ret:
# Use the first available region
try:
@@ -434,8 +467,11 @@ def auth_with_token(token, tenant_id=None, tenant_name=None, region=None):
If you already have a valid token and either a tenant ID or name, you can
call this to configure the identity and available services.
"""
+ global regions, services
identity.auth_with_token(token, tenant_id=tenant_id,
tenant_name=tenant_name)
+ regions = tuple(identity.regions)
+ services = tuple(identity.services.keys())
connect_to_services(region=region)
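Sketch of token-based setup with the rewritten auth_with_token() above; the token and tenant ID are placeholders. The module-level 'regions' and 'services' tuples are now refreshed from the new service catalog before connecting.

    import pyrax

    # Assumes an identity type has already been configured (config file,
    # environment variable, or set_setting).
    pyrax.auth_with_token("0123456789abcdef", tenant_id="123456")
    print(pyrax.regions)
    print(pyrax.services)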
@@ -448,13 +484,15 @@ def set_credentials(username, api_key=None, password=None, region=None,
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections.
"""
+ global regions, services
pw_key = password or api_key
region = _safe_region(region)
tenant_id = tenant_id or settings.get("tenant_id")
identity.set_credentials(username=username, password=pw_key,
- tenant_id=tenant_id, region=region)
- if authenticate:
- _auth_and_connect(region=region)
+ tenant_id=tenant_id, region=region, authenticate=authenticate)
+ regions = tuple(identity.regions)
+ services = tuple(identity.services.keys())
+ connect_to_services(region=region)
@_assure_identity
@@ -478,10 +516,13 @@ def set_credential_file(cred_file, region=None, authenticate=True):
If the region is passed, it will authenticate against the proper endpoint
for that region, and set the default region for connections.
"""
+ global regions, services
region = _safe_region(region)
- identity.set_credential_file(cred_file, region=region)
- if authenticate:
- _auth_and_connect(region=region)
+ identity.set_credential_file(cred_file, region=region,
+ authenticate=authenticate)
+ regions = tuple(identity.regions)
+ services = tuple(identity.services.keys())
+ connect_to_services(region=region)
def keyring_auth(username=None, region=None, authenticate=True):
@@ -514,23 +555,6 @@ def keyring_auth(username=None, region=None, authenticate=True):
authenticate=authenticate)
-def _auth_and_connect(region=None, connect=True):
- """
- Handles the call to authenticate, and if successful, connects to the
- various services.
- """
- global default_region
- identity.authenticated = False
- default_region = region or default_region
- try:
- identity.authenticate()
- except exc.AuthenticationFailed:
- clear_credentials()
- raise
- if connect:
- connect_to_services(region=region)
-
-
@_assure_identity
def authenticate(connect=True):
"""
@@ -545,19 +569,11 @@ def authenticate(connect=True):
Normally after successful authentication, connections to the various
services will be made. However, passing False to the `connect` parameter
will skip the service connection step.
- """
- _auth_and_connect(connect=connect)
-
-def plug_hole_in_swiftclient_auth(clt, url):
+ The 'connect' parameter is retained for backwards compatibility. It no
+ longer has any effect.
"""
- This is necessary because swiftclient has an issue when a token expires and
- it needs to re-authenticate against Rackspace auth. It is a temporary
- workaround until we can fix swiftclient.
- """
- conn = clt.connection
- conn.token = identity.token
- conn.url = url
+ identity.authenticate()
def clear_credentials():
@@ -610,46 +626,52 @@ def connect_to_services(region=None):
queues = connect_to_queues(region=region)
-def _get_service_endpoint(svc, region=None, public=True):
+def _get_service_endpoint(context, svc, region=None, public=True):
"""
Parses the services dict to get the proper endpoint for the given service.
"""
region = _safe_region(region)
- url_type = {True: "public_url", False: "internal_url"}[public]
- ep = identity.services.get(svc, {}).get("endpoints", {}).get(
- region, {}).get(url_type)
+ # If a specific context is passed, use that. Otherwise, use the global
+ # identity reference.
+ context = context or identity
+ url_type = {True: "public", False: "private"}[public]
+ svc_obj = context.services.get(svc)
+ if not svc_obj:
+ return None
+ ep = svc_obj.endpoints.get(region, {}).get(url_type)
if not ep:
# Try the "ALL" region, and substitute the actual region
- ep = identity.services.get(svc, {}).get("endpoints", {}).get(
- "ALL", {}).get(url_type)
+ ep = svc_obj.endpoints.get("ALL", {}).get(url_type)
return ep
-@_require_auth
-def connect_to_cloudservers(region=None, **kwargs):
+def connect_to_cloudservers(region=None, context=None, **kwargs):
"""Creates a client for working with cloud servers."""
+ context = context or identity
_cs_auth_plugin.discover_auth_systems()
id_type = get_setting("identity_type")
if id_type != "keystone":
auth_plugin = _cs_auth_plugin.load_plugin(id_type)
else:
auth_plugin = None
- region = _safe_region(region)
- mgt_url = _get_service_endpoint("compute", region)
+ region = _safe_region(region, context=context)
+ mgt_url = _get_service_endpoint(context, "compute", region)
cloudservers = None
if not mgt_url:
# Service is not available
return
insecure = not get_setting("verify_ssl")
- cloudservers = _cs_client.Client(identity.username, identity.password,
- project_id=identity.tenant_id, auth_url=identity.auth_endpoint,
+ cs_shell = _cs_shell()
+ extensions = cs_shell._discover_extensions("1.1")
+ cloudservers = _cs_client.Client(context.username, context.password,
+ project_id=context.tenant_id, auth_url=context.auth_endpoint,
auth_system=id_type, region_name=region, service_type="compute",
- auth_plugin=auth_plugin, insecure=insecure,
+ auth_plugin=auth_plugin, insecure=insecure, extensions=extensions,
http_log_debug=_http_debug, **kwargs)
agt = cloudservers.client.USER_AGENT
cloudservers.client.USER_AGENT = _make_agent_name(agt)
cloudservers.client.management_url = mgt_url
- cloudservers.client.auth_token = identity.token
+ cloudservers.client.auth_token = context.token
cloudservers.exceptions = _cs_exceptions
# Add some convenience methods
cloudservers.list_images = cloudservers.images.list
@@ -672,54 +694,50 @@ def connect_to_cloudservers(region=None, **kwargs):
return [image for image in cloudservers.images.list()
if hasattr(image, "server")]
+ def find_images_by_name(expr):
+ """
+ Returns a list of images whose name contains the specified expression.
+ The value passed is treated as a regular expression, allowing for more
+ specific searches than simple wildcards. The matching is done in a
+ case-insensitive manner.
+ """
+ return [image for image in cloudservers.images.list()
+ if re.search(expr, image.name, re.I)]
+
cloudservers.list_base_images = list_base_images
cloudservers.list_snapshots = list_snapshots
+ cloudservers.find_images_by_name = find_images_by_name
+ cloudservers.identity = identity
return cloudservers
-@_require_auth
def connect_to_cloudfiles(region=None, public=None):
- """
- Creates a client for working with cloud files. The default is to connect
- to the public URL; if you need to work with the ServiceNet connection, pass
- False to the 'public' parameter or set the "use_servicenet" setting to True.
- """
+ """Creates a client for working with CloudFiles/Swift."""
if public is None:
is_public = not bool(get_setting("use_servicenet"))
else:
is_public = public
-
- region = _safe_region(region)
- cf_url = _get_service_endpoint("object_store", region, public=is_public)
- cloudfiles = None
- if not cf_url:
- # Service is not available
- return
- cdn_url = _get_service_endpoint("object_cdn", region)
- ep_type = {True: "publicURL", False: "internalURL"}[is_public]
- opts = {"tenant_id": identity.tenant_name, "auth_token": identity.token,
- "endpoint_type": ep_type, "tenant_name": identity.tenant_name,
- "object_storage_url": cf_url, "object_cdn_url": cdn_url,
- "region_name": region}
- verify_ssl = get_setting("verify_ssl")
- cloudfiles = _cf.CFClient(identity.auth_endpoint, identity.username,
- identity.password, tenant_name=identity.tenant_name,
- preauthurl=cf_url, preauthtoken=identity.token, auth_version="2",
- os_options=opts, verify_ssl=verify_ssl, http_log_debug=_http_debug)
- cloudfiles.user_agent = _make_agent_name(cloudfiles.user_agent)
- return cloudfiles
+ ret = _create_client(ep_name="object_store", region=region,
+ public=is_public)
+ if ret:
+ # Add CDN endpoints, if available
+ region = _safe_region(region)
+ ret.cdn_management_url = _get_service_endpoint(None, "object_cdn",
+ region, public=is_public)
+ return ret
@_require_auth
def _create_client(ep_name, region, public=True):
region = _safe_region(region)
- ep = _get_service_endpoint(ep_name.split(":")[0], region, public=public)
+ ep = _get_service_endpoint(None, ep_name.split(":")[0], region,
+ public=public)
if not ep:
return
verify_ssl = get_setting("verify_ssl")
cls = _client_classes[ep_name]
- client = cls(region_name=region, management_url=ep, verify_ssl=verify_ssl,
- http_log_debug=_http_debug)
+ client = cls(identity, region_name=region, management_url=ep,
+ verify_ssl=verify_ssl, http_log_debug=_http_debug)
client.user_agent = _make_agent_name(client.user_agent)
return client
@@ -769,34 +787,29 @@ def connect_to_queues(region=None, public=True):
return _create_client(ep_name="queues", region=region, public=public)
+def client_class_for_service(service):
+ """
+ Returns the client class registered for the given service, or None if there
+ is no such service, or if no class has been registered.
+ """
+ return _client_classes.get(service)
+
+
def get_http_debug():
return _http_debug
-@_assure_identity
def set_http_debug(val):
global _http_debug
_http_debug = val
# Set debug on the various services
- identity.http_log_debug = val
+ if identity:
+ identity.http_log_debug = val
for svc in (cloudservers, cloudfiles, cloud_loadbalancers,
cloud_blockstorage, cloud_databases, cloud_dns, cloud_networks,
autoscale, images, queues):
if svc is not None:
svc.http_log_debug = val
- # Need to manually add/remove the debug handler for swiftclient
- swift_logger = _cf._swift_client.logger
- if val:
- for handler in swift_logger.handlers:
- if isinstance(handler, logging.StreamHandler):
- # Already present
- return
- swift_logger.addHandler(logging.StreamHandler())
- swift_logger.setLevel(logging.DEBUG)
- else:
- for handler in swift_logger.handlers:
- if isinstance(handler, logging.StreamHandler):
- swift_logger.removeHandler(handler)
def get_encoding():
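The new client_class_for_service() helper above is a plain lookup into the _client_classes registry; unknown names simply return None.

    import pyrax

    print(pyrax.client_class_for_service("object_store"))     # StorageClient
    print(pyrax.client_class_for_service("no_such_service"))  # None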
diff --git a/awx/lib/site-packages/pyrax/autoscale.py b/awx/lib/site-packages/pyrax/autoscale.py
index d7e8c09583..91fab8e3f0 100644
--- a/awx/lib/site-packages/pyrax/autoscale.py
+++ b/awx/lib/site-packages/pyrax/autoscale.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2013 Rackspace
+# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -17,6 +17,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import base64
+
import pyrax
from pyrax.client import BaseClient
from pyrax.cloudloadbalancers import CloudLoadBalancer
@@ -441,9 +443,9 @@ class ScalingGroupManager(BaseManager):
largs = scaling_group.launchConfiguration.get("args", {})
srv_args = largs.get("server", {})
lb_args = largs.get("loadBalancers", {})
- flav = "%s" % flavor or srv_args.get("flavorRef")
- dconf = disk_config or srv_args.get("OS-DCF:diskConfig")
- pers = personality or srv_args.get("personality")
+ flav = flavor or srv_args.get("flavorRef")
+ dconf = disk_config or srv_args.get("OS-DCF:diskConfig", "AUTO")
+ pers = personality or srv_args.get("personality", [])
body = {"type": "launch_server",
"args": {
"server": {
@@ -451,13 +453,14 @@ class ScalingGroupManager(BaseManager):
"imageRef": image or srv_args.get("imageRef"),
"flavorRef": flav,
"OS-DCF:diskConfig": dconf,
- "personality": pers,
"networks": networks or srv_args.get("networks"),
"metadata": metadata or srv_args.get("metadata"),
},
"loadBalancers": load_balancers or lb_args,
},
}
+ if pers:
+ body["args"]["server"]["personality"] = pers
key_name = key_name or srv_args.get("key_name")
if key_name:
body["args"]["server"] = key_name
@@ -765,6 +768,10 @@ class ScalingGroupManager(BaseManager):
metadata = {}
if personality is None:
personality = []
+ else:
+ for file in personality:
+ if "contents" in file:
+ file["contents"] = base64.b64encode(file["contents"])
if scaling_policies is None:
scaling_policies = []
group_config = self._create_group_config_body(name, cooldown,
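For context, a sketch of the personality payload shape consumed above; the manager now base64-encodes each file's "contents" value before the scaling group is created. The path and contents below are placeholders.

    personality = [{
        "path": "/root/.ssh/authorized_keys",
        "contents": "ssh-rsa AAAA... user@example.com",
    }]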
diff --git a/awx/lib/site-packages/pyrax/base_identity.py b/awx/lib/site-packages/pyrax/base_identity.py
index 64057b03d2..580decbcc3 100644
--- a/awx/lib/site-packages/pyrax/base_identity.py
+++ b/awx/lib/site-packages/pyrax/base_identity.py
@@ -1,18 +1,25 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-from __future__ import print_function
+from __future__ import absolute_import
+
+import six.moves.configparser as ConfigParser
import datetime
import json
import re
+import requests
+import warnings
-from six.moves import configparser
+try:
+ import keyring
+except ImportError:
+ keyring = None
import pyrax
-import pyrax.exceptions as exc
-from pyrax.resource import BaseResource
-import pyrax.utils as utils
+from pyrax import exceptions as exc
+from .resource import BaseResource
+from . import utils as utils
_pat = r"""
@@ -33,6 +40,9 @@ API_DATE_PATTERN = re.compile(_pat, re.VERBOSE)
UTC_API_DATE_PATTERN = re.compile(_utc_pat, re.VERBOSE)
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+# Default region for all services. Can be individually overridden if needed
+default_region = None
+
class Tenant(BaseResource):
pass
@@ -42,36 +52,267 @@ class User(BaseResource):
pass
-class BaseAuth(object):
+class Role(BaseResource):
+ pass
+
+
+class Service(object):
+ """
+ Represents an available service from the service catalog.
+ """
+ def __init__(self, identity, catalog):
+ """
+ Parse the catalog entry for a particular service.
+ """
+ self.identity = identity
+ self.name = catalog.get("name")
+ # Replace any dashes with underscores.
+ fulltype = catalog["type"].replace("-", "_")
+ # Some provider-specific services are prefixed with that info.
+ try:
+ self.prefix, self.service_type = fulltype.split(":")
+ except ValueError:
+ self.prefix = ""
+ self.service_type = fulltype
+ if self.service_type == "compute":
+ if self.name.lower() == "cloudservers":
+ # First-generation Rackspace cloud servers
+ return
+ self.clients = {}
+ self.endpoints = utils.DotDict()
+ eps = catalog.get("endpoints", [])
+ for ep in eps:
+ rgn = ep.get("region", "ALL")
+ self.endpoints[rgn] = Endpoint(ep, self.service_type, rgn, identity)
+ return
+
+
+ def __repr__(self):
+ memloc = hex(id(self))
+ return "<'%s' Service object at %s>" % (self.service_type, memloc)
+
+
+ def _ep_for_region(self, region):
+ """
+        Given a region, returns the Endpoint for that region, falling back to
+        the Endpoint for the 'ALL' region when there is no exact match. If
+        neither is found, None is returned, and it is up to the calling method
+        to handle it appropriately.
+ """
+ rgn = region.upper()
+ try:
+ rgn_ep = [ep for ep in list(self.endpoints.values())
+ if ep.region.upper() == rgn][0]
+ except IndexError:
+ # See if there is an 'ALL' region.
+ try:
+ rgn_ep = [ep for ep in list(self.endpoints.values())
+ if ep.region.upper() == "ALL"][0]
+ except IndexError:
+ rgn_ep = None
+ return rgn_ep
+
+
+ def get_client(self, region):
+ """
+ Returns an instance of the appropriate client class for the given
+ region. If there is no endpoint for that region, a NoEndpointForRegion
+ exception is raised.
+ """
+ ep = self._ep_for_region(region)
+ if not ep:
+ raise exc.NoEndpointForRegion("There is no endpoint defined for the "
+ "region '%s' for the '%s' service." % (region,
+ self.service_type))
+ return ep.client
+
+
+ @property
+ def regions(self):
+ """
+ Returns a list of all regions which support this service.
+ """
+ return list(self.endpoints.keys())
+
+
+
+class Endpoint(object):
+ """
+ Holds the endpoint information, as well as an instance of the appropriate
+ client for that service and region.
+ """
+ public_url = None
+ private_url = None
+ tenant_id = None
+ region = None
+ _client = None
+ _client_private = None
+ attr_map = {"publicURL": "public_url",
+ "privateURL": "private_url",
+ "internalURL": "private_url",
+ "tenantId": "tenant_id",
+ }
+
+
+ def __init__(self, ep_dict, service, region, identity):
+ """
+ Set local attributes from the supplied dictionary.
+ """
+ self.service = service
+ self.region = region
+ self.identity = identity
+ for key, val in list(ep_dict.items()):
+ att_name = self.attr_map.get(key, key)
+ setattr(self, att_name, val)
+
+
+ def get_new_client(self, public=True):
+ """
+ Returns a new instance of the client for this endpoint.
+ """
+ return self._get_client(public=public, cached=False)
+
+
+ def _get_client(self, public=True, cached=True, client_class=None):
+ client_att = "_client" if public else "_client_private"
+ clt = getattr(self, client_att)
+ if isinstance(clt, exc.NoClientForService):
+ # Already failed
+ raise clt
+ if cached and clt is not None:
+ if client_class and not isinstance(clt, client_class):
+ clt = None
+ else:
+ return clt
+ # Create the client
+ special_class = bool(client_class)
+ if special_class:
+ clt_class = client_class
+ else:
+ clt_class = pyrax.client_class_for_service(self.service)
+ if clt_class is None:
+ noclass = exc.NoClientForService("No client for the '%s' service "
+ "has been registered." % self.service)
+ setattr(self, client_att, noclass)
+ raise noclass
+ url_att = "public_url" if public else "private_url"
+ url = getattr(self, url_att)
+ if not url:
+ nourl = exc.NoEndpointForService("No %s endpoint is available for "
+ "the '%s' service." % (url_att, self.service))
+ setattr(self, client_att, nourl)
+ raise nourl
+ clt = self._create_client(clt_class, url, public=public,
+ special=special_class)
+ setattr(self, client_att, clt)
+ return clt
+
+
+ def get(self, url_type):
+ """
+ Accepts either 'public' or 'private' as a parameter, and returns the
+ corresponding value for 'public_url' or 'private_url', respectively.
+ """
+ lowtype = url_type.lower()
+ if lowtype == "public":
+ return self.public_url
+ elif lowtype == "private":
+ return self.private_url
+ else:
+ raise ValueError("Valid values are 'public' or 'private'; "
+ "received '%s'." % url_type)
+
+
+ def __getattr__(self, att):
+ clt = self.client
+ ret = getattr(clt, att, None)
+ if ret:
+ return ret
+ else:
+ raise AttributeError("Endpoint for service '%s' in region '%s' "
+ "has no attribute '%s'." % (self.service, self.region, att))
+
+
+ @property
+ def client(self):
+ return self._get_client(public=True)
+
+
+ @property
+ def client_private(self):
+ return self._get_client(public=False)
+
+
+ def _create_client(self, clt_class, url, public=True, special=False):
+ """
+ Creates a client instance for the service.
+ """
+ verify_ssl = pyrax.get_setting("verify_ssl")
+ if self.service == "compute" and not special:
+ # Novaclient requires different parameters.
+ client = pyrax.connect_to_cloudservers(region=self.region,
+ context=self.identity)
+ client.identity = self.identity
+ else:
+ client = clt_class(self.identity, region_name=self.region,
+ management_url=url, verify_ssl=verify_ssl)
+ return client
+
+
+
+class BaseIdentity(object):
"""
This class handles all of the basic authentication requirements for working
with an OpenStack Cloud system.
"""
- username = ""
- password = ""
- token = ""
- expires = ""
- tenant_id = ""
- tenant_name = ""
- authenticated = False
- user_agent = "pyrax"
- http_log_debug = False
- _default_region = None
+ _creds_style = "password"
-
- def __init__(self, username=None, password=None, token=None,
+ def __init__(self, username=None, password=None, tenant_id=None,
+ tenant_name=None, auth_endpoint=None, api_key=None, token=None,
credential_file=None, region=None, timeout=None, verify_ssl=True):
-
+ """
+ Initializes the attributes for this identity object.
+ """
self.username = username
self.password = password
+ self.tenant_id = tenant_id
+ self.tenant_name = tenant_name
self.token = token
+ self.expires = None
self.region = region
self._creds_file = credential_file
self._timeout = timeout
- self.services = {}
- self.regions = set()
self.verify_ssl = verify_ssl
- self._auth_endpoint = None
+ self._auth_endpoint = auth_endpoint
+ self.api_key = api_key
+ self.services = utils.DotDict()
+ self.regions = utils.DotDict()
+ self._default_creds_style = "password"
+ self.authenticated = False
+ self.user_agent = "pyrax"
+ self.http_log_debug = False
+ self._default_region = None
+ self.service_mapping = {
+ "cloudservers": "compute",
+ "nova": "compute",
+ "cloudfiles": "object_store",
+ "swift": "object_store",
+ "cloud_loadbalancers": "load_balancer",
+ "cloud_databases": "database",
+ "trove": "database",
+ "cloud_blockstorage": "volume",
+ "cinder": "volume",
+ "cloud_dns": "dns",
+ "designate": "dns",
+ "cloud_networks": "raxnetwork",
+ "neutron": "network",
+ "cloud_monitoring": "monitor",
+ "autoscale": "autoscale",
+ "images": "image",
+ "glance": "image",
+ "queues": "queues",
+ "marconi": "queues",
+ }
@property
@@ -82,7 +323,9 @@ class BaseAuth(object):
@property
def auth_endpoint(self):
- """Abstracts out the logic for connecting to different auth endpoints."""
+ """
+ Abstracts out the logic for connecting to different auth endpoints.
+ """
return self._get_auth_endpoint()
@@ -92,9 +335,10 @@ class BaseAuth(object):
def _get_auth_endpoint(self):
- """Each subclass will have to implement its own method."""
- raise NotImplementedError("The _get_auth_endpoint() method must be "
- "defined in Auth subclasses.")
+ """
+ Broken out in case subclasses need to determine endpoints dynamically.
+ """
+ return self._auth_endpoint or pyrax.get_setting("auth_endpoint")
def get_default_region(self):
@@ -106,6 +350,64 @@ class BaseAuth(object):
return self._default_region
+ def __getattr__(self, att):
+ """
+ Magic to allow for specification of client by region/service or by
+ service/region.
+
+ If a service is specified, this should return an object whose endpoints
+ contain keys for each available region for that service. If a region is
+ specified, an object with keys for each service available in that
+ region should be returned.
+ """
+ if not self.authenticated:
+ raise exc.NotAuthenticated("Authentication required before "
+ "accessing the context.")
+ # First see if it's a service
+ att = self.service_mapping.get(att) or att
+ svc = self.services.get(att)
+ if svc is not None:
+ return svc.endpoints
+ # Either invalid service, or a region
+ ret = utils.DotDict([(stype, svc.endpoints.get(att))
+ for stype, svc in list(self.services.items())
+ if svc.endpoints.get(att) is not None])
+ ret._att_mapper.update(self.service_mapping)
+ if ret:
+ return ret
+ # Invalid attribute
+ raise AttributeError("No such attribute '%s'." % att)
+
+
+ def get_client(self, service, region, public=True, cached=True,
+ client_class=None):
+ """
+ Returns the client object for the specified service and region.
+
+        By default the public endpoint is used. If you wish to work with a
+        service's internal endpoints, specify `public=False`.
+
+ By default, if a client has already been created for the given service,
+ region, and public values, that will be returned. To force a new client
+ to be created, pass 'cached=False'.
+ """
+ if not self.authenticated:
+ raise exc.NotAuthenticated("You must authenticate before trying "
+ "to create clients.")
+ clt = ep = None
+ mapped_service = self.service_mapping.get(service) or service
+ svc = self.services.get(mapped_service)
+ if svc:
+ ep = svc.endpoints.get(region)
+ if ep:
+ clt = ep._get_client(public=public, cached=cached,
+ client_class=client_class)
+ if not clt:
+ raise exc.NoSuchClient("There is no client available for the "
+ "service '%s' in the region '%s'." % (service, region))
+ return clt
+
+
def set_credentials(self, username, password=None, region=None,
tenant_id=None, authenticate=False):
"""Sets the username and password directly."""
@@ -119,7 +421,7 @@ class BaseAuth(object):
def set_credential_file(self, credential_file, region=None,
- tenant_id=tenant_id, authenticate=False):
+ tenant_id=None, authenticate=False):
"""
Reads in the credentials from the supplied file. It should be
a standard config file in the format:
@@ -131,19 +433,19 @@ class BaseAuth(object):
"""
self._creds_file = credential_file
- cfg = configparser.SafeConfigParser()
+ cfg = ConfigParser.SafeConfigParser()
try:
if not cfg.read(credential_file):
- # If the specified file does not exist, the parser will
- # return an empty list
+ # If the specified file does not exist, the parser returns an
+ # empty list.
raise exc.FileNotFound("The specified credential file '%s' "
"does not exist" % credential_file)
- except configparser.MissingSectionHeaderError as e:
+ except ConfigParser.MissingSectionHeaderError as e:
# The file exists, but doesn't have the correct format.
raise exc.InvalidCredentialFile(e)
try:
self._read_credential_file(cfg)
- except (configparser.NoSectionError, configparser.NoOptionError) as e:
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError) as e:
raise exc.InvalidCredentialFile(e)
if region:
self.region = region
@@ -153,43 +455,43 @@ class BaseAuth(object):
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
"""
- If a valid token is already known, this call will use it to generate
- the service catalog.
+ If a valid token is already known, this call uses it to generate the
+ service catalog.
"""
- resp = self._call_token_auth(token, tenant_id, tenant_name)
- resp_body = resp.json()
+ resp, resp_body = self._call_token_auth(token, tenant_id, tenant_name)
self._parse_response(resp_body)
self.authenticated = True
def _call_token_auth(self, token, tenant_id, tenant_name):
- if not any((tenant_id, tenant_name)):
- raise exc.MissingAuthSettings("You must supply either the tenant "
- "name or tenant ID")
+ key = val = None
if tenant_id:
key = "tenantId"
val = tenant_id
- else:
+ elif tenant_name:
key = "tenantName"
val = tenant_name
+
body = {"auth": {
- key: val,
"token": {"id": token},
}}
+
+        if key and val:
+ body["auth"][key] = val
+
headers = {"Content-Type": "application/json",
"Accept": "application/json",
}
- resp = self.method_post("tokens", data=body, headers=headers,
+ resp, resp_body = self.method_post("tokens", data=body, headers=headers,
std_headers=False)
if resp.status_code == 401:
# Invalid authorization
raise exc.AuthenticationFailed("Incorrect/unauthorized "
"credentials received")
elif resp.status_code > 299:
- msg_dict = resp.json()
- msg = msg_dict[msg_dict.keys()[0]]["message"]
+            msg = resp_body[list(resp_body.keys())[0]]["message"]
raise exc.AuthenticationFailed("%s - %s." % (resp.reason, msg))
- return resp
+ return resp, resp_body
def _read_credential_file(self, cfg):
@@ -201,7 +503,7 @@ class BaseAuth(object):
self.tenant_id = cfg.get("keystone", "tenant_id")
- def _get_credentials(self):
+ def _format_credentials(self):
"""
Returns the current credentials in the format expected by
the authentication service.
@@ -244,6 +546,10 @@ class BaseAuth(object):
def _call(self, mthd, uri, admin, data, headers, std_headers):
+ """
+ Handles all the common functionality required for API calls. Returns
+ the resulting response object.
+ """
if not uri.startswith("http"):
uri = "/".join((self.auth_endpoint.rstrip("/"), uri))
if admin:
@@ -255,21 +561,37 @@ class BaseAuth(object):
hdrs = {}
if headers:
hdrs.update(headers)
- kwargs = {"headers": hdrs,
- "body": data}
+ kwargs = {"headers": hdrs}
+ if data:
+ kwargs["body"] = data
if "tokens" in uri:
# We'll handle the exception here
kwargs["raise_exception"] = False
return pyrax.http.request(mthd, uri, **kwargs)
- def authenticate(self):
+ def authenticate(self, username=None, password=None, api_key=None,
+ tenant_id=None, connect=False):
"""
Using the supplied credentials, connects to the specified
- authentication endpoint and attempts to log in. If successful,
- records the token information.
+ authentication endpoint and attempts to log in.
+
+ Credentials can either be passed directly to this method, or
+ previously-stored credentials can be used. If authentication is
+ successful, the token and service catalog information is stored, and
+ clients for each service and region are created.
+
+ The 'connect' parameter is retained for backwards compatibility. It no
+ longer has any effect.
"""
- creds = self._get_credentials()
+ self.username = username or self.username or pyrax.get_setting(
+ "username")
+ # Different identity systems may pass these under inconsistent names.
+ self.password = password or self.password or api_key or self.api_key
+ self.api_key = api_key or self.api_key or self.password
+ self.tenant_id = tenant_id or self.tenant_id or pyrax.get_setting(
+ "tenant_id")
+ creds = self._format_credentials()
headers = {"Content-Type": "application/json",
"Accept": "application/json",
}
@@ -282,12 +604,14 @@ class BaseAuth(object):
"credentials received")
elif 500 <= resp.status_code < 600:
# Internal Server Error
- error_msg = resp.content or "Service Currently Unavailable"
- raise exc.InternalServerError(error_msg)
- elif 299 < resp.status_code < 500:
- msg_dict = resp.json()
try:
- msg = msg_dict[msg_dict.keys()[0]]["message"]
+ error_msg = resp_body[list(resp_body.keys())[0]]["message"]
+ except KeyError:
+ error_msg = "Service Currently Unavailable"
+ raise exc.InternalServerError(error_msg)
+ elif resp.status_code > 299:
+ try:
+ msg = resp_body[list(resp_body.keys())[0]]["message"]
except KeyError:
msg = None
if msg:
@@ -307,31 +631,8 @@ class BaseAuth(object):
self.tenant_id = token["tenant"]["id"]
self.tenant_name = token["tenant"]["name"]
self.expires = self._parse_api_time(token["expires"])
- svc_cat = access.get("serviceCatalog")
- self.services = {}
- for svc in svc_cat:
- # Replace any dashes with underscores.
- # Also, some service types are extensions that have vendor-specific
- # identifiers; strip them.
- typ = svc["type"].replace("-", "_").split(":")[-1]
- if typ == "compute":
- if svc["name"].lower() == "cloudservers":
- # First-generation Rackspace cloud servers
- continue
- self.services[typ] = dict(name=svc["name"], endpoints={})
- svc_ep = self.services[typ]["endpoints"]
- for ep in svc["endpoints"]:
- rgn = ep.get("region", "ALL")
- self.regions.add(rgn)
- svc_ep[rgn] = {}
- svc_ep[rgn]["public_url"] = ep["publicURL"]
- try:
- svc_ep[rgn]["internal_url"] = ep["internalURL"]
- except KeyError:
- pass
- self.regions.discard("ALL")
- pyrax.regions = tuple(self.regions)
- pyrax.services = tuple(self.services.keys())
+ self.service_catalog = access.get("serviceCatalog")
+ self._parse_service_catalog()
user = access["user"]
self.user = {}
self.user["id"] = user["id"]
@@ -339,13 +640,69 @@ class BaseAuth(object):
self.user["roles"] = user["roles"]
+ def _parse_service_catalog(self):
+ self.services = utils.DotDict()
+ self.regions = set()
+ for svc in self.service_catalog:
+ service = Service(self, svc)
+ if not hasattr(service, "endpoints"):
+ # Not an OpenStack service
+ continue
+ setattr(self.services, service.service_type, service)
+ self.regions.update(list(service.endpoints.keys()))
+ # Update the 'ALL' services to include all available regions.
+ self.regions.discard("ALL")
+ for nm, svc in list(self.services.items()):
+ eps = svc.endpoints
+ ep = eps.pop("ALL", None)
+ if ep:
+ for rgn in self.regions:
+ eps[rgn] = ep
+
+
+ def keyring_auth(self, username=None):
+ """
+ Uses the keyring module to retrieve the user's password or api_key.
+ """
+ if not keyring:
+ # Module not installed
+ raise exc.KeyringModuleNotInstalled("The 'keyring' Python module "
+ "is not installed on this system.")
+ if username is None:
+ username = pyrax.get_setting("keyring_username")
+ if not username:
+ raise exc.KeyringUsernameMissing("No username specified for "
+ "keyring authentication.")
+ password = keyring.get_password("pyrax", username)
+ if password is None:
+ raise exc.KeyringPasswordNotFound("No password was found for the "
+ "username '%s'." % username)
+ style = self._creds_style or self._default_creds_style
+        # The keyring username may differ from the credentials username. Use the
+        # existing username, if present; otherwise, use the supplied username.
+ username = self.username or username
+ if style == "apikey":
+ return self.authenticate(username=username, api_key=password)
+ else:
+ return self.authenticate(username=username, password=password)
+
+
def unauthenticate(self):
"""
- Clears all authentication information.
+ Clears out any credentials, tokens, and service catalog info.
"""
- self.token = self.expires = self.tenant_id = self.tenant_name = ""
+ self.username = ""
+ self.password = ""
+ self.tenant_id = ""
+ self.tenant_name = ""
+ self.token = ""
+ self.expires = None
+ self.region = ""
+ self._creds_file = None
+ self.api_key = ""
+ self.services = utils.DotDict()
+ self.regions = utils.DotDict()
self.authenticated = False
- self.services = {}
def _standard_headers(self):
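A sketch of the new keyring_auth() method above; the "pyrax" service name comes from the keyring.get_password() call in this hunk, while the username and secret are placeholders.

    import keyring
    import pyrax

    keyring.set_password("pyrax", "myuser", "top-secret")   # store the secret once
    ctx = pyrax.create_context(id_type="rackspace")
    ctx.keyring_auth("myuser")                              # authenticates with it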
@@ -363,13 +720,14 @@ class BaseAuth(object):
"""
Returns a list of extensions enabled on this service.
"""
- resp = self.method_get("extensions")
- return resp.json().get("extensions", {}).get("values")
+ resp, resp_body = self.method_get("extensions")
+ return resp_body.get("extensions", {}).get("values")
def get_token(self, force=False):
- """Returns the auth token, if it is valid. If not, calls the auth endpoint
- to get a new token. Passing 'True' to 'force' will force a call for a new
+ """
+ Returns the auth token, if it is valid. If not, calls the auth endpoint
+ to get a new token. Passing 'True' to 'force' forces a call for a new
token, even if there already is a valid token.
"""
self.authenticated = self._has_valid_token()
@@ -392,12 +750,11 @@ class BaseAuth(object):
ADMIN ONLY. Returns a dict containing tokens, endpoints, user info, and
role metadata.
"""
- resp = self.method_get("tokens/%s" % self.token, admin=True)
+ resp, resp_body = self.method_get("tokens/%s" % self.token, admin=True)
if resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You must be an admin to make this "
"call.")
- token_dct = resp.json()
- return token_dct.get("access")
+ return resp_body.get("access")
def check_token(self, token=None):
@@ -407,7 +764,19 @@ class BaseAuth(object):
"""
if token is None:
token = self.token
- resp = self.method_head("tokens/%s" % token, admin=True)
+ resp, resp_body = self.method_head("tokens/%s" % token, admin=True)
+ if resp.status_code in (401, 403):
+ raise exc.AuthorizationFailure("You must be an admin to make this "
+ "call.")
+ return 200 <= resp.status_code < 300
+
+
+ def revoke_token(self, token):
+ """
+ ADMIN ONLY. Returns True or False, depending on whether deletion of the
+ specified token was successful.
+ """
+ resp, resp_body = self.method_delete("tokens/%s" % token, admin=True)
if resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You must be an admin to make this "
"call.")
@@ -418,12 +787,12 @@ class BaseAuth(object):
"""
ADMIN ONLY. Returns a list of all endpoints for the current auth token.
"""
- resp = self.method_get("tokens/%s/endpoints" % self.token, admin=True)
+ resp, resp_body = self.method_get("tokens/%s/endpoints" % self.token,
+ admin=True)
if resp.status_code in (401, 403, 404):
raise exc.AuthorizationFailure("You are not authorized to list "
"token endpoints.")
- token_dct = resp.json()
- return token_dct.get("access", {}).get("endpoints")
+ return resp_body.get("access", {}).get("endpoints")
def list_users(self):
@@ -432,20 +801,19 @@ class BaseAuth(object):
(account) if this request is issued by a user holding the admin role
(identity:user-admin).
"""
- resp = self.method_get("users", admin=True)
+ resp, resp_body = self.method_get("users", admin=True)
if resp.status_code in (401, 403, 404):
raise exc.AuthorizationFailure("You are not authorized to list "
"users.")
- users = resp.json()
- # The API is inconsistent; if only one user exists, it will not return
+ # The API is inconsistent; if only one user exists, it does not return
# a list.
- if "users" in users:
- users = users["users"]
+ if "users" in resp_body:
+ users = resp_body["users"]
else:
- users = [users]
+ users = resp_body
# The returned values may contain password data. Strip that out.
for user in users:
- bad_keys = [key for key in user.keys()
+ bad_keys = [key for key in list(user.keys())
if "password" in key.lower()]
for bad_key in bad_keys:
user.pop(bad_key)
@@ -456,10 +824,10 @@ class BaseAuth(object):
"""
ADMIN ONLY. Creates a new user for this tenant (account). The username
and email address must be supplied. You may optionally supply the
- password for this user; if not, the API server will generate a password
- and return it in the 'password' attribute of the resulting User object.
- NOTE: this is the ONLY time the password will be returned; after the
- initial user creation, there is NO WAY to retrieve the user's password.
+        password for this user; if not, the API server generates a password and
+        returns it in the 'password' attribute of the resulting User object.
+ NOTE: this is the ONLY time the password is returned; after the initial
+ user creation, there is NO WAY to retrieve the user's password.
You may also specify that the user should be created but not active by
passing False to the enabled parameter.
@@ -473,24 +841,61 @@ class BaseAuth(object):
}}
if password:
data["user"]["OS-KSADM:password"] = password
- resp = self.method_post("users", data=data, admin=True)
+ resp, resp_body = self.method_post("users", data=data, admin=True)
if resp.status_code == 201:
- jresp = resp.json()
- return User(self, jresp)
+ return User(self, resp_body.get("user", resp_body))
elif resp.status_code in (401, 403, 404):
raise exc.AuthorizationFailure("You are not authorized to create "
"users.")
elif resp.status_code == 409:
raise exc.DuplicateUser("User '%s' already exists." % name)
elif resp.status_code == 400:
- status = json.loads(resp.text)
- message = status["badRequest"]["message"]
+ message = resp_body["badRequest"]["message"]
if "Expecting valid email address" in message:
raise exc.InvalidEmail("%s is not valid" % email)
else:
raise exc.BadRequest(message)
+ def find_user_by_name(self, name):
+ """
+ Returns a User object by searching for the supplied user name. Returns
+ None if there is no match for the given name.
+ """
+ raise NotImplementedError("This method is not supported.")
+
+
+ def find_user_by_email(self, email):
+ """
+ Returns a User object by searching for the supplied user's email
+ address. Returns None if there is no match for the given ID.
+ """
+ raise NotImplementedError("This method is not supported.")
+
+
+ def find_user_by_id(self, uid):
+ """
+ Returns a User object by searching for the supplied user ID. Returns
+ None if there is no match for the given ID.
+ """
+ raise NotImplementedError("This method is not supported.")
+
+
+ def get_user(self, user_id=None, username=None, email=None):
+ """
+ Returns the user specified by either ID, username or email.
+
+        Since more than one user can have the same email address, searching by
+        that term returns a list of 1 or more User objects. Searching by
+        username or ID returns a single User.
+
+ If a user_id that doesn't belong to the current account is searched
+ for, a Forbidden exception is raised. When searching by username or
+ email, a NotFound exception is raised if there is no matching user.
+ """
+ raise NotImplementedError("This method is not supported.")
+
+
# Can we really update the ID? Docs seem to say we can
def update_user(self, user, email=None, username=None,
uid=None, enabled=None):
@@ -507,11 +912,11 @@ class BaseAuth(object):
if enabled is not None:
upd["enabled"] = enabled
data = {"user": upd}
- resp = self.method_put(uri, data=data)
+ resp, resp_body = self.method_put(uri, data=data)
if resp.status_code in (401, 403, 404):
raise exc.AuthorizationFailure("You are not authorized to update "
"users.")
- return User(self, resp.json())
+ return User(self, resp_body)
def delete_user(self, user):
@@ -522,7 +927,7 @@ class BaseAuth(object):
"""
user_id = utils.get_id(user)
uri = "users/%s" % user_id
- resp = self.method_delete(uri)
+ resp, resp_body = self.method_delete(uri)
if resp.status_code == 404:
raise exc.UserNotFound("User '%s' does not exist." % user)
elif resp.status_code in (401, 403):
@@ -538,14 +943,37 @@ class BaseAuth(object):
"""
user_id = utils.get_id(user)
uri = "users/%s/roles" % user_id
- resp = self.method_get(uri)
+ resp, resp_body = self.method_get(uri)
if resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You are not authorized to list "
"user roles.")
- roles = resp.json().get("roles")
+ roles = resp_body.get("roles")
return roles
+ def list_credentials(self, user=None):
+ """
+ Returns a user's non-password credentials. If no user is specified, the
+ credentials for the currently authenticated user are returned.
+
+ You cannot retrieve passwords by this or any other means.
+ """
+ if not user:
+ user = self.user
+ user_id = utils.get_id(user)
+ uri = "users/%s/OS-KSADM/credentials" % user_id
+ resp, resp_body = self.method_get(uri)
+ return resp_body.get("credentials")
+
+
+ def reset_api_key(self, user=None):
+ """
+ Not available in basic Keystone identity.
+ """
+ raise NotImplementedError("The reset_api_key method is not "
+ "implemented.")
+
+
def get_tenant(self):
"""
Returns the tenant for the current user.
@@ -556,11 +984,12 @@ class BaseAuth(object):
return None
- def list_tenants(self):
+ def list_tenants(self, admin=True):
"""
- ADMIN ONLY. Returns a list of all tenants.
+ Lists all tenants associated with the currently authenticated
+ user (admin=False), or all tenants (admin=True).
"""
- return self._list_tenants(admin=True)
+ return self._list_tenants(admin)
def _list_tenants(self, admin):
@@ -568,9 +997,9 @@ class BaseAuth(object):
Returns either a list of all tenants (admin=True), or the tenant for
the currently-authenticated user (admin=False).
"""
- resp = self.method_get("tenants", admin=admin)
+ resp, resp_body = self.method_get("tenants", admin=admin)
if 200 <= resp.status_code < 300:
- tenants = resp.json().get("tenants", [])
+ tenants = resp_body.get("tenants", [])
return [Tenant(self, tenant) for tenant in tenants]
elif resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You are not authorized to list "
@@ -589,8 +1018,8 @@ class BaseAuth(object):
}}
if description:
data["tenant"]["description"] = description
- resp = self.method_post("tenants", data=data)
- return Tenant(self, resp.json())
+ resp, resp_body = self.method_post("tenants", data=data)
+ return Tenant(self, resp_body)
def update_tenant(self, tenant, name=None, description=None, enabled=True):
@@ -605,8 +1034,8 @@ class BaseAuth(object):
data["tenant"]["name"] = name
if description:
data["tenant"]["description"] = description
- resp = self.method_put("tenants/%s" % tenant_id, data=data)
- return Tenant(self, resp.json())
+ resp, resp_body = self.method_put("tenants/%s" % tenant_id, data=data)
+ return Tenant(self, resp_body)
def delete_tenant(self, tenant):
@@ -617,19 +1046,80 @@ class BaseAuth(object):
"""
tenant_id = utils.get_id(tenant)
uri = "tenants/%s" % tenant_id
- resp = self.method_delete(uri)
+ resp, resp_body = self.method_delete(uri)
if resp.status_code == 404:
raise exc.TenantNotFound("Tenant '%s' does not exist." % tenant)
+ def list_roles(self, service_id=None, limit=None, marker=None):
+ """
+ Returns a list of all global roles for users, optionally limited by
+ service. Pagination can be handled through the standard 'limit' and
+ 'marker' parameters.
+ """
+ uri = "OS-KSADM/roles"
+ pagination_items = []
+ if service_id is not None:
+ pagination_items.append("serviceId=%s" % service_id)
+ if limit is not None:
+ pagination_items.append("limit=%s" % limit)
+ if marker is not None:
+ pagination_items.append("marker=%s" % marker)
+ pagination = "&".join(pagination_items)
+ if pagination:
+ uri = "%s?%s" % (uri, pagination)
+ resp, resp_body = self.method_get(uri)
+ roles = resp_body.get("roles", [])
+ return [Role(self, role) for role in roles]
+
+
+ def get_role(self, role):
+ """
+ Returns a Role object representing the specified parameter. The 'role'
+ parameter can be either an existing Role object, or the ID of the role.
+
+ If an invalid role is passed, a NotFound exception is raised.
+ """
+ uri = "OS-KSADM/roles/%s" % utils.get_id(role)
+ resp, resp_body = self.method_get(uri)
+ role = Role(self, resp_body.get("role"))
+ return role
+
+
+ def add_role_to_user(self, role, user):
+ """
+ Adds the specified role to the specified user.
+
+ There is no return value upon success. Passing a non-existent role or
+ user raises a NotFound exception.
+ """
+ uri = "users/%s/roles/OS-KSADM/%s" % (utils.get_id(user),
+ utils.get_id(role))
+ resp, resp_body = self.method_put(uri)
+
+
+ def delete_role_from_user(self, role, user):
+ """
+ Deletes the specified role from the specified user.
+
+ There is no return value upon success. Passing a non-existent role or
+ user raises a NotFound exception.
+ """
+ uri = "users/%s/roles/OS-KSADM/%s" % (utils.get_id(user),
+ utils.get_id(role))
+ resp, resp_body = self.method_delete(uri)
+
+
@staticmethod
def _parse_api_time(timestr):
"""
- Typical expiration times returned from the auth server are in this format:
+ Typical expiration times returned from the auth server are in this
+ format:
2012-05-02T14:27:40.000-05:00
They can also be returned as a UTC value in this format:
2012-05-02T14:27:40.000Z
- This method returns a proper datetime object from either of these formats.
+ This method returns a proper datetime object from either of these
+ formats.
"""
try:
reg_groups = API_DATE_PATTERN.match(timestr).groups()
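Both expiration formats named in the _parse_api_time() docstring above parse to datetime objects, for example:

    from pyrax.base_identity import BaseIdentity

    BaseIdentity._parse_api_time("2012-05-02T14:27:40.000-05:00")
    BaseIdentity._parse_api_time("2012-05-02T14:27:40.000Z")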
diff --git a/awx/lib/site-packages/pyrax/cf_wrapper/client.py b/awx/lib/site-packages/pyrax/cf_wrapper/client.py
index f4d92144f2..4f6522df7a 100644
--- a/awx/lib/site-packages/pyrax/cf_wrapper/client.py
+++ b/awx/lib/site-packages/pyrax/cf_wrapper/client.py
@@ -1,6 +1,23 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+# Copyright (c)2012 Rackspace US, Inc.
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
import datetime
from functools import wraps
import hashlib
@@ -44,6 +61,7 @@ CONNECTION_TIMEOUT = 20
CONNECTION_RETRIES = 5
AUTH_ATTEMPTS = 2
MAX_BULK_DELETE = 10000
+DEFAULT_CHUNKSIZE = 65536
no_such_container_pattern = re.compile(
r"Container (?:GET|HEAD) failed: .+/(.+) 404")
@@ -60,6 +78,17 @@ def _close_swiftclient_conn(conn):
pass
+def plug_hole_in_swiftclient_auth(clt, url):
+ """
+ This is necessary because swiftclient has an issue when a token expires and
+ it needs to re-authenticate against Rackspace auth. It is a temporary
+ workaround until we can fix swiftclient.
+ """
+ conn = clt.connection
+ conn.token = clt.identity.token
+ conn.url = url
+
+
def handle_swiftclient_exception(fnc):
@wraps(fnc)
def _wrapped(self, *args, **kwargs):
@@ -79,9 +108,9 @@ def handle_swiftclient_exception(fnc):
# Assume it is an auth failure. Re-auth and retry.
# NOTE: This is a hack to get around an apparent bug
# in python-swiftclient when using Rackspace auth.
- pyrax.authenticate(connect=False)
- if pyrax.identity.authenticated:
- pyrax.plug_hole_in_swiftclient_auth(self, clt_url)
+ self.identity.authenticate(connect=False)
+ if self.identity.authenticated:
+                    plug_hole_in_swiftclient_auth(self, clt_url)
continue
elif e.http_status == 404:
bad_container = no_such_container_pattern.search(str_error)
@@ -102,6 +131,16 @@ def handle_swiftclient_exception(fnc):
return _wrapped
+def ensure_cdn(fnc):
+ @wraps(fnc)
+ def _wrapped(self, *args, **kwargs):
+ if not self.connection.cdn_connection:
+ raise exc.NotCDNEnabled("This service does not support "
+ "CDN-enabled containers.")
+ return fnc(self, *args, **kwargs)
+ return _wrapped
+
+
def _convert_head_object_last_modified_to_local(lm_str):
# Need to convert last modified time to a datetime object.
# Times are returned in default locale format, so we need to read
@@ -119,18 +158,32 @@ def _convert_head_object_last_modified_to_local(lm_str):
def _convert_list_last_modified_to_local(attdict):
- if 'last_modified' in attdict:
+ if "last_modified" in attdict:
attdict = attdict.copy()
- list_date_format_with_tz = LIST_DATE_FORMAT + ' %Z'
- last_modified_utc = attdict['last_modified'] + ' UTC'
+ list_date_format_with_tz = LIST_DATE_FORMAT + " %Z"
+ last_modified_utc = attdict["last_modified"] + " UTC"
tm_tuple = time.strptime(last_modified_utc,
list_date_format_with_tz)
dttm = datetime.datetime.fromtimestamp(time.mktime(tm_tuple))
- attdict['last_modified'] = dttm.strftime(DATE_FORMAT)
+ dttm_with_micros = datetime.datetime.strptime(last_modified_utc,
+ list_date_format_with_tz)
+ # Round the date *up* in seconds, to match the last modified time
+ # in head requests
+ # https://review.openstack.org/#/c/55488/
+ if dttm_with_micros.microsecond > 0:
+ dttm += datetime.timedelta(seconds=1)
+ attdict["last_modified"] = dttm.strftime(DATE_FORMAT)
return attdict
+def _quote(val):
+ if isinstance(val, six.text_type):
+ val = val.encode("utf-8")
+ return urllib.quote(val)
+
+
+
class CFClient(object):
"""
Wraps the calls to swiftclient with objects representing Containers
@@ -139,11 +192,6 @@ class CFClient(object):
These classes allow a developer to work with regular Python objects
instead of calling functions that return primitive types.
"""
- # Constants used in metadata headers
- account_meta_prefix = "X-Account-Meta-"
- container_meta_prefix = "X-Container-Meta-"
- object_meta_prefix = "X-Object-Meta-"
- cdn_meta_prefix = "X-Cdn-"
# Defaults for CDN
cdn_enabled = False
default_cdn_ttl = 86400
@@ -175,6 +223,24 @@ class CFClient(object):
http_log_debug=http_log_debug)
+ # Constants used in metadata headers
+ @property
+ def account_meta_prefix(self):
+ return "X-Account-Meta-"
+
+ @property
+ def container_meta_prefix(self):
+ return "X-Container-Meta-"
+
+ @property
+ def object_meta_prefix(self):
+ return "X-Object-Meta-"
+
+ @property
+ def cdn_meta_prefix(self):
+ return "X-Cdn-"
+
+
def _make_connections(self, auth_endpoint, username, api_key, password,
tenant_name=None, preauthurl=None, preauthtoken=None,
auth_version="2", os_options=None, verify_ssl=True,
@@ -210,12 +276,14 @@ class CFClient(object):
@handle_swiftclient_exception
- def get_account_metadata(self):
+ def get_account_metadata(self, prefix=None):
headers = self.connection.head_account()
- prfx = self.account_meta_prefix.lower()
+ if prefix is None:
+ prefix = self.account_meta_prefix
+ prefix = prefix.lower()
ret = {}
for hkey, hval in headers.iteritems():
- if hkey.lower().startswith(prfx):
+ if hkey.lower().startswith(prefix):
ret[hkey] = hval
return ret
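For illustration only, a minimal sketch of how the optional `prefix` argument to get_account_metadata() might be used; the client instance `cf` is an assumed, already-authenticated CFClient, not something defined in this patch:

    # Default: only headers beginning with "X-Account-Meta-" are returned.
    meta = cf.get_account_metadata()
    # A custom prefix changes the filter; here we keep every account-level header.
    all_account_headers = cf.get_account_metadata(prefix="X-Account-")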
@@ -336,11 +404,7 @@ class CFClient(object):
specified number of seconds.
"""
meta = {"X-Delete-After": str(seconds)}
- self.set_object_metadata(cont, obj, meta, prefix="")
-# cname = self._resolve_name(cont)
-# oname = self._resolve_name(obj)
-# self.connection.post_object(cname, oname, headers=headers,
-# response_dict=extra_info)
+ self.set_object_metadata(cont, obj, meta, prefix="", clear=True)
@handle_swiftclient_exception
@@ -349,7 +413,8 @@ class CFClient(object):
cname = self._resolve_name(container)
headers = self.connection.head_container(cname)
if prefix is None:
- prefix = self.container_meta_prefix.lower()
+ prefix = self.container_meta_prefix
+ prefix = prefix.lower()
ret = {}
for hkey, hval in headers.iteritems():
if hkey.lower().startswith(prefix):
@@ -394,19 +459,23 @@ class CFClient(object):
@handle_swiftclient_exception
def remove_container_metadata_key(self, container, key,
- extra_info=None):
+ prefix=None, extra_info=None):
"""
Removes the specified key from the container's metadata. If the key
does not exist in the metadata, nothing is done.
"""
+ if prefix is None:
+ prefix = self.container_meta_prefix
+ prefix = prefix.lower()
meta_dict = {key: ""}
# Add the metadata prefix, if needed.
- massaged = self._massage_metakeys(meta_dict, self.container_meta_prefix)
+ massaged = self._massage_metakeys(meta_dict, prefix)
cname = self._resolve_name(container)
self.connection.post_container(cname, massaged,
response_dict=extra_info)
+ @ensure_cdn
@handle_swiftclient_exception
def get_container_cdn_metadata(self, container):
"""
@@ -421,6 +490,7 @@ class CFClient(object):
return dict(headers)
+ @ensure_cdn
@handle_swiftclient_exception
def set_container_cdn_metadata(self, container, metadata):
"""
@@ -449,15 +519,17 @@ class CFClient(object):
@handle_swiftclient_exception
- def get_object_metadata(self, container, obj):
+ def get_object_metadata(self, container, obj, prefix=None):
"""Retrieves any metadata for the specified object."""
+ if prefix is None:
+ prefix = self.object_meta_prefix
cname = self._resolve_name(container)
oname = self._resolve_name(obj)
headers = self.connection.head_object(cname, oname)
- prfx = self.object_meta_prefix.lower()
+ prefix = prefix.lower()
ret = {}
for hkey, hval in headers.iteritems():
- if hkey.lower().startswith(prfx):
+ if hkey.lower().startswith(prefix):
ret[hkey] = hval
return ret
@@ -493,8 +565,8 @@ class CFClient(object):
# whereas for containers you need to set the values to an empty
# string to delete them.
if not clear:
- obj_meta = self.get_object_metadata(cname, oname)
- new_meta = self._massage_metakeys(obj_meta, self.object_meta_prefix)
+ obj_meta = self.get_object_metadata(cname, oname, prefix=prefix)
+ new_meta = self._massage_metakeys(obj_meta, prefix)
utils.case_insensitive_update(new_meta, massaged)
# Remove any empty values, since the object metadata API will
# store them.
@@ -509,12 +581,12 @@ class CFClient(object):
@handle_swiftclient_exception
- def remove_object_metadata_key(self, container, obj, key):
+ def remove_object_metadata_key(self, container, obj, key, prefix=None):
"""
- Removes the specified key from the storage object's metadata. If the key
- does not exist in the metadata, nothing is done.
+ Removes the specified key from the storage object's metadata. If the
+ key does not exist in the metadata, nothing is done.
"""
- self.set_object_metadata(container, obj, {key: ""})
+ self.set_object_metadata(container, obj, {key: ""}, prefix=prefix)
@handle_swiftclient_exception
@@ -629,33 +701,50 @@ class CFClient(object):
@handle_swiftclient_exception
def store_object(self, container, obj_name, data, content_type=None,
etag=None, content_encoding=None, ttl=None, return_none=False,
- extra_info=None):
+ chunk_size=None, headers=None, extra_info=None):
"""
Creates a new object in the specified container, and populates it with
the given data. A StorageObject reference to the uploaded file
will be returned, unless 'return_none' is set to True.
+ 'chunk_size' represents the number of bytes of data to write; it
+ defaults to 65536. It is used only if the 'data' parameter is an
+ object with a 'read' method; otherwise, it is ignored.
+
+ If you wish to specify additional headers to be passed to the PUT
+ request, pass them as a dict in the 'headers' parameter. It is the
+ developer's responsibility to ensure that any headers are valid; pyrax
+ does no checking.
+
'extra_info' is an optional dictionary which will be
populated with 'status', 'reason', and 'headers' keys from the
underlying swiftclient call.
"""
cont = self.get_container(container)
- headers = {}
+ if headers is None:
+ headers = {}
if content_encoding is not None:
headers["Content-Encoding"] = content_encoding
if ttl is not None:
headers["X-Delete-After"] = ttl
- with utils.SelfDeletingTempfile() as tmp:
- with open(tmp, "wb") as tmpfile:
- try:
- tmpfile.write(data)
- except UnicodeEncodeError:
- udata = data.encode("utf-8")
- tmpfile.write(udata)
- with open(tmp, "rb") as tmpfile:
- self.connection.put_object(cont.name, obj_name,
- contents=tmpfile, content_type=content_type, etag=etag,
- headers=headers, response_dict=extra_info)
+ if chunk_size and hasattr(data, "read"):
+ # Chunked file-like object
+ self.connection.put_object(cont.name, obj_name, contents=data,
+ content_type=content_type, etag=etag, headers=headers,
+ chunk_size=chunk_size, response_dict=extra_info)
+ else:
+ with utils.SelfDeletingTempfile() as tmp:
+ with open(tmp, "wb") as tmpfile:
+ try:
+ tmpfile.write(data)
+ except UnicodeEncodeError:
+ udata = data.encode("utf-8")
+ tmpfile.write(udata)
+ with open(tmp, "rb") as tmpfile:
+ self.connection.put_object(cont.name, obj_name,
+ contents=tmpfile, content_type=content_type,
+ etag=etag, headers=headers, chunk_size=chunk_size,
+ response_dict=extra_info)
if return_none:
return None
else:
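For illustration only, a minimal sketch of the new `chunk_size` and `headers` parameters of store_object(); `cf` is an assumed authenticated CFClient and the container/file names are placeholders:

    # Stream a large file in 64 KiB chunks instead of buffering it in memory;
    # the extra header is passed through unchanged to the PUT request.
    with open("backup.tar.gz", "rb") as src:
        cf.store_object("backups", "backup.tar.gz", src,
                content_type="application/gzip", chunk_size=65536,
                headers={"X-Object-Meta-Origin": "nightly-job"},
                return_none=True)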
@@ -721,7 +810,7 @@ class CFClient(object):
def upload_file(self, container, file_or_path, obj_name=None,
content_type=None, etag=None, return_none=False,
content_encoding=None, ttl=None, extra_info=None,
- content_length=None):
+ content_length=None, headers=None):
"""
Uploads the specified file to the container. If no name is supplied,
the file's name will be used. Either a file path or an open file-like
@@ -734,6 +823,11 @@ class CFClient(object):
If the size of the file is known, it can be passed as `content_length`.
+ If you wish to specify additional headers to be passed to the PUT
+ request, pass them as a dict in the 'headers' parameter. It is the
+ developer's responsibility to ensure that any headers are valid; pyrax
+ does no checking.
+
If you wish for the object to be temporary, specify the time it should
be stored in seconds in the `ttl` parameter. If this is specified, the
object will be deleted after that number of seconds.
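For illustration only, a minimal sketch of upload_file() with the new `headers` parameter and a `ttl`; `cf`, the container name, and the path are assumptions:

    # Upload from a path, expire the object after one hour, and attach an
    # extra header to the PUT request.
    cf.upload_file("backups", "/tmp/report.csv", content_type="text/csv",
            ttl=3600, headers={"X-Object-Meta-Source": "cron"})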
@@ -806,7 +900,8 @@ class CFClient(object):
raise InvalidUploadID("No filename provided and/or it cannot be "
"inferred from context")
- headers = {}
+ if headers is None:
+ headers = {}
if content_encoding is not None:
headers["Content-Encoding"] = content_encoding
if ttl is not None:
@@ -886,7 +981,8 @@ class CFClient(object):
def sync_folder_to_container(self, folder_path, container, delete=False,
- include_hidden=False, ignore=None, ignore_timestamps=False):
+ include_hidden=False, ignore=None, ignore_timestamps=False,
+ object_prefix="", verbose=False):
"""
Compares the contents of the specified folder, and checks to make sure
that the corresponding object is present in the specified container. If
@@ -909,22 +1005,43 @@ class CFClient(object):
file names, and any names that match any of the 'ignore' patterns will
not be uploaded. The patterns should be standard *nix-style shell
patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as
- 'program.pyc' and 'abcpyc'. """
+ 'program.pyc' and 'abcpyc'.
+
+ If `object_prefix` is set it will be prepended to the object name when
+ it is checked and uploaded to the container. For example, calling
+ sync_folder_to_container("folderToSync/", myContainer,
+ object_prefix="imgFolder") will upload the files to
+ container/imgFolder/... instead of just container/...
+
+ Set `verbose` to True to log progress: which files are uploaded, which
+ are skipped, and why.
+ """
cont = self.get_container(container)
self._local_files = []
+ # Load a list of all the remote objects so we don't have to keep
+ # hitting the service
+ if verbose:
+ log = logging.getLogger("pyrax")
+ log.info("Loading remote object list (prefix=%s)", object_prefix)
+ data = cont.get_objects(prefix=object_prefix, full_listing=True)
+ self._remote_files = dict((d.name, d) for d in data)
self._sync_folder_to_container(folder_path, cont, prefix="",
delete=delete, include_hidden=include_hidden, ignore=ignore,
- ignore_timestamps=ignore_timestamps)
+ ignore_timestamps=ignore_timestamps,
+ object_prefix=object_prefix, verbose=verbose)
+ # Unset the _remote_files
+ self._remote_files = None
def _sync_folder_to_container(self, folder_path, cont, prefix, delete,
- include_hidden, ignore, ignore_timestamps):
+ include_hidden, ignore, ignore_timestamps, object_prefix, verbose):
"""
This is the internal method that is called recursively to handle
nested folder structures.
"""
fnames = os.listdir(folder_path)
ignore = utils.coerce_string_to_list(ignore)
+ log = logging.getLogger("pyrax")
if not include_hidden:
ignore.append(".*")
for fname in fnames:
@@ -937,17 +1054,20 @@ class CFClient(object):
subprefix = "%s/%s" % (prefix, subprefix)
self._sync_folder_to_container(pth, cont, prefix=subprefix,
delete=delete, include_hidden=include_hidden,
- ignore=ignore, ignore_timestamps=ignore_timestamps)
+ ignore=ignore, ignore_timestamps=ignore_timestamps,
+ object_prefix=object_prefix, verbose=verbose)
continue
- self._local_files.append(os.path.join(prefix, fname))
+ self._local_files.append(os.path.join(object_prefix, prefix, fname))
local_etag = utils.get_checksum(pth)
fullname = fname
+ fullname_with_prefix = "%s/%s" % (object_prefix, fname)
if prefix:
fullname = "%s/%s" % (prefix, fname)
+ fullname_with_prefix = "%s/%s/%s" % (object_prefix, prefix, fname)
try:
- obj = cont.get_object(fullname)
+ obj = self._remote_files[fullname_with_prefix]
obj_etag = obj.etag
- except exc.NoSuchObject:
+ except KeyError:
obj = None
obj_etag = None
if local_etag != obj_etag:
@@ -961,19 +1081,29 @@ class CFClient(object):
local_mod_str = local_mod.isoformat()
if obj_time_str >= local_mod_str:
# Remote object is newer
+ if verbose:
+ log.info("%s NOT UPLOADED because remote object is "
+ "newer", fullname)
continue
- cont.upload_file(pth, obj_name=fullname, etag=local_etag,
- return_none=True)
+ cont.upload_file(pth, obj_name=fullname_with_prefix,
+ etag=local_etag, return_none=True)
+ if verbose:
+ log.info("%s UPLOADED", fullname)
+ else:
+ if verbose:
+ log.info("%s NOT UPLOADED because it already exists",
+ fullname)
if delete and not prefix:
- self._delete_objects_not_in_list(cont)
+ self._delete_objects_not_in_list(cont, object_prefix)
- def _delete_objects_not_in_list(self, cont):
+ def _delete_objects_not_in_list(self, cont, object_prefix=""):
"""
Finds all the objects in the specified container that are not present
in the self._local_files list, and deletes them.
"""
- objnames = set(cont.get_object_names(full_listing=True))
+ objnames = set(cont.get_object_names(prefix=object_prefix,
+ full_listing=True))
localnames = set(self._local_files)
to_delete = list(objnames.difference(localnames))
# We don't need to wait around for this to complete. Store the thread
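For illustration only, a minimal sketch of the new `object_prefix` and `verbose` options; `cf` is an assumed authenticated CFClient and the folder/container names are placeholders:

    import logging
    logging.basicConfig(level=logging.INFO)    # make the "pyrax" log output visible

    # Mirror a local folder under myContainer/imgFolder/..., log every
    # upload/skip decision, and delete remote objects with no local copy.
    cf.sync_folder_to_container("folderToSync/", "myContainer",
            object_prefix="imgFolder", verbose=True, delete=True)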
@@ -1067,6 +1197,52 @@ class CFClient(object):
return ret
+ def fetch_dlo(self, cont, name, chunk_size=None):
+ """
+ Returns a list of 2-tuples in the form of (object_name,
+ fetch_generator) representing the components of a multi-part DLO
+ (Dynamic Large Object). Each fetch_generator object can be iterated
+ to retrieve its contents.
+
+ This is useful when transferring a DLO from one object storage system
+ to another. Examples would be copying DLOs from one region of a
+ provider to another, or copying a DLO from one provider to another.
+ """
+ if chunk_size is None:
+ chunk_size = DEFAULT_CHUNKSIZE
+
+ class FetchChunker(object):
+ """
+ Class that takes the generator objects returned by a chunked
+ fetch_object() call and wraps them to behave as file-like objects for
+ uploading.
+ """
+ def __init__(self, gen, verbose=False):
+ self.gen = gen
+ self.verbose = verbose
+ self.processed = 0
+ self.interval = 0
+
+ def read(self, size=None):
+ self.interval += 1
+ if self.verbose:
+ if self.interval > 1024:
+ self.interval = 0
+ logit(".")
+ ret = self.gen.next()
+ self.processed += len(ret)
+ return ret
+
+ parts = self.get_container_objects(cont, prefix=name)
+ fetches = [(part.name, self.fetch_object(cont, part.name,
+ chunk_size=chunk_size))
+ for part in parts
+ if part.name != name]
+ job = [(fetch[0], FetchChunker(fetch[1], verbose=False))
+ for fetch in fetches]
+ return job
+
+
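For illustration only, a minimal sketch of iterating the pairs returned by fetch_dlo(); `cf`, the container, and the object name are assumptions:

    # Each part of the DLO comes back as (name, FetchChunker); the chunker
    # behaves like a file object, so read() returns the next chunk of data.
    for part_name, reader in cf.fetch_dlo("segments", "bigfile"):
        first_chunk = reader.read()
        print("%s: %d bytes in first chunk" % (part_name, len(first_chunk)))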
@handle_swiftclient_exception
def download_object(self, container, obj, directory, structure=True):
"""
@@ -1096,7 +1272,8 @@ class CFClient(object):
@handle_swiftclient_exception
def get_all_containers(self, limit=None, marker=None, **parms):
- hdrs, conts = self.connection.get_container("")
+ hdrs, conts = self.connection.get_container("", limit=limit,
+ marker=marker)
ret = [Container(self, name=cont["name"], object_count=cont["count"],
total_bytes=cont["bytes"]) for cont in conts]
return ret
@@ -1123,6 +1300,7 @@ class CFClient(object):
total_bytes=hdrs.get("x-container-bytes-used"))
self._container_cache[cname] = cont
return cont
+ get = get_container
@handle_swiftclient_exception
@@ -1145,6 +1323,7 @@ class CFClient(object):
attdict=_convert_list_last_modified_to_local(obj))
for obj in objs
if "name" in obj]
+ list_container_objects = get_container_objects
@handle_swiftclient_exception
@@ -1156,24 +1335,26 @@ class CFClient(object):
full_listing=full_listing)
cont = self.get_container(cname)
return [obj["name"] for obj in objs]
+ list_container_object_names = get_container_object_names
@handle_swiftclient_exception
def list_container_subdirs(self, container, marker=None, limit=None,
prefix=None, delimiter=None, full_listing=False):
"""
- Return a list of StorageObjects representing the pseudo-subdirectories
+ Returns a list of StorageObjects representing the pseudo-subdirectories
in the specified container. You can use the marker and limit params to
- handle pagination, and the prefix and delimiter params to filter the
- objects returned.
+ handle pagination, and the prefix param to filter the objects returned.
+ The 'delimiter' parameter is ignored, as the only meaningful value is
+ '/'.
"""
cname = self._resolve_name(container)
hdrs, objs = self.connection.get_container(cname, marker=marker,
- limit=limit, prefix=prefix, delimiter=delimiter,
+ limit=limit, prefix=prefix, delimiter="/",
full_listing=full_listing)
cont = self.get_container(cname)
return [StorageObject(self, container=cont, attdict=obj) for obj in objs
- if obj.get("content_type") == "application/directory"]
+ if "subdir" in obj]
@handle_swiftclient_exception
@@ -1189,17 +1370,21 @@ class CFClient(object):
@handle_swiftclient_exception
def list(self, limit=None, marker=None, **parms):
"""Returns a list of all container objects."""
- hdrs, conts = self.connection.get_container("")
+ hdrs, conts = self.connection.get_container("", limit=limit,
+ marker=marker)
ret = [self.get_container(cont["name"]) for cont in conts]
return ret
+ get_all_containers = list
@handle_swiftclient_exception
def list_containers(self, limit=None, marker=None, **parms):
"""Returns a list of all container names as strings."""
- hdrs, conts = self.connection.get_container("")
+ hdrs, conts = self.connection.get_container("", limit=limit,
+ marker=marker)
ret = [cont["name"] for cont in conts]
return ret
+ list_container_names = list_containers
@handle_swiftclient_exception
@@ -1212,10 +1397,12 @@ class CFClient(object):
count - the number of objects in the container
bytes - the total bytes in the container
"""
- hdrs, conts = self.connection.get_container("")
+ hdrs, conts = self.connection.get_container("", limit=limit,
+ marker=marker)
return conts
+ @ensure_cdn
@handle_swiftclient_exception
def list_public_containers(self):
"""Returns a list of all CDN-enabled containers."""
@@ -1240,6 +1427,7 @@ class CFClient(object):
return self._cdn_set_access(container, None, False)
+ @ensure_cdn
def _cdn_set_access(self, container, ttl, enabled):
"""Used to enable or disable CDN access on a container."""
if ttl is None:
@@ -1273,6 +1461,7 @@ class CFClient(object):
cont.cdn_log_retention = enabled
+ @ensure_cdn
def _set_cdn_log_retention(self, container, enabled):
"""This does the actual call to the Cloud Files API."""
hdrs = {"X-Log-Retention": "%s" % enabled}
@@ -1324,6 +1513,7 @@ class CFClient(object):
return self.set_container_metadata(container, hdr, clear=False)
+ @ensure_cdn
@handle_swiftclient_exception
def purge_cdn_object(self, container, name, email_addresses=None):
ct = self.get_container(container)
@@ -1413,12 +1603,7 @@ class Connection(_swift_client.Connection):
Taken directly from the cloudfiles library and modified for use here.
"""
- def quote(val):
- if isinstance(val, six.text_type):
- val = val.encode("utf-8")
- return urllib.quote(val)
-
- pth = "/".join([quote(elem) for elem in path])
+ pth = "/".join([_quote(elem) for elem in path])
uri_path = urlparse.urlparse(self.uri).path
path = "%s/%s" % (uri_path.rstrip("/"), pth)
headers = {"Content-Length": str(len(data)),
@@ -1440,8 +1625,8 @@ class Connection(_swift_client.Connection):
response = None
if response:
if response.status == 401:
- pyrax.identity.authenticate()
- headers["X-Auth-Token"] = pyrax.identity.token
+ self.identity.authenticate()
+ headers["X-Auth-Token"] = self.identity.token
else:
break
attempt += 1
@@ -1532,14 +1717,14 @@ class BulkDeleter(threading.Thread):
cname = client._resolve_name(container)
parsed, conn = client.connection.http_connection()
method = "DELETE"
- headers = {"X-Auth-Token": pyrax.identity.token,
+ headers = {"X-Auth-Token": self.client.identity.token,
"Content-type": "text/plain",
}
while object_names:
this_batch, object_names = (object_names[:MAX_BULK_DELETE],
object_names[MAX_BULK_DELETE:])
obj_paths = ("%s/%s" % (cname, nm) for nm in this_batch)
- body = "\n".join(obj_paths)
+ body = _quote("\n".join(obj_paths))
pth = "%s/?bulk-delete=1" % parsed.path
conn.request(method, pth, body, headers)
resp = conn.getresponse()
@@ -1547,7 +1732,7 @@ class BulkDeleter(threading.Thread):
reason = resp.reason
resp_body = resp.read()
for resp_line in resp_body.splitlines():
- if not resp_line:
+ if not resp_line.strip():
continue
resp_key, val = resp_line.split(":", 1)
result_key = res_keys.get(resp_key)
diff --git a/awx/lib/site-packages/pyrax/cf_wrapper/container.py b/awx/lib/site-packages/pyrax/cf_wrapper/container.py
index 31689e0e67..cb0575465b 100644
--- a/awx/lib/site-packages/pyrax/cf_wrapper/container.py
+++ b/awx/lib/site-packages/pyrax/cf_wrapper/container.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -56,6 +56,10 @@ class Container(object):
def _fetch_cdn_data(self):
"""Fetches the object's CDN data from the CDN service"""
+ if not self.client.cdn_enabled:
+ # Not CDN enabled; set the defaults.
+ self._set_cdn_defaults()
+ return
response = self.client.connection.cdn_request("HEAD", [self.name])
if 200 <= response.status < 300:
# Set defaults in case not all headers are present.
@@ -95,6 +99,7 @@ class Container(object):
limit=limit, prefix=prefix, delimiter=delimiter,
full_listing=full_listing)
return objs
+ list = get_objects
def get_object(self, name, cached=True):
@@ -113,6 +118,7 @@ class Container(object):
ret = self.client.get_object(self, name)
self._object_cache[name] = ret
return ret
+ get = get_object
def get_object_names(self, marker=None, limit=None, prefix=None,
@@ -124,6 +130,7 @@ class Container(object):
return self.client.get_container_object_names(self.name, marker=marker,
limit=limit, prefix=prefix, delimiter=delimiter,
full_listing=full_listing)
+ list_object_names = get_object_names
def list_subdirs(self, marker=None, limit=None, prefix=None, delimiter=None,
@@ -244,11 +251,11 @@ class Container(object):
structure=structure)
- def get_metadata(self):
+ def get_metadata(self, prefix=None):
"""
Returns a dictionary containing the metadata for the container.
"""
- return self.client.get_container_metadata(self)
+ return self.client.get_container_metadata(self, prefix=prefix)
def set_metadata(self, metadata, clear=False, prefix=None):
@@ -273,12 +280,13 @@ class Container(object):
prefix=prefix)
- def remove_metadata_key(self, key):
+ def remove_metadata_key(self, key, prefix=None):
"""
Removes the specified key from the container's metadata. If the key
does not exist in the metadata, nothing is done.
"""
- return self.client.remove_container_metadata_key(self, key)
+ return self.client.remove_container_metadata_key(self, key,
+ prefix=prefix)
def set_web_index_page(self, page):
diff --git a/awx/lib/site-packages/pyrax/cf_wrapper/storage_object.py b/awx/lib/site-packages/pyrax/cf_wrapper/storage_object.py
index 1eee33178d..54dad378e3 100644
--- a/awx/lib/site-packages/pyrax/cf_wrapper/storage_object.py
+++ b/awx/lib/site-packages/pyrax/cf_wrapper/storage_object.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -106,9 +106,10 @@ class StorageObject(object):
name=self.name, email_addresses=email_addresses)
- def get_metadata(self):
+ def get_metadata(self, prefix=None):
"""Returns this object's metadata."""
- return self.client.get_object_metadata(self.container, self)
+ return self.client.get_object_metadata(self.container, self,
+ prefix=prefix)
def set_metadata(self, metadata, clear=False, prefix=None):
@@ -119,12 +120,13 @@ class StorageObject(object):
clear=clear, prefix=prefix)
- def remove_metadata_key(self, key):
+ def remove_metadata_key(self, key, prefix=None):
"""
Removes the specified key from the storage object's metadata. If the
key does not exist in the metadata, nothing is done.
"""
- self.client.remove_object_metadata_key(self.container, self, key)
+ self.client.remove_object_metadata_key(self.container, self, key,
+ prefix=prefix)
def copy(self, new_container, new_obj_name=None, extra_info=None):
diff --git a/awx/lib/site-packages/pyrax/client.py b/awx/lib/site-packages/pyrax/client.py
index e601a7545c..0b8ab0a3ad 100644
--- a/awx/lib/site-packages/pyrax/client.py
+++ b/awx/lib/site-packages/pyrax/client.py
@@ -1,7 +1,10 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc.
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -20,17 +23,30 @@
OpenStack Client interface. Handles the REST calls and responses.
"""
+from __future__ import absolute_import
+
import json
import logging
import requests
import time
-import urllib
-import urlparse
+import six.moves.urllib as urllib
import pyrax
import pyrax.exceptions as exc
+def _safe_quote(val):
+ """
+ Quoting unicode values can raise a KeyError; catch that and encode in UTF-8.
+ """
+ SAFE_QUOTE_CHARS = "/.?&=,"
+ try:
+ ret = urllib.parse.quote(val, safe=SAFE_QUOTE_CHARS)
+ except KeyError:
+ ret = urllib.parse.quote(val.encode("utf-8"), safe=SAFE_QUOTE_CHARS)
+ return ret
+
+
class BaseClient(object):
"""
The base class for all pyrax clients.
@@ -40,10 +56,11 @@ class BaseClient(object):
# Each client subclass should set their own name.
name = "base"
- def __init__(self, region_name=None, endpoint_type="publicURL",
+ def __init__(self, identity, region_name=None, endpoint_type=None,
management_url=None, service_name=None, timings=False,
verify_ssl=True, http_log_debug=False, timeout=None):
self.version = "v1.1"
+ self.identity = identity
self.region_name = region_name
self.endpoint_type = endpoint_type
self.service_name = service_name
@@ -114,7 +131,7 @@ class BaseClient(object):
def unauthenticate(self):
"""Clears all of our authentication information."""
- pyrax.identity.unauthenticate()
+ self.identity.unauthenticate()
def get_timings(self):
@@ -158,6 +175,11 @@ class BaseClient(object):
kwargs.setdefault("headers", kwargs.get("headers", {}))
kwargs["headers"]["User-Agent"] = self.user_agent
kwargs["headers"]["Accept"] = "application/json"
+ if ("body" in kwargs) or ("data" in kwargs):
+ if "Content-Type" not in kwargs["headers"]:
+ kwargs["headers"]["Content-Type"] = "application/json"
+ elif kwargs["headers"]["Content-Type"] is None:
+ del kwargs["headers"]["Content-Type"]
# Allow subclasses to add their own headers
self._add_custom_headers(kwargs["headers"])
resp, body = pyrax.http.request(method, uri, *args, **kwargs)
@@ -181,7 +203,7 @@ class BaseClient(object):
the request after authenticating if the initial request returned
and Unauthorized exception.
"""
- id_svc = pyrax.identity
+ id_svc = self.identity
if not all((self.management_url, id_svc.token, id_svc.tenant_id)):
id_svc.authenticate()
@@ -191,16 +213,15 @@ class BaseClient(object):
raise exc.ServiceNotAvailable("The '%s' service is not available."
% self)
if uri.startswith("http"):
- parsed = list(urlparse.urlparse(uri))
+ parsed = list(urllib.parse.urlparse(uri))
for pos, item in enumerate(parsed):
if pos < 2:
# Don't escape the scheme or netloc
continue
- parsed[pos] = urllib.quote(parsed[pos], safe="/.?&=,")
- safe_uri = urlparse.urlunparse(parsed)
+ parsed[pos] = _safe_quote(parsed[pos])
+ safe_uri = urllib.parse.urlunparse(parsed)
else:
- safe_uri = "%s%s" % (self.management_url,
- urllib.quote(uri, safe="/.?&=,"))
+ safe_uri = "%s%s" % (self.management_url, _safe_quote(uri))
# Perform the request once. If we get a 401 back then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
@@ -258,7 +279,7 @@ class BaseClient(object):
to modify this method. Please post your findings on GitHub so that
others can benefit.
"""
- return pyrax.identity.authenticate()
+ return self.identity.authenticate()
@property
@@ -267,4 +288,4 @@ class BaseClient(object):
The older parts of this code used 'projectid'; this wraps that
reference.
"""
- return pyrax.identity.tenant_id
+ return self.identity.tenant_id
diff --git a/awx/lib/site-packages/pyrax/cloudblockstorage.py b/awx/lib/site-packages/pyrax/cloudblockstorage.py
index befeb61239..9d13678a3c 100644
--- a/awx/lib/site-packages/pyrax/cloudblockstorage.py
+++ b/awx/lib/site-packages/pyrax/cloudblockstorage.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -27,6 +27,7 @@ from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
+import pyrax.utils as utils
MIN_SIZE = 100
@@ -129,7 +130,9 @@ class CloudBlockStorageVolume(BaseResource):
def __init__(self, *args, **kwargs):
super(CloudBlockStorageVolume, self).__init__(*args, **kwargs)
region = self.manager.api.region_name
- self._nova_volumes = pyrax.connect_to_cloudservers(region).volumes
+ context = self.manager.api.identity
+ cs = pyrax.connect_to_cloudservers(region=region, context=context)
+ self._nova_volumes = cs.volumes
def attach_to_instance(self, instance, mountpoint):
@@ -286,6 +289,24 @@ class CloudBlockStorageManager(BaseManager):
raise
+ def update(self, volume, display_name=None, display_description=None):
+ """
+ Update the specified values on the specified volume. You may specify
+ one or more values to update.
+ """
+ uri = "/%s/%s" % (self.uri_base, utils.get_id(volume))
+ param_dict = {}
+ if display_name:
+ param_dict["display_name"] = display_name
+ if display_description:
+ param_dict["display_description"] = display_description
+ if not param_dict:
+ # Nothing to do!
+ return
+ body = {"volume": param_dict}
+ resp, resp_body = self.api.method_put(uri, body=body)
+
+
def list_snapshots(self):
"""
Pass-through method to allow the list_snapshots() call to be made
@@ -354,6 +375,24 @@ class CloudBlockStorageSnapshotManager(BaseManager):
return snap
+ def update(self, snapshot, display_name=None, display_description=None):
+ """
+ Update the specified values on the specified snapshot. You may specify
+ one or more values to update.
+ """
+ uri = "/%s/%s" % (self.uri_base, utils.get_id(snapshot))
+ param_dict = {}
+ if display_name:
+ param_dict["display_name"] = display_name
+ if display_description:
+ param_dict["display_description"] = display_description
+ if not param_dict:
+ # Nothing to do!
+ return
+ body = {"snapshot": param_dict}
+ resp, resp_body = self.api.method_put(uri, body=body)
+
+
class CloudBlockStorageClient(BaseClient):
"""
This is the primary class for interacting with Cloud Block Storage.
@@ -404,6 +443,16 @@ class CloudBlockStorageClient(BaseClient):
return volume.delete(force=force)
+ @assure_volume
+ def update(self, volume, display_name=None, display_description=None):
+ """
+ Update the specified values on the specified volume. You may specify
+ one or more values to update.
+ """
+ return self._manager.update(volume, display_name=display_name,
+ display_description=display_description)
+
+
@assure_volume
def create_snapshot(self, volume, name=None, description=None, force=False):
"""
@@ -416,7 +465,25 @@ class CloudBlockStorageClient(BaseClient):
description=description, force=force)
+ def get_snapshot(self, snapshot):
+ """
+ Returns the snapshot with the specified snapshot ID value.
+ """
+ return self._snapshot_manager.get(snapshot)
+
+
@assure_snapshot
def delete_snapshot(self, snapshot):
"""Deletes the snapshot."""
return snapshot.delete()
+
+
+ def update_snapshot(self, snapshot, display_name=None,
+ display_description=None):
+ """
+ Update the specified values on the specified snapshot. You may specify
+ one or more values to update.
+ """
+ return self._snapshot_manager.update(snapshot,
+ display_name=display_name,
+ display_description=display_description)
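For illustration only, a minimal sketch of the new update()/update_snapshot() calls; `cbs` is an assumed authenticated CloudBlockStorageClient and the IDs are placeholders:

    # Either resource objects or raw IDs may be passed; only the supplied
    # fields are changed.
    cbs.update("0a1b2c3d", display_name="db-data",
            display_description="PostgreSQL data volume")
    cbs.update_snapshot("4e5f6a7b", display_name="db-data-2014-05-01")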
diff --git a/awx/lib/site-packages/pyrax/clouddatabases.py b/awx/lib/site-packages/pyrax/clouddatabases.py
index 3acc781f78..0350c54a48 100644
--- a/awx/lib/site-packages/pyrax/clouddatabases.py
+++ b/awx/lib/site-packages/pyrax/clouddatabases.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
diff --git a/awx/lib/site-packages/pyrax/clouddns.py b/awx/lib/site-packages/pyrax/clouddns.py
index 2e57bfb521..f216059556 100644
--- a/awx/lib/site-packages/pyrax/clouddns.py
+++ b/awx/lib/site-packages/pyrax/clouddns.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -871,12 +871,14 @@ class CloudDNSManager(BaseManager):
Takes a device and device type and returns the corresponding HREF link
and service name for use with PTR record management.
"""
+ context = self.api.identity
+ region = self.api.region_name
if device_type.lower().startswith("load"):
- ep = pyrax._get_service_endpoint("load_balancer")
+ ep = pyrax._get_service_endpoint(context, "load_balancer", region)
svc = "loadbalancers"
svc_name = "cloudLoadBalancers"
else:
- ep = pyrax._get_service_endpoint("compute")
+ ep = pyrax._get_service_endpoint(context, "compute", region)
svc = "servers"
svc_name = "cloudServersOpenStack"
href = "%s/%s/%s" % (ep, svc, utils.get_id(device))
diff --git a/awx/lib/site-packages/pyrax/cloudloadbalancers.py b/awx/lib/site-packages/pyrax/cloudloadbalancers.py
index 1dcc142aaa..1af39642f3 100644
--- a/awx/lib/site-packages/pyrax/cloudloadbalancers.py
+++ b/awx/lib/site-packages/pyrax/cloudloadbalancers.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -85,9 +85,15 @@ class CloudLoadBalancer(BaseResource):
return self.manager.get_usage(self, start=start, end=end)
+ def get_stats(self):
+ """
+ Returns the stats for this load balancer.
+ """
+ return self.manager.get_stats(self)
+
def _add_details(self, info):
"""Override the base behavior to add Nodes, VirtualIPs, etc."""
- for (key, val) in info.iteritems():
+ for (key, val) in six.iteritems(info):
if key == "nodes":
val = [Node(parent=self, **nd) for nd in val]
elif key == "sessionPersistence":
@@ -956,7 +962,7 @@ class CloudLoadBalancerManager(BaseManager):
return body
- def get_stats(self, loadbalancer):
+ def get_stats(self, loadbalancer=None):
"""
Returns statistics for the given load balancer.
"""
diff --git a/awx/lib/site-packages/pyrax/cloudmonitoring.py b/awx/lib/site-packages/pyrax/cloudmonitoring.py
index 2d37b0985f..b1b5489aca 100644
--- a/awx/lib/site-packages/pyrax/cloudmonitoring.py
+++ b/awx/lib/site-packages/pyrax/cloudmonitoring.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2013 Rackspace
+# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -162,7 +162,7 @@ class CloudMonitorNotificationManager(BaseManager):
"details": details,
}
resp, resp_body = self.api.method_post(uri, body=body)
- return self.get(resp["x-object-id"])
+ return self.get(resp.headers["x-object-id"])
def test_notification(self, notification=None, notification_type=None,
@@ -259,7 +259,7 @@ class CloudMonitorNotificationPlanManager(BaseManager):
ok_state = utils.coerce_string_to_list(ok_state)
body["ok_state"] = make_list_of_ids(ok_state)
resp, resp_body = self.api.method_post(uri, body=body)
- return self.get(resp["x-object-id"])
+ return self.get(resp.headers["x-object-id"])
class CloudMonitorEntityManager(BaseManager):
@@ -380,9 +380,8 @@ class CloudMonitorEntityManager(BaseManager):
raise exc.InvalidMonitoringCheckDetails("Validation "
"failed. Error: '%s'." % dtls)
else:
- status = resp["status"]
- if status == "201":
- check_id = resp["x-object-id"]
+ if resp.status_code == 201:
+ check_id = resp.headers["x-object-id"]
return self.get_check(entity, check_id)
@@ -564,12 +563,11 @@ class CloudMonitorEntityManager(BaseManager):
if metadata:
body["metadata"] = metadata
resp, resp_body = self.api.method_post(uri, body=body)
-
- status = resp["status"]
- if status == "201":
- alarm_id = resp["x-object-id"]
+ if resp.status_code == 201:
+ alarm_id = resp.headers["x-object-id"]
return self.get_alarm(entity, alarm_id)
+
def update_alarm(self, entity, alarm, criteria=None, disabled=False,
label=None, name=None, metadata=None):
"""
@@ -948,9 +946,8 @@ class CloudMonitorClient(BaseClient):
resp = self._entity_manager.create(label=label, name=name, agent=agent,
ip_addresses=ip_addresses, metadata=metadata,
return_response=True)
- status = resp["status"]
- if status == "201":
- ent_id = resp["x-object-id"]
+ if resp.status_code == 201:
+ ent_id = resp.headers["x-object-id"]
return self.get_entity(ent_id)
diff --git a/awx/lib/site-packages/pyrax/cloudnetworks.py b/awx/lib/site-packages/pyrax/cloudnetworks.py
index 5f54322f66..43713d6255 100644
--- a/awx/lib/site-packages/pyrax/cloudnetworks.py
+++ b/awx/lib/site-packages/pyrax/cloudnetworks.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2013 Rackspace
+# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
diff --git a/awx/lib/site-packages/pyrax/exceptions.py b/awx/lib/site-packages/pyrax/exceptions.py
index f9547dd05c..c73af611a8 100644
--- a/awx/lib/site-packages/pyrax/exceptions.py
+++ b/awx/lib/site-packages/pyrax/exceptions.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -245,6 +245,18 @@ class NetworkNotFound(PyraxException):
class NetworkLabelNotUnique(PyraxException):
pass
+class NoClientForService(PyraxException):
+ pass
+
+class NoEndpointForRegion(PyraxException):
+ pass
+
+class NoEndpointForService(PyraxException):
+ pass
+
+class NoContentSpecified(PyraxException):
+ pass
+
class NoMoreResults(PyraxException):
pass
@@ -254,6 +266,9 @@ class NoReloadError(PyraxException):
class NoSSLTerminationConfiguration(PyraxException):
pass
+class NoSuchClient(PyraxException):
+ pass
+
class NoSuchContainer(PyraxException):
pass
@@ -466,6 +481,8 @@ def from_response(response, body):
else:
message = error
details = None
+ else:
+ message = body
return cls(code=status, message=message, details=details,
request_id=request_id)
else:
diff --git a/awx/lib/site-packages/pyrax/fakes.py b/awx/lib/site-packages/pyrax/fakes.py
index b5b72313e7..45909007b6 100644
--- a/awx/lib/site-packages/pyrax/fakes.py
+++ b/awx/lib/site-packages/pyrax/fakes.py
@@ -12,10 +12,6 @@ from pyrax.autoscale import AutoScalePolicy
from pyrax.autoscale import AutoScaleWebhook
from pyrax.autoscale import ScalingGroup
from pyrax.autoscale import ScalingGroupManager
-from pyrax.cf_wrapper.client import BulkDeleter
-from pyrax.cf_wrapper.client import FolderUploader
-from pyrax.cf_wrapper.container import Container
-from pyrax.cf_wrapper.storage_object import StorageObject
from pyrax.client import BaseClient
from pyrax.clouddatabases import CloudDatabaseClient
from pyrax.clouddatabases import CloudDatabaseDatabaseManager
@@ -49,6 +45,13 @@ from pyrax.image import ImageClient
from pyrax.image import ImageManager
from pyrax.image import ImageMemberManager
from pyrax.image import ImageTagManager
+from pyrax.object_storage import BulkDeleter
+from pyrax.object_storage import Container
+from pyrax.object_storage import ContainerManager
+from pyrax.object_storage import FolderUploader
+from pyrax.object_storage import StorageClient
+from pyrax.object_storage import StorageObject
+from pyrax.object_storage import StorageObjectManager
from pyrax.queueing import Queue
from pyrax.queueing import QueueClaim
from pyrax.queueing import QueueMessage
@@ -56,6 +59,9 @@ from pyrax.queueing import QueueClient
from pyrax.queueing import QueueManager
import pyrax.exceptions as exc
+from pyrax.base_identity import BaseIdentity
+from pyrax.base_identity import Endpoint
+from pyrax.base_identity import Service
from pyrax.identity.rax_identity import RaxIdentity
from pyrax.identity.keystone_identity import KeystoneIdentity
import pyrax.utils as utils
@@ -88,41 +94,72 @@ class FakeResponse(object):
return "Line1\nLine2"
def get(self, arg):
- pass
+ return self.headers.get(arg)
def json(self):
return self.content
+class FakeIterator(utils.ResultsIterator):
+ def _init_methods(self):
+ pass
+
+
class FakeClient(object):
user_agent = "Fake"
USER_AGENT = "Fake"
+ def __init__(self, *args, **kwargs):
+ self.identity = FakeIdentity()
+
+
+class FakeStorageClient(StorageClient):
+ def __init__(self, identity=None, *args, **kwargs):
+ if identity is None:
+ identity = FakeIdentity()
+ super(FakeStorageClient, self).__init__(identity, *args, **kwargs)
+
+ def create(self, name):
+ return FakeContainer(self._manager, {"name": name})
+
+
+class FakeContainerManager(ContainerManager):
+ def __init__(self, api=None, *args, **kwargs):
+ if api is None:
+ api = FakeStorageClient()
+ super(FakeContainerManager, self).__init__(api, *args, **kwargs)
+
class FakeContainer(Container):
- def _fetch_cdn_data(self):
- self._cdn_uri = None
- self._cdn_ttl = self.client.default_cdn_ttl
- self._cdn_ssl_uri = None
- self._cdn_streaming_uri = None
- self._cdn_ios_uri = None
- self._cdn_log_retention = False
+ def __init__(self, *args, **kwargs):
+ super(FakeContainer, self).__init__(*args, **kwargs)
+ self.object_manager = FakeStorageObjectManager(self.manager.api,
+ uri_base=self.name)
+ self.object_manager._container = self
+
+
+class FakeStorageObjectManager(StorageObjectManager):
+ def __init__(self, api=None, *args, **kwargs):
+ if api is None:
+ api = FakeStorageClient()
+ if "uri_base" not in kwargs:
+ kwargs["uri_base"] = utils.random_ascii()
+ super(FakeStorageObjectManager, self).__init__(api, *args, **kwargs)
class FakeStorageObject(StorageObject):
- def __init__(self, client, container, name=None, total_bytes=None,
- content_type=None, last_modified=None, etag=None, attdict=None):
+ def __init__(self, manager, name=None, total_bytes=None, content_type=None,
+ last_modified=None, etag=None, attdict=None):
"""
The object can either be initialized with individual params, or by
passing the dict that is returned by swiftclient.
"""
- self.client = client
- self.container = container
+ self.manager = manager
self.name = name
- self.total_bytes = total_bytes
+ self.bytes = total_bytes or 0
self.content_type = content_type
self.last_modified = last_modified
- self.etag = etag
+ self.hash = etag
if attdict:
self._read_attdict(attdict)
@@ -165,7 +202,8 @@ class FakeService(object):
class FakeCSClient(FakeService):
def __init__(self, *args, **kwargs):
- super(FakeCSClient, self).__init__(*args, **kwargs)
+ ident = FakeIdentity()
+ super(FakeCSClient, self).__init__(ident, *args, **kwargs)
def dummy(self):
pass
@@ -202,21 +240,10 @@ class FakeBulkDeleter(BulkDeleter):
self.completed = True
-class FakeEntryPoint(object):
- def __init__(self, name):
- self.name = name
-
- def load(self):
- def dummy(*args, **kwargs):
- return self.name
- return dummy
-
-fakeEntryPoints = [FakeEntryPoint("a"), FakeEntryPoint("b"),
- FakeEntryPoint("c")]
-
-
class FakeManager(object):
- api = FakeClient()
+ def __init__(self, *args, **kwargs):
+ super(FakeManager, self).__init__(*args, **kwargs)
+ self.api = FakeClient()
def list(self):
pass
@@ -241,25 +268,6 @@ class FakeException(BaseException):
pass
-class FakeServiceCatalog(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def get_token(self):
- return "fake_token"
-
- def url_for(self, attr=None, filter_value=None,
- service_type=None, endpoint_type="publicURL",
- service_name=None, volume_service_name=None):
- if filter_value == "ALL":
- raise exc.AmbiguousEndpoints
- elif filter_value == "KEY":
- raise KeyError
- elif filter_value == "EP":
- raise exc.EndpointNotFound
- return "http://example.com"
-
-
class FakeKeyring(object):
password_set = False
@@ -315,7 +323,8 @@ class FakeDatabaseClient(CloudDatabaseClient):
def __init__(self, *args, **kwargs):
self._manager = FakeDatabaseManager(self)
self._flavor_manager = FakeManager()
- super(FakeDatabaseClient, self).__init__("fakeuser",
+ ident = FakeIdentity()
+ super(FakeDatabaseClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
@@ -326,8 +335,9 @@ class FakeNovaVolumeClient(BaseClient):
class FakeBlockStorageManager(CloudBlockStorageManager):
def __init__(self, api=None, *args, **kwargs):
+ ident = FakeIdentity()
if api is None:
- api = FakeBlockStorageClient()
+ api = FakeBlockStorageClient(ident)
super(FakeBlockStorageManager, self).__init__(api, *args, **kwargs)
@@ -350,13 +360,15 @@ class FakeBlockStorageClient(CloudBlockStorageClient):
def __init__(self, *args, **kwargs):
self._types_manager = FakeManager()
self._snapshot_manager = FakeManager()
- super(FakeBlockStorageClient, self).__init__("fakeuser",
+ ident = FakeIdentity()
+ super(FakeBlockStorageClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
class FakeLoadBalancerClient(CloudLoadBalancerClient):
def __init__(self, *args, **kwargs):
- super(FakeLoadBalancerClient, self).__init__("fakeuser",
+ ident = FakeIdentity()
+ super(FakeLoadBalancerClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
@@ -409,7 +421,8 @@ class FakeStatusChanger(object):
class FakeDNSClient(CloudDNSClient):
def __init__(self, *args, **kwargs):
- super(FakeDNSClient, self).__init__("fakeuser",
+ ident = FakeIdentity()
+ super(FakeDNSClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
@@ -447,7 +460,8 @@ class FakeDNSDevice(FakeLoadBalancer):
class FakeCloudNetworkClient(CloudNetworkClient):
def __init__(self, *args, **kwargs):
- super(FakeCloudNetworkClient, self).__init__("fakeuser",
+ ident = FakeIdentity()
+ super(FakeCloudNetworkClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
@@ -463,8 +477,9 @@ class FakeCloudNetwork(CloudNetwork):
class FakeAutoScaleClient(AutoScaleClient):
def __init__(self, *args, **kwargs):
+ ident = FakeIdentity()
self._manager = FakeManager()
- super(FakeAutoScaleClient, self).__init__(*args, **kwargs)
+ super(FakeAutoScaleClient, self).__init__(ident, *args, **kwargs)
class FakeAutoScalePolicy(AutoScalePolicy):
@@ -500,7 +515,8 @@ class FakeScalingGroup(ScalingGroup):
class FakeCloudMonitorClient(CloudMonitorClient):
def __init__(self, *args, **kwargs):
- super(FakeCloudMonitorClient, self).__init__("fakeuser",
+ ident = FakeIdentity()
+ super(FakeCloudMonitorClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
@@ -547,20 +563,10 @@ class FakeQueueClaim(QueueClaim):
**kwargs)
-class FakeQueueMessage(QueueMessage):
- def __init__(self, *args, **kwargs):
- id_ = utils.random_unicode()
- href = "http://example.com/%s" % id_
- info = kwargs.pop("info", {"href": href})
- info["name"] = utils.random_unicode()
- mgr = kwargs.pop("manager", FakeQueueManager())
- super(FakeQueueMessage, self).__init__(manager=mgr, info=info, *args,
- **kwargs)
-
-
class FakeQueueClient(QueueClient):
def __init__(self, *args, **kwargs):
- super(FakeQueueClient, self).__init__("fakeuser",
+ ident = FakeIdentity()
+ super(FakeQueueClient, self).__init__(ident, "fakeuser",
"fakepassword", *args, **kwargs)
@@ -584,8 +590,10 @@ class FakeImage(Image):
class FakeImageClient(ImageClient):
- def __init__(self, *args, **kwargs):
- super(FakeImageClient, self).__init__("fakeuser",
+ def __init__(self, identity=None, *args, **kwargs):
+ if identity is None:
+ identity = FakeIdentity()
+ super(FakeImageClient, self).__init__(identity, "fakeuser",
"fakepassword", *args, **kwargs)
@@ -615,15 +623,43 @@ class FakeImageManager(ImageManager):
self.id = utils.random_ascii()
-class FakeIdentity(RaxIdentity):
+class FakeIdentityService(Service):
+ def __init__(self, identity=None, *args, **kwargs):
+ self.identity = identity or FakeIdentity()
+ self.name = "fake"
+ self.prefix = ""
+ self.service_type = "fake"
+ self.clients = {}
+ self.endpoints = utils.DotDict()
+
+
+class FakeEndpoint(Endpoint):
+ def __init__(self, ep_dict=None, service=None, region=None, identity=None):
+ if ep_dict is None:
+ ep_dict = {}
+ if identity is None:
+ identity = FakeIdentity()
+ if service is None:
+ service = FakeIdentityService(identity)
+ if region is None:
+ region = "fake_region"
+ super(FakeEndpoint, self).__init__(ep_dict, service, region, identity)
+
+
+class FakeRaxIdentity(RaxIdentity):
+ pass
+
+
+class FakeIdentity(BaseIdentity):
"""Class that returns canned authentication responses."""
def __init__(self, *args, **kwargs):
super(FakeIdentity, self).__init__(*args, **kwargs)
self._good_username = "fakeuser"
self._good_password = "fakeapikey"
self._default_region = random.choice(("DFW", "ORD"))
+ self.services = {"fake": FakeIdentityService(self)}
- def authenticate(self):
+ def authenticate(self, connect=False):
if ((self.username == self._good_username) and
(self.password == self._good_password)):
self._parse_response(self.fake_response())
@@ -808,6 +844,9 @@ fake_identity_response = {u'access':
'region': 'DFW',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
+ 'region': 'FAKE',
+ 'tenantId': 'MossoCloudFS_abc'},
+ {u'publicURL': 'https://cdn1.clouddrive.com/v1/MossoCloudFS_abc',
'region': 'SYD',
'tenantId': 'MossoCloudFS_abc'},
{u'publicURL': 'https://cdn2.clouddrive.com/v1/MossoCloudFS_abc',
diff --git a/awx/lib/site-packages/pyrax/http.py b/awx/lib/site-packages/pyrax/http.py
index acdfe9f55f..c5c75cc731 100644
--- a/awx/lib/site-packages/pyrax/http.py
+++ b/awx/lib/site-packages/pyrax/http.py
@@ -1,4 +1,4 @@
-# Copyright 2014 Rackspace
+# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -17,6 +17,7 @@
Wrapper around the requests library. Used for making all HTTP calls.
"""
+import logging
import json
import requests
@@ -48,7 +49,7 @@ def request(method, uri, *args, **kwargs):
req_method = req_methods[method.upper()]
raise_exception = kwargs.pop("raise_exception", True)
kwargs["headers"] = kwargs.get("headers", {})
- http_log_req(args, kwargs)
+ http_log_req(method, uri, args, kwargs)
data = None
if "data" in kwargs:
# The 'data' kwarg is used when you don't want json encoding.
@@ -72,27 +73,26 @@ def request(method, uri, *args, **kwargs):
return resp, body
-def http_log_req(args, kwargs):
+def http_log_req(method, uri, args, kwargs):
"""
When pyrax.get_http_debug() is True, outputs the equivalent `curl`
command for the API request being made.
"""
if not pyrax.get_http_debug():
return
- string_parts = ["curl -i"]
+ string_parts = ["curl -i -X %s" % method]
for element in args:
- if element in ("GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"):
- string_parts.append(" -X %s" % element)
- else:
- string_parts.append(" %s" % element)
-
+ string_parts.append("%s" % element)
for element in kwargs["headers"]:
- header = " -H '%s: %s'" % (element, kwargs["headers"][element])
+ header = "-H '%s: %s'" % (element, kwargs["headers"][element])
string_parts.append(header)
-
- pyrax._logger.debug("\nREQ: %s\n" % "".join(string_parts))
+ string_parts.append(uri)
+ log = logging.getLogger("pyrax")
+ log.debug("\nREQ: %s\n" % " ".join(string_parts))
if "body" in kwargs:
pyrax._logger.debug("REQ BODY: %s\n" % (kwargs["body"]))
+ if "data" in kwargs:
+ pyrax._logger.debug("REQ DATA: %s\n" % (kwargs["data"]))
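For illustration only, a minimal sketch of surfacing this curl-style request logging, assuming pyrax's existing set_http_debug() toggle:

    import logging
    import pyrax

    logging.basicConfig(level=logging.DEBUG)   # the "pyrax" logger needs a handler
    pyrax.set_http_debug(True)                 # http_log_req()/http_log_resp() now emit output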
def http_log_resp(resp, body):
@@ -102,4 +102,7 @@ def http_log_resp(resp, body):
"""
if not pyrax.get_http_debug():
return
- pyrax._logger.debug("RESP: %s %s\n", resp, body)
+ log = logging.getLogger("pyrax")
+ log.debug("RESP: %s\n%s", resp, resp.headers)
+ if body:
+ log.debug("RESP BODY: %s", body)
diff --git a/awx/lib/site-packages/pyrax/identity/keystone_identity.py b/awx/lib/site-packages/pyrax/identity/keystone_identity.py
index a36b7013b9..323dcc4048 100644
--- a/awx/lib/site-packages/pyrax/identity/keystone_identity.py
+++ b/awx/lib/site-packages/pyrax/identity/keystone_identity.py
@@ -1,12 +1,14 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
import pyrax
-from pyrax.base_identity import BaseAuth
-import pyrax.exceptions as exc
+from ..base_identity import BaseIdentity
+from .. import exceptions as exc
-class KeystoneIdentity(BaseAuth):
+class KeystoneIdentity(BaseIdentity):
"""
Implements the Keystone-specific behaviors for Identity. In most
cases you will want to create specific subclasses to implement the
diff --git a/awx/lib/site-packages/pyrax/identity/rax_identity.py b/awx/lib/site-packages/pyrax/identity/rax_identity.py
index a2cea2a0f1..741bca38c1 100644
--- a/awx/lib/site-packages/pyrax/identity/rax_identity.py
+++ b/awx/lib/site-packages/pyrax/identity/rax_identity.py
@@ -1,17 +1,21 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-from six.moves import configparser
+from __future__ import absolute_import
-from pyrax.base_identity import BaseAuth
-from pyrax.base_identity import User
-import pyrax.exceptions as exc
-import pyrax.utils as utils
+from six.moves import configparser as ConfigParser
+
+import pyrax
+from ..base_identity import BaseIdentity
+from ..base_identity import User
+from ..cloudnetworks import CloudNetworkClient
+from .. import exceptions as exc
+from .. import utils as utils
AUTH_ENDPOINT = "https://identity.api.rackspacecloud.com/v2.0/"
-class RaxIdentity(BaseAuth):
+class RaxIdentity(BaseIdentity):
"""
This class handles all of the authentication requirements for working
with the Rackspace Cloud.
@@ -21,7 +25,8 @@ class RaxIdentity(BaseAuth):
def _get_auth_endpoint(self):
- return self._auth_endpoint or AUTH_ENDPOINT
+ return (self._auth_endpoint or pyrax.get_setting("auth_endpoint")
+ or AUTH_ENDPOINT)
def _read_credential_file(self, cfg):
@@ -31,12 +36,12 @@ class RaxIdentity(BaseAuth):
self.username = cfg.get("rackspace_cloud", "username")
try:
self.password = cfg.get("rackspace_cloud", "api_key", raw=True)
- except configparser.NoOptionError as e:
+ except ConfigParser.NoOptionError as e:
# Allow either the use of either 'api_key' or 'password'.
self.password = cfg.get("rackspace_cloud", "password", raw=True)
- def _get_credentials(self):
+ def _format_credentials(self):
"""
Returns the current credentials in the format expected by the
authentication service. Note that by default Rackspace credentials
@@ -47,23 +52,40 @@ class RaxIdentity(BaseAuth):
if self._creds_style == "apikey":
return {"auth": {"RAX-KSKEY:apiKeyCredentials":
{"username": "%s" % self.username,
- "apiKey": "%s" % self.password}}}
+ "apiKey": "%s" % self.api_key}}}
else:
# Return in the default password-style
- return super(RaxIdentity, self)._get_credentials()
+ return super(RaxIdentity, self)._format_credentials()
- def authenticate(self):
+ def set_credentials(self, username, password=None, region=None,
+ tenant_id=None, authenticate=False):
+ """
+ Sets the username and password directly. Because Rackspace auth uses
+ the api_key, make sure that any old values are cleared.
+ """
+ self.api_key = None
+ super(RaxIdentity, self).set_credentials(username, password=password,
+ region=region, tenant_id=tenant_id, authenticate=authenticate)
+
+
+ def authenticate(self, username=None, password=None, api_key=None,
+ tenant_id=None, connect=False):
"""
If the user's credentials include an API key, the default behavior will
work. But if they are using a password, the initial attempt will fail,
so try again, but this time using the standard password format.
+
+ The 'connect' parameter is retained for backwards compatibility. It no
+ longer has any effect.
"""
try:
- super(RaxIdentity, self).authenticate()
+ super(RaxIdentity, self).authenticate(username=username,
+ password=password, api_key=api_key, tenant_id=tenant_id)
except exc.AuthenticationFailed:
self._creds_style = "password"
- super(RaxIdentity, self).authenticate()
+ super(RaxIdentity, self).authenticate(username=username,
+ password=password, api_key=api_key, tenant_id=tenant_id)
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
@@ -80,16 +102,16 @@ class RaxIdentity(BaseAuth):
# object_store endpoints. We can then add these to the initial
# endpoints returned by the primary tenant ID, and then continue with
# the auth process.
- main_resp = self._call_token_auth(token, tenant_id, tenant_name)
- main_body = main_resp.json()
+ main_resp, main_body = self._call_token_auth(token, tenant_id,
+ tenant_name)
# Get the swift tenant ID
roles = main_body["access"]["user"]["roles"]
ostore = [role for role in roles
if role["name"] == "object-store:default"]
if ostore:
ostore_tenant_id = ostore[0]["tenantId"]
- ostore_resp = self._call_token_auth(token, ostore_tenant_id, None)
- ostore_body = ostore_resp.json()
+ ostore_resp, ostore_body = self._call_token_auth(token,
+ ostore_tenant_id, None)
ostore_cat = ostore_body["access"]["serviceCatalog"]
main_cat = main_body["access"]["serviceCatalog"]
main_cat.extend(ostore_cat)
@@ -106,13 +128,44 @@ class RaxIdentity(BaseAuth):
self._default_region = defreg
+ def get_client(self, service, region, public=True, cached=True):
+ """
+ Returns the client object for the specified service and region.
+
+ By default the public endpoint is used. If you wish to work with a
+ service's internal endpoints, specify `public=False`.
+
+ By default, if a client has already been created for the given service,
+ region, and public values, that will be returned. To force a new client
+ to be created, pass 'cached=False'.
+ """
+ client_class = None
+ # Cloud Networks currently uses nova-networks, so it doesn't appear as
+ # a separate entry in the service catalog. This hack will allow context
+ # objects to continue to work with Rackspace Cloud Networks. When the
+ # Neutron service is implemented, this hack will have to be removed.
+ if service in ("compute:networks", "networks", "network",
+ "cloudnetworks", "cloud_networks"):
+ service = "compute"
+ client_class = CloudNetworkClient
+ return super(RaxIdentity, self).get_client(service, region,
+ public=public, cached=cached, client_class=client_class)
+
+
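Usage sketch of the service aliasing above (not part of the patch): `ident` is assumed to be an authenticated RaxIdentity instance, and "DFW" is just an example region.

# Any of the network aliases resolves to the "compute" catalog entry, but
# the client returned is a CloudNetworkClient rather than a compute client.
net_client = ident.get_client("cloudnetworks", "DFW")

# Use the internal (ServiceNet) endpoint and bypass the client cache.
net_internal = ident.get_client("cloud_networks", "DFW", public=False,
        cached=False)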
def find_user_by_name(self, name):
"""
Returns a User object by searching for the supplied user name. Returns
None if there is no match for the given name.
"""
- uri = "users?name=%s" % name
- return self._find_user(uri)
+ return self.get_user(username=name)
+
+
+ def find_user_by_email(self, email):
+ """
+ Returns a User object by searching for the supplied user's email
+ address. Returns None if there is no match for the given email address.
+ """
+ return self.get_user(email=email)
def find_user_by_id(self, uid):
@@ -120,18 +173,42 @@ class RaxIdentity(BaseAuth):
Returns a User object by searching for the supplied user ID. Returns
None if there is no match for the given ID.
"""
- uri = "users/%s" % uid
- return self._find_user(uri)
+ return self.get_user(user_id=uid)
- def _find_user(self, uri):
- """Handles the 'find' code for both name and ID searches."""
- resp = self.method_get(uri)
- if resp.status_code in (403, 404):
- return None
- jusers = resp.json()
- user_info = jusers["user"]
- return User(self, user_info)
+ def get_user(self, user_id=None, username=None, email=None):
+ """
+ Returns the user specified by either ID, username or email.
+
+ Since more than one user can have the same email address, searching by that
+ term will return a list of 1 or more User objects. Searching by
+ username or ID will return a single User.
+
+ If a user_id that doesn't belong to the current account is searched
+ for, a Forbidden exception is raised. When searching by username or
+ email, a NotFound exception is raised if there is no matching user.
+ """
+ if user_id:
+ uri = "/users/%s" % user_id
+ elif username:
+ uri = "/users?name=%s" % username
+ elif email:
+ uri = "/users?email=%s" % email
+ else:
+ raise ValueError("You must include one of 'user_id', "
+ "'username', or 'email' when calling get_user().")
+ resp, resp_body = self.method_get(uri)
+ if resp.status_code == 404:
+ raise exc.NotFound("No such user exists.")
+ users = resp_body.get("users", [])
+ if users:
+ return [User(self, user) for user in users]
+ else:
+ user = resp_body.get("user", {})
+ if user:
+ return User(self, user)
+ else:
+ raise exc.NotFound("No such user exists.")
def update_user(self, user, email=None, username=None,
@@ -151,24 +228,26 @@ class RaxIdentity(BaseAuth):
if enabled is not None:
upd["enabled"] = enabled
data = {"user": upd}
- resp = self.method_put(uri, data=data)
- return User(self, resp.json())
+ resp, resp_body = self.method_put(uri, data=data)
+ if resp.status_code in (401, 403, 404):
+ raise exc.AuthorizationFailure("You are not authorized to update "
+ "users.")
+ return User(self, resp_body)
- def list_credentials(self, user):
+ def reset_api_key(self, user=None):
"""
- Returns a user's non-password credentials.
- """
- user_id = utils.get_id(user)
- uri = "users/%s/OS-KSADM/credentials" % user_id
- return self.method_get(uri)
+ Resets the API key for the specified user, or if no user is specified,
+ for the current user. Returns the newly-created API key.
-
- def get_user_credentials(self, user):
+ Resetting an API key does not invalidate any authenticated sessions,
+ nor does it revoke any tokens.
"""
- Returns a user's non-password credentials.
- """
- user_id = utils.get_id(user)
- base_uri = "users/%s/OS-KSADM/credentials/RAX-KSKEY:apiKeyCredentials"
- uri = base_uri % user_id
- return self.method_get(uri)
+ if user is None:
+ user_id = utils.get_id(self)
+ else:
+ user_id = utils.get_id(user)
+ uri = "users/%s/OS-KSADM/credentials/" % user_id
+ uri += "RAX-KSKEY:apiKeyCredentials/RAX-AUTH/reset"
+ resp, resp_body = self.method_post(uri)
+ return resp_body.get("RAX-KSKEY:apiKeyCredentials", {}).get("apiKey")
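A brief sketch of the reworked user-lookup and API-key calls above, for reference only; `ident` is assumed to be an authenticated RaxIdentity instance, and the names and addresses are made up.

# Lookup by username or ID returns a single User; lookup by email returns
# a list, since several users can share an address.
user = ident.get_user(username="alice")
matches = ident.get_user(email="alice@example.com")
same_matches = ident.find_user_by_email("alice@example.com")

# Reset the API key for that user; the new key is returned. With no
# argument, the current user's own key is reset.
new_key = ident.reset_api_key(user)
my_key = ident.reset_api_key()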
diff --git a/awx/lib/site-packages/pyrax/image.py b/awx/lib/site-packages/pyrax/image.py
index 810a4db403..c5c61bb737 100644
--- a/awx/lib/site-packages/pyrax/image.py
+++ b/awx/lib/site-packages/pyrax/image.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-# Copyright 2014 Rackspace
+# Copyright (c)2014 Rackspace US, Inc.
# All Rights Reserved.
#
@@ -20,6 +20,7 @@
from functools import wraps
import pyrax
+from pyrax.object_storage import StorageObject
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
@@ -251,6 +252,45 @@ class ImageManager(BaseManager):
return ret
+ def create(self, name, img_format=None, img_container_format=None,
+ data=None, container=None, obj=None, metadata=None):
+ """
+ Creates a new image with the specified name. The image data can either
+ be supplied directly in the 'data' parameter, or it can be an image
+ stored in the object storage service. In the case of the latter, you
+ can either supply the container and object names, or simply a
+ StorageObject reference.
+
+ You may specify the image and image container formats; if unspecified,
+ the default of "vhd" for image format and "bare" for image container
+ format will be used.
+
+ NOTE: This is blocking, and may take a while to complete.
+ """
+ if img_format is None:
+ img_format = "vhd"
+ if img_container_format is None:
+ img_container_format = "bare"
+ headers = {
+ "X-Image-Meta-name": name,
+ "X-Image-Meta-disk_format": img_format,
+ "X-Image-Meta-container_format": img_container_format,
+ }
+ if data:
+ img_data = data
+ else:
+ ident = self.api.identity
+ region = self.api.region_name
+ clt = ident.get_client("object_store", region)
+ if not isinstance(obj, StorageObject):
+ obj = clt.get_object(container, obj)
+ img_data = obj.fetch()
+ uri = "%s/images" % self.uri_base
+ resp, resp_body = self.api.method_post(uri, headers=headers,
+ data=img_data)
+
+
+
def update(self, img, value_dict):
"""
Accepts an image reference (object or ID) and dictionary of key/value
@@ -294,7 +334,8 @@ class ImageManager(BaseManager):
raise exc.InvalidImageMemberStatus("The status value must be one "
"of 'accepted', 'rejected', or 'pending'. Received: '%s'" %
status)
- project_id = pyrax.identity.tenant_id
+ api = self.api
+ project_id = api.identity.tenant_id
uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
body = {"status": status}
try:
@@ -385,7 +426,10 @@ class ImageTasksManager(BaseManager):
if cont:
# Verify that it exists. If it doesn't, a NoSuchContainer exception
# will be raised.
- pyrax.cloudfiles.get_container(cont)
+ api = self.api
+ rgn = api.region_name
+ cf = api.identity.object_store[rgn].client
+ cf.get_container(cont)
return super(ImageTasksManager, self).create(name, *args, **kwargs)
@@ -518,6 +562,19 @@ class ImageClient(BaseClient):
return self._manager.update(img, value_dict)
+ def create(self, name, img_format=None, data=None, container=None,
+ obj=None, metadata=None):
+ """
+ Creates a new image with the specified name. The image data can either
+ be supplied directly in the 'data' parameter, or it can be an image
+ stored in the object storage service. In the case of the latter, you
+ can either supply the container and object names, or simply a
+ StorageObject reference.
+ """
+ return self._manager.create(name, img_format, data=data,
+ container=container, obj=obj)
+
+
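A usage sketch of the new create() call, not part of the patch; `imgs` is assumed to be a pyrax ImageClient, and the container, object, and file names are illustrative.

# Create an image from an object already stored in Cloud Files. The
# format defaults to "vhd" with a "bare" container format.
imgs.create("web01-snapshot", container="exported-images",
        obj="web01-snapshot.vhd")

# Or supply the raw image bytes directly (blocking; may take a while).
with open("web01-snapshot.vhd", "rb") as fh:
    imgs.create("web01-snapshot", img_format="vhd", data=fh.read())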
def change_image_name(self, img, newname):
"""
Image name can be changed via the update() method. This is simply a
diff --git a/awx/lib/site-packages/pyrax/manager.py b/awx/lib/site-packages/pyrax/manager.py
index 94453138cc..4f1e968efc 100644
--- a/awx/lib/site-packages/pyrax/manager.py
+++ b/awx/lib/site-packages/pyrax/manager.py
@@ -1,7 +1,7 @@
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Rackspace
+# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
diff --git a/awx/lib/site-packages/pyrax/object_storage.py b/awx/lib/site-packages/pyrax/object_storage.py
new file mode 100644
index 0000000000..5e47d9bff6
--- /dev/null
+++ b/awx/lib/site-packages/pyrax/object_storage.py
@@ -0,0 +1,3270 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Rackspace
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from __future__ import print_function
+from __future__ import absolute_import
+import datetime
+from functools import wraps
+import hashlib
+import hmac
+import json
+import logging
+import math
+import mimetypes
+import os
+import re
+import six
+import threading
+import time
+import uuid
+
+import pyrax
+from pyrax.client import BaseClient
+import pyrax.exceptions as exc
+from pyrax.manager import BaseManager
+from pyrax.resource import BaseResource
+import pyrax.utils as utils
+
+ACCOUNT_META_PREFIX = "X-Account-Meta-"
+CONTAINER_META_PREFIX = "X-Container-Meta-"
+CONTAINER_HEAD_PREFIX = "X-Container-"
+OBJECT_META_PREFIX = "X-Object-Meta-"
+
+# Maximum size of a stored object: 5GB - 1
+MAX_FILE_SIZE = 5368709119
+# Default size for chunked uploads, in bytes
+DEFAULT_CHUNKSIZE = 65536
+# The default for CDN when TTL is not specified.
+DEFAULT_CDN_TTL = 86400
+# When comparing files dates, represents a date older than anything.
+EARLY_DATE_STR = "1900-01-01T00:00:00"
+
+# Used to indicate values that are lazy-loaded
+class Fault_cls(object):
+ def __nonzero__(self):
+ return False
+
+FAULT = Fault_cls()
+
+
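A standalone illustration of the lazy-load sentinel pattern used here, included only for clarity: the sentinel is falsy, and attributes are compared with `is` to decide whether data still needs to be fetched. (Note that `__nonzero__` is the Python 2 name of the truth-value hook; Python 3 looks for `__bool__`.)

class _Sentinel(object):
    def __bool__(self):            # Python 3 truth-value hook
        return False
    __nonzero__ = __bool__         # Python 2 uses this name instead

MISSING = _Sentinel()

cached_value = MISSING
if cached_value is MISSING:        # not yet loaded
    cached_value = "fetched lazily"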
+def assure_container(fnc):
+ """
+ Ensures that a Container object is available, whether an actual
+ Container instance or just the name of a container is passed in.
+ """
+ @wraps(fnc)
+ def _wrapped(self, container, *args, **kwargs):
+ if not isinstance(container, Container):
+ # Must be the name
+ container = self.get(container)
+ return fnc(self, container, *args, **kwargs)
+ return _wrapped
+
+
+def _massage_metakeys(dct, prfx):
+ """
+ Returns a copy of the supplied dictionary, prepending the specified
+ prefix to any keys that do not already begin with it.
+ """
+ lowprefix = prfx.lower()
+ ret = {}
+ for k, v in list(dct.items()):
+ if not k.lower().startswith(lowprefix):
+ k = "%s%s" % (prfx, k)
+ ret[k] = v
+ return ret
+
+
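A quick illustration of the helper above (the prefix check is case-insensitive, so keys that already carry the prefix are passed through unchanged):

meta = {"color": "blue", "X-Container-Meta-Size": "large"}
print(_massage_metakeys(meta, "X-Container-Meta-"))
# {'X-Container-Meta-color': 'blue', 'X-Container-Meta-Size': 'large'}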
+def _validate_file_or_path(file_or_path, obj_name):
+ if isinstance(file_or_path, six.string_types):
+ # Make sure it exists
+ if not os.path.exists(file_or_path):
+ raise exc.FileNotFound("The file '%s' does not exist." %
+ file_or_path)
+ fname = os.path.basename(file_or_path)
+ else:
+ try:
+ fname = os.path.basename(file_or_path.name)
+ except AttributeError:
+ fname = None
+ return obj_name or fname
+
+
+def _valid_upload_key(fnc):
+ @wraps(fnc)
+ def wrapped(self, upload_key, *args, **kwargs):
+ try:
+ self.folder_upload_status[upload_key]
+ except KeyError:
+ raise exc.InvalidUploadID("There is no folder upload with the "
+ "key '%s'." % upload_key)
+ return fnc(self, upload_key, *args, **kwargs)
+ return wrapped
+
+
+def _handle_container_not_found(fnc):
+ @wraps(fnc)
+ def wrapped(self, container, *args, **kwargs):
+ try:
+ return fnc(self, container, *args, **kwargs)
+ except exc.NotFound as e:
+ name = utils.get_name(container)
+ e.message = "Container '%s' doesn't exist" % name
+ raise exc.NoSuchContainer(e)
+ return wrapped
+
+
+def _handle_object_not_found(fnc):
+ @wraps(fnc)
+ def wrapped(self, obj, *args, **kwargs):
+ try:
+ return fnc(self, obj, *args, **kwargs)
+ except exc.NotFound as e:
+ name = utils.get_name(obj)
+ e.message = "Object '%s' doesn't exist" % name
+ raise exc.NoSuchObject(e)
+ return wrapped
+
+
+def get_file_size(fileobj):
+ """
+ Returns the size of a file-like object.
+ """
+ currpos = fileobj.tell()
+ fileobj.seek(0, 2)
+ total_size = fileobj.tell()
+ fileobj.seek(currpos)
+ return total_size
+
+
+
+class Container(BaseResource):
+ def __init__(self, *args, **kwargs):
+ super(Container, self).__init__(*args, **kwargs)
+ self._cdn_enabled = FAULT
+ self._cdn_uri = FAULT
+ self._cdn_ttl = FAULT
+ self._cdn_ssl_uri = FAULT
+ self._cdn_streaming_uri = FAULT
+ self._cdn_ios_uri = FAULT
+ self._cdn_log_retention = FAULT
+ self.object_manager = StorageObjectManager(self.manager.api,
+ uri_base=self.name, resource_class=StorageObject)
+ self._non_display = ["object_manager"]
+ self._backwards_aliases()
+
+
+ def _backwards_aliases(self):
+ self.get_objects = self.list
+ self.get_object_names = self.list_object_names
+ # Prevent these from displaying
+ self._non_display.extend(["get_objects", "get_object",
+ "get_object_names"])
+
+
+ def __repr__(self):
+ return "" % self.name
+
+
+ @property
+ def id(self):
+ """
+ Since a container's name serves as its ID, this will allow both to be
+ used.
+ """
+ return self.name
+
+
+ def _set_cdn_defaults(self):
+ """Sets all the CDN-related attributes to default values."""
+ if self._cdn_enabled is FAULT:
+ self._cdn_enabled = False
+ self._cdn_uri = None
+ self._cdn_ttl = DEFAULT_CDN_TTL
+ self._cdn_ssl_uri = None
+ self._cdn_streaming_uri = None
+ self._cdn_ios_uri = None
+ self._cdn_log_retention = False
+
+
+ def _fetch_cdn_data(self):
+ """Fetches the container's CDN data from the CDN service"""
+ if self._cdn_enabled is FAULT:
+ headers = self.manager.fetch_cdn_data(self)
+ else:
+ headers = {}
+ # Set defaults in case not all headers are present.
+ self._set_cdn_defaults()
+ if not headers:
+ # Not CDN enabled; return
+ return
+ else:
+ self._cdn_enabled = True
+ for key, value in headers.items():
+ low_key = key.lower()
+ if low_key == "x-cdn-uri":
+ self._cdn_uri = value
+ elif low_key == "x-ttl":
+ self._cdn_ttl = int(value)
+ elif low_key == "x-cdn-ssl-uri":
+ self._cdn_ssl_uri = value
+ elif low_key == "x-cdn-streaming-uri":
+ self._cdn_streaming_uri = value
+ elif low_key == "x-cdn-ios-uri":
+ self._cdn_ios_uri = value
+ elif low_key == "x-log-retention":
+ self._cdn_log_retention = (value == "True")
+
+
+ def get_metadata(self, prefix=None):
+ """
+ Returns a dictionary containing the metadata for this container.
+ """
+ return self.manager.get_metadata(self, prefix=prefix)
+
+
+ def set_metadata(self, metadata, clear=False, prefix=None):
+ """
+ Accepts a dictionary of metadata key/value pairs and updates the
+ specified container metadata with them.
+
+ If 'clear' is True, any existing metadata is deleted and only the
+ passed metadata is retained. Otherwise, the values passed here update
+ the container's metadata.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+
+ By default, the standard container metadata prefix
+ ('X-Container-Meta-') is prepended to the header name if it isn't
+ present. For non-standard headers, you must include a non-None prefix,
+ such as an empty string.
+ """
+ return self.manager.set_metadata(self, metadata, clear=clear,
+ prefix=prefix)
+
+
+ def remove_metadata_key(self, key, prefix=None):
+ """
+ Removes the specified key from the container's metadata. If the key
+ does not exist in the metadata, nothing is done.
+ """
+ return self.manager.remove_metadata_key(self, key, prefix=prefix)
+
+
+ def set_web_index_page(self, page):
+ """
+ Sets the header indicating the index page in this container when
+ creating a static website.
+
+ Note: the container must be CDN-enabled for this to have any effect.
+ """
+ return self.manager.set_web_index_page(self, page)
+
+
+ def set_web_error_page(self, page):
+ """
+ Sets the header indicating the error page in this container when
+ creating a static website.
+
+ Note: the container must be CDN-enabled for this to have any effect.
+ """
+ return self.manager.set_web_error_page(self, page)
+
+
+ def make_public(self, ttl=None):
+ """
+ Enables CDN access for this container, and optionally sets the TTL.
+ """
+ return self.manager.make_public(self, ttl=ttl)
+
+
+ def make_private(self):
+ """
+ Disables CDN access to this container. It may still appear public until
+ the TTL expires.
+ """
+ return self.manager.make_private(self)
+
+
+ def purge_cdn_object(self, obj, email_addresses=None):
+ """
+ Removes a CDN-enabled object from public access before the TTL expires.
+ Please note that there is a limit (at this time) of 25 such requests;
+ if you need to purge more than that, you must contact support.
+
+ If one or more email_addresses are included, an email confirming the
+ purge is sent to each address.
+ """
+ return self.object_manager.purge(obj, email_addresses=email_addresses)
+
+
+ def get_object(self, item):
+ """
+ Returns a StorageObject matching the specified item. If no such object
+ exists, a NotFound exception is raised. If 'item' is not a string, that
+ item is returned unchanged.
+ """
+ if isinstance(item, six.string_types):
+ item = self.object_manager.get(item)
+ return item
+
+
+ def list(self, marker=None, limit=None, prefix=None, delimiter=None,
+ end_marker=None, full_listing=False, return_raw=False):
+ """
+ List the objects in this container, using the parameters to control the
+ number and content of objects. Note that this is limited by the
+ absolute request limits of Swift (currently 10,000 objects). If you
+ need to list all objects in the container, use the `list_all()` method
+ instead.
+ """
+ if full_listing:
+ return self.list_all(prefix=prefix)
+ else:
+ return self.object_manager.list(marker=marker, limit=limit,
+ prefix=prefix, delimiter=delimiter, end_marker=end_marker,
+ return_raw=return_raw)
+
+
+ def list_all(self, prefix=None):
+ """
+ List all the objects in this container, optionally filtered by an
+ initial prefix. Returns an iterator that will yield all the objects in
+ the container, even if the number exceeds the absolute limits of Swift.
+ """
+ return self.manager.object_listing_iterator(self, prefix=prefix)
+
+
+ def list_object_names(self, marker=None, limit=None, prefix=None,
+ delimiter=None, end_marker=None, full_listing=False):
+ """
+ Returns a list of the names of all the objects in this container. The
+ same pagination parameters apply as in self.list().
+ """
+ if full_listing:
+ objects = self.list_all(prefix=prefix)
+ else:
+ objects = self.list(marker=marker, limit=limit, prefix=prefix,
+ delimiter=delimiter, end_marker=end_marker)
+ return [obj.name for obj in objects]
+
+
+ def find(self, **kwargs):
+ """
+ Finds a single object with attributes matching ``**kwargs``.
+
+ This isn't very efficient: it loads the entire list then filters on
+ the Python side.
+ """
+ return self.object_manager.find(**kwargs)
+
+
+ def findall(self, **kwargs):
+ """
+ Finds all objects with attributes matching ``**kwargs``.
+
+ This isn't very efficient: it loads the entire list then filters on
+ the Python side.
+ """
+ return self.object_manager.findall(**kwargs)
+
+
+ def create(self, file_or_path=None, data=None, obj_name=None,
+ content_type=None, etag=None, content_encoding=None,
+ content_length=None, ttl=None, chunked=False, metadata=None,
+ chunk_size=None, headers=None, return_none=False):
+ """
+ Creates or replaces a storage object in this container.
+
+ The content of the object can either be a stream of bytes (`data`), or
+ a file on disk (`file_or_path`). The disk file can be either an open
+ file-like object, or an absolute path to the file on disk.
+
+ When creating an object from a data stream, you must specify the name of
+ the object to be created in the container via the `obj_name` parameter.
+ When working with a file, though, if no `obj_name` value is specified,
+ the file's name will be used.
+
+ You may optionally set the `content_type` and `content_encoding`
+ parameters; pyrax will create the appropriate headers when the object
+ is stored. If no `content_type` is specified, the object storage system
+ will make an intelligent guess based on the content of the object.
+
+ If the size of the file is known, it can be passed as `content_length`.
+
+ If you wish for the object to be temporary, specify the time it should
+ be stored in seconds in the `ttl` parameter. If this is specified, the
+ object will be deleted after that number of seconds.
+
+ If you wish to store a stream of data (i.e., where you don't know the
+ total size in advance), set the `chunked` parameter to True, and omit
+ the `content_length` and `etag` parameters. This allows the data to be
+ streamed to the object in the container without having to be written to
+ disk first.
+ """
+ return self.object_manager.create(file_or_path=file_or_path,
+ data=data, obj_name=obj_name, content_type=content_type,
+ etag=etag, content_encoding=content_encoding,
+ content_length=content_length, ttl=ttl, chunked=chunked,
+ metadata=metadata, chunk_size=chunk_size, headers=headers,
+ return_none=return_none)
+
+
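A usage sketch of Container.create(), for reference; `cont` is assumed to be an existing pyrax Container, and the paths and names are illustrative.

# Upload a file from disk; the object name defaults to the file's name.
obj = cont.create(file_or_path="/tmp/report.pdf")

# Store raw data under an explicit name, expiring after one hour.
cont.create(obj_name="scratch/notes.txt", data="temporary text",
        content_type="text/plain", ttl=3600, return_none=True)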
+ def store_object(self, obj_name, data, content_type=None, etag=None,
+ content_encoding=None, ttl=None, return_none=False,
+ headers=None, extra_info=None):
+ """
+ Creates a new object in this container, and populates it with the given
+ data. A StorageObject reference to the uploaded file will be returned,
+ unless 'return_none' is set to True.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+ """
+ return self.create(obj_name=obj_name, data=data,
+ content_type=content_type, etag=etag,
+ content_encoding=content_encoding, ttl=ttl,
+ return_none=return_none, headers=headers)
+
+
+ def upload_file(self, file_or_path, obj_name=None, content_type=None,
+ etag=None, return_none=False, content_encoding=None, ttl=None,
+ content_length=None, headers=None):
+ """
+ Uploads the specified file to this container. If no name is supplied,
+ the file's name will be used. Either a file path or an open file-like
+ object may be supplied. A StorageObject reference to the uploaded file
+ will be returned, unless 'return_none' is set to True.
+
+ You may optionally set the `content_type` and `content_encoding`
+ parameters; pyrax will create the appropriate headers when the object
+ is stored.
+
+ If the size of the file is known, it can be passed as `content_length`.
+
+ If you wish for the object to be temporary, specify the time it should
+ be stored in seconds in the `ttl` parameter. If this is specified, the
+ object will be deleted after that number of seconds.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+ """
+ return self.create(file_or_path=file_or_path, obj_name=obj_name,
+ content_type=content_type, etag=etag,
+ content_encoding=content_encoding, headers=headers,
+ content_length=content_length, ttl=ttl,
+ return_none=return_none)
+
+
+ def fetch(self, obj, include_meta=False, chunk_size=None, size=None,
+ extra_info=None):
+ """
+ Fetches the object from storage.
+
+ If 'include_meta' is False, only the bytes representing the
+ stored object are returned.
+
+ Note: if 'chunk_size' is defined, you must fully read the object's
+ contents before making another request.
+
+ If 'size' is specified, only the first 'size' bytes of the object will
+ be returned. If the object if smaller than 'size', the entire object is
+ returned.
+
+ When 'include_meta' is True, what is returned from this method is a
+ 2-tuple:
+ Element 0: a dictionary containing metadata about the file.
+ Element 1: a stream of bytes representing the object's contents.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+ """
+ return self.object_manager.fetch(obj, include_meta=include_meta,
+ chunk_size=chunk_size, size=size)
+
+
+ def fetch_object(self, obj_name, include_meta=False, chunk_size=None):
+ """
+ Alias for self.fetch(); included for backwards compatibility
+ """
+ return self.fetch(obj=obj_name, include_meta=include_meta,
+ chunk_size=chunk_size)
+
+
+ def fetch_partial(self, obj, size):
+ """
+ Returns the first 'size' bytes of an object. If the object is smaller
+ than the specified 'size' value, the entire object is returned.
+ """
+ return self.object_manager.fetch_partial(obj, size)
+
+
+ def download(self, obj, directory, structure=True):
+ """
+ Fetches the object from storage, and writes it to the specified
+ directory. The directory must exist before calling this method.
+
+ If the object name represents a nested folder structure, such as
+ "foo/bar/baz.txt", that folder structure will be created in the target
+ directory by default. If you do not want the nested folders to be
+ created, pass `structure=False` in the parameters.
+ """
+ return self.object_manager.download(obj, directory, structure=structure)
+
+
+ def download_object(self, obj_name, directory, structure=True):
+ """
+ Alias for self.download(); included for backwards compatibility
+ """
+ return self.download(obj=obj_name, directory=directory,
+ structure=structure)
+
+
+ def delete(self, del_objects=False):
+ """
+ Deletes this Container. If the container contains objects, the
+ command will fail unless 'del_objects' is passed as True. In that
+ case, each object will be deleted first, and then the container.
+ """
+ return self.manager.delete(self, del_objects=del_objects)
+
+
+ def delete_object(self, obj):
+ """
+ Deletes the object from this container.
+
+ The 'obj' parameter can either be the name of the object, or a
+ StorageObject representing the object to be deleted.
+ """
+ return self.object_manager.delete(obj)
+
+
+ def delete_object_in_seconds(self, obj, seconds, extra_info=None):
+ """
+ Sets the object in this container to be deleted after the specified
+ number of seconds.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+ """
+ return self.manager.delete_object_in_seconds(self, obj, seconds)
+
+
+ def delete_all_objects(self, async=False):
+ """
+ Deletes all objects from this container.
+
+ By default the call will block until all objects have been deleted. By
+ passing True for the 'async' parameter, this method will not block, and
+ instead return an object that can be used to follow the progress of the
+ deletion. When deletion is complete the bulk deletion object's
+ 'results' attribute will be populated with the information returned
+ from the API call. In synchronous mode this is the value that is
+ returned when the call completes. It is a dictionary with the following
+ keys:
+
+ deleted - the number of objects deleted
+ not_found - the number of objects not found
+ status - the HTTP return status code. '200 OK' indicates success
+ errors - a list of any errors returned by the bulk delete call
+ """
+ nms = self.list_object_names(full_listing=True)
+ return self.object_manager.delete_all_objects(nms, async=async)
+
+
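A sketch of inspecting the bulk-delete results described above, assuming `cont` is an existing pyrax Container; the default, blocking form is shown.

result = cont.delete_all_objects()
print("Deleted: %s" % result["deleted"])
print("Not found: %s" % result["not_found"])
print("Status: %s" % result["status"])
if result["errors"]:
    print("Errors: %s" % result["errors"])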
+ def copy_object(self, obj, new_container, new_obj_name=None,
+ content_type=None):
+ """
+ Copies the object to the new container, optionally giving it a new name.
+ If you copy to the same container, you must supply a different name.
+ """
+ return self.manager.copy_object(self, obj, new_container,
+ new_obj_name=new_obj_name, content_type=content_type)
+
+
+ def move_object(self, obj, new_container, new_obj_name=None,
+ new_reference=False, content_type=None, extra_info=None):
+ """
+ Works just like copy_object, except that the source object is deleted
+ after a successful copy.
+
+ You can optionally change the content_type of the object by supplying
+ that in the 'content_type' parameter.
+
+ NOTE: any references to the original object will no longer be valid;
+ you will have to get a reference to the new object by passing True for
+ the 'new_reference' parameter. When this is True, a reference to the
+ newly moved object is returned. Otherwise, the etag for the moved
+ object is returned.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+ """
+ return self.manager.move_object(self, obj, new_container,
+ new_obj_name=new_obj_name, new_reference=new_reference,
+ content_type=content_type)
+
+
+ def change_object_content_type(self, obj, new_ctype, guess=False):
+ """
+ Copies object to itself, but applies a new content-type. The guess
+ feature requires this container to be CDN-enabled. If not, then the
+ content-type must be supplied. If using guess with a CDN-enabled
+ container, new_ctype can be set to None. Failure during the put will
+ result in an exception.
+ """
+ return self.manager.change_object_content_type(self, obj, new_ctype,
+ guess=guess)
+
+
+ def get_temp_url(self, obj, seconds, method="GET", key=None, cached=True):
+ """
+ Given a storage object in this container, returns a URL that can be
+ used to access that object. The URL will expire after `seconds`
+ seconds.
+
+ The only methods supported are GET and PUT. Anything else will raise an
+ `InvalidTemporaryURLMethod` exception.
+
+ If you have your Temporary URL key, you can pass it in directly and
+ potentially save an API call to retrieve it. If you don't pass in the
+ key, and don't wish to use any cached value, pass `cached=False`.
+ """
+ return self.manager.get_temp_url(self, obj, seconds, method=method,
+ key=key, cached=cached)
+
+
+ def get_object_metadata(self, obj):
+ """
+ Returns the metadata for the specified object as a dict.
+ """
+ return self.object_manager.get_metadata(obj)
+
+
+ def set_object_metadata(self, obj, metadata, clear=False, extra_info=None,
+ prefix=None):
+ """
+ Accepts a dictionary of metadata key/value pairs and updates the
+ specified object metadata with them.
+
+ If 'clear' is True, any existing metadata is deleted and only the
+ passed metadata is retained. Otherwise, the values passed here update
+ the object's metadata.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+
+ By default, the standard object metadata prefix ('X-Object-Meta-') is
+ prepended to the header name if it isn't present. For non-standard
+ headers, you must include a non-None prefix, such as an empty string.
+ """
+ return self.object_manager.set_metadata(obj, metadata, clear=clear,
+ prefix=prefix)
+
+
+ def list_subdirs(self, marker=None, limit=None, prefix=None, delimiter=None,
+ full_listing=False):
+ """
+ Return a list of the names representing the pseudo-subdirectories in
+ this container. You can use the marker and limit params to handle
+ pagination, and the prefix param to filter the objects returned. The
+ delimiter param is there for backwards compatibility only, as the call
+ requires the delimiter to be '/'.
+ """
+ return self.manager.list_subdirs(self, marker=marker, limit=limit,
+ prefix=prefix, delimiter=delimiter, full_listing=full_listing)
+
+
+ def remove_from_cache(self, obj):
+ """
+ Not used anymore. Included for backwards compatibility.
+ """
+ pass
+
+
+ # BEGIN - CDN property definitions ##
+ def _get_cdn_log_retention(self):
+ if self._cdn_log_retention is FAULT:
+ self._fetch_cdn_data()
+ return self._cdn_log_retention
+
+ def _set_cdn_log_retention(self, val):
+ self.manager.set_cdn_log_retention(self, val)
+ self._cdn_log_retention = val
+
+
+ def _get_cdn_enabled(self):
+ if self._cdn_enabled is FAULT:
+ self._fetch_cdn_data()
+ return self._cdn_enabled
+
+ def _set_cdn_enabled(self, val):
+ self._cdn_enabled = val
+
+
+ def _get_cdn_uri(self):
+ if self._cdn_uri is FAULT:
+ self._fetch_cdn_data()
+ return self._cdn_uri
+
+ def _set_cdn_uri(self, val):
+ self._cdn_uri = val
+
+
+ def _get_cdn_ttl(self):
+ if self._cdn_ttl is FAULT:
+ self._fetch_cdn_data()
+ return self._cdn_ttl
+
+ def _set_cdn_ttl(self, val):
+ self._cdn_ttl = val
+
+
+ def _get_cdn_ssl_uri(self):
+ if self._cdn_ssl_uri is FAULT:
+ self._fetch_cdn_data()
+ return self._cdn_ssl_uri
+
+ def _set_cdn_ssl_uri(self, val):
+ self._cdn_ssl_uri = val
+
+
+ def _get_cdn_streaming_uri(self):
+ if self._cdn_streaming_uri is FAULT:
+ self._fetch_cdn_data()
+ return self._cdn_streaming_uri
+
+ def _set_cdn_streaming_uri(self, val):
+ self._cdn_streaming_uri = val
+
+
+ def _get_cdn_ios_uri(self):
+ if self._cdn_ios_uri is FAULT:
+ self._fetch_cdn_data()
+ return self._cdn_ios_uri
+
+ def _set_cdn_ios_uri(self, val):
+ self._cdn_ios_uri = val
+
+
+ cdn_enabled = property(_get_cdn_enabled, _set_cdn_enabled)
+ cdn_log_retention = property(_get_cdn_log_retention, _set_cdn_log_retention)
+ cdn_uri = property(_get_cdn_uri, _set_cdn_uri)
+ cdn_ttl = property(_get_cdn_ttl, _set_cdn_ttl)
+ cdn_ssl_uri = property(_get_cdn_ssl_uri, _set_cdn_ssl_uri)
+ cdn_streaming_uri = property(_get_cdn_streaming_uri, _set_cdn_streaming_uri)
+ cdn_ios_uri = property(_get_cdn_ios_uri, _set_cdn_ios_uri)
+ # END - CDN property definitions ##
+
+
+
+class ContainerManager(BaseManager):
+ def _list(self, uri, obj_class=None, body=None, return_raw=False):
+ """
+ Swift doesn't return listings in the same format as the rest of
+ OpenStack, so this method has to be overridden.
+ """
+ resp, resp_body = self.api.method_get(uri)
+ return [Container(self, res, loaded=False)
+ for res in resp_body if res]
+
+
+ @_handle_container_not_found
+ def get(self, container):
+ """
+ Returns a Container matching the specified container name. If no such
+ container exists, a NoSuchContainer exception is raised.
+ """
+ name = utils.get_name(container)
+ uri = "/%s" % name
+ resp, resp_body = self.api.method_head(uri)
+ hdrs = resp.headers
+ data = {"total_bytes": int(hdrs.get("x-container-bytes-used", "0")),
+ "object_count": int(hdrs.get("x-container-object-count", "0")),
+ "name": name}
+ return Container(self, data, loaded=False)
+
+
+ def create(self, name, metadata=None, prefix=None, *args, **kwargs):
+ """
+ Creates a new container, and returns a Container object that represents
+ that container. If a container by the same name already exists, no
+ exception is raised; instead, a reference to that existing container is
+ returned.
+ """
+ uri = "/%s" % name
+ headers = {}
+ if prefix is None:
+ prefix = CONTAINER_META_PREFIX
+ if metadata:
+ metadata = _massage_metakeys(metadata, prefix)
+ headers = metadata
+ resp, resp_body = self.api.method_put(uri, headers=headers)
+ if resp.status_code in (201, 202):
+ hresp, hresp_body = self.api.method_head(uri)
+ num_obj = int(hresp.headers.get("x-container-object-count", "0"))
+ num_bytes = int(hresp.headers.get("x-container-bytes-used", "0"))
+ cont_info = {"name": name, "object_count": num_obj,
+ "total_bytes": num_bytes}
+ return Container(self, cont_info)
+ elif resp.status_code == 400:
+ raise exc.ClientException("Container creation failed: %s" %
+ resp_body)
+
+
+ @_handle_container_not_found
+ def delete(self, container, del_objects=False):
+ """
+ Deletes the specified container. If the container contains objects, the
+ command will fail unless 'del_objects' is passed as True. In that case,
+ each object will be deleted first, and then the container.
+ """
+ if del_objects:
+ nms = self.list_object_names(container)
+ self.api.bulk_delete(container, nms, async=False)
+ uri = "/%s" % utils.get_name(container)
+ resp, resp_body = self.api.method_delete(uri)
+
+
+ def _create_body(self, name, *args, **kwargs):
+ """
+ Container creation requires no body.
+ """
+ return None
+
+
+ @_handle_container_not_found
+ def fetch_cdn_data(self, container):
+ """
+ Returns a dict containing the CDN information for the specified
+ container. If the container is not CDN-enabled, returns an empty dict.
+ """
+ name = utils.get_name(container)
+ uri = "/%s" % name
+ try:
+ resp, resp_body = self.api.cdn_request(uri, "HEAD")
+ except exc.NotCDNEnabled:
+ return {}
+ return resp.headers
+
+
+ def get_account_headers(self):
+ """
+ Return the headers for the account. This includes all the headers, not
+ just the account-specific headers. The calling program is responsible
+ for only using the ones that it needs.
+ """
+ resp, resp_body = self.api.method_head("/")
+ return resp.headers
+
+
+ @_handle_container_not_found
+ def get_headers(self, container):
+ """
+ Return the headers for the specified container.
+ """
+ uri = "/%s" % utils.get_name(container)
+ resp, resp_body = self.api.method_head(uri)
+ return resp.headers
+
+
+ def get_account_metadata(self, prefix=None):
+ """
+ Returns a dictionary containing metadata about the account.
+ """
+ headers = self.get_account_headers()
+ if prefix is None:
+ prefix = ACCOUNT_META_PREFIX
+ low_prefix = prefix.lower()
+ ret = {}
+ for hkey, hval in list(headers.items()):
+ lowkey = hkey.lower()
+ if lowkey.startswith(low_prefix):
+ cleaned = hkey.replace(low_prefix, "").replace("-", "_")
+ ret[cleaned] = hval
+ return ret
+
+
+ def set_account_metadata(self, metadata, clear=False, prefix=None):
+ """
+ Accepts a dictionary of metadata key/value pairs and updates the
+ account metadata with them.
+
+ If 'clear' is True, any existing metadata is deleted and only the
+ passed metadata is retained. Otherwise, the values passed here update
+ the account's metadata.
+
+ By default, the standard account metadata prefix ('X-Account-Meta-') is
+ prepended to the header name if it isn't present. For non-standard
+ headers, you must include a non-None prefix, such as an empty string.
+ """
+ # Add the metadata prefix, if needed.
+ if prefix is None:
+ prefix = ACCOUNT_META_PREFIX
+ massaged = _massage_metakeys(metadata, prefix)
+ new_meta = {}
+ if clear:
+ curr_meta = self.get_account_metadata(prefix=prefix)
+ for ckey in curr_meta:
+ new_meta[ckey] = ""
+ new_meta = _massage_metakeys(new_meta, prefix)
+ utils.case_insensitive_update(new_meta, massaged)
+ uri = "/"
+ resp, resp_body = self.api.method_post(uri, headers=new_meta)
+ return 200 <= resp.status_code <= 299
+
+
+ def delete_account_metadata(self, prefix=None):
+ """
+ Removes all metadata matching the specified prefix from the account.
+
+ By default, the standard account metadata prefix ('X-Account-Meta-') is
+ prepended to the header name if it isn't present. For non-standard
+ headers, you must include a non-None prefix, such as an empty string.
+ """
+ # Add the metadata prefix, if needed.
+ if prefix is None:
+ prefix = ACCOUNT_META_PREFIX
+ curr_meta = self.get_account_metadata(prefix=prefix)
+ for ckey in curr_meta:
+ curr_meta[ckey] = ""
+ new_meta = _massage_metakeys(curr_meta, prefix)
+ uri = "/"
+ resp, resp_body = self.api.method_post(uri, headers=new_meta)
+ return 200 <= resp.status_code <= 299
+
+
+ def get_metadata(self, container, prefix=None):
+ """
+ Returns a dictionary containing the metadata for the container.
+ """
+ headers = self.get_headers(container)
+ if prefix is None:
+ prefix = CONTAINER_META_PREFIX
+ low_prefix = prefix.lower()
+ ret = {}
+ for hkey, hval in list(headers.items()):
+ if hkey.lower().startswith(low_prefix):
+ cleaned = hkey.replace(low_prefix, "").replace("-", "_")
+ ret[cleaned] = hval
+ return ret
+
+
+ @_handle_container_not_found
+ def set_metadata(self, container, metadata, clear=False, prefix=None):
+ """
+ Accepts a dictionary of metadata key/value pairs and updates the
+ specified container metadata with them.
+
+ If 'clear' is True, any existing metadata is deleted and only the
+ passed metadata is retained. Otherwise, the values passed here update
+ the container's metadata.
+
+ By default, the standard container metadata prefix
+ ('X-Container-Meta-') is prepended to the header name if it isn't
+ present. For non-standard headers, you must include a non-None prefix,
+ such as an empty string.
+ """
+ # Add the metadata prefix, if needed.
+ if prefix is None:
+ prefix = CONTAINER_META_PREFIX
+ massaged = _massage_metakeys(metadata, prefix)
+ new_meta = {}
+ if clear:
+ curr_meta = self.api.get_container_metadata(container,
+ prefix=prefix)
+ for ckey in curr_meta:
+ new_meta[ckey] = ""
+ utils.case_insensitive_update(new_meta, massaged)
+ name = utils.get_name(container)
+ uri = "/%s" % name
+ resp, resp_body = self.api.method_post(uri, headers=new_meta)
+ return 200 <= resp.status_code <= 299
+
+
+ def remove_metadata_key(self, container, key, prefix=None):
+ """
+ Removes the specified key from the container's metadata. If the key
+ does not exist in the metadata, nothing is done.
+ """
+ meta_dict = {key: ""}
+ return self.set_metadata(container, meta_dict, prefix=prefix)
+
+
+ @_handle_container_not_found
+ def delete_metadata(self, container, prefix=None):
+ """
+ Removes all of the container's metadata.
+
+ By default, all metadata beginning with the standard container metadata
+ prefix ('X-Container-Meta-') is removed. If you wish to remove all
+ metadata beginning with a different prefix, you must specify that
+ prefix.
+ """
+ # Add the metadata prefix, if needed.
+ if prefix is None:
+ prefix = CONTAINER_META_PREFIX
+ new_meta = {}
+ curr_meta = self.get_metadata(container, prefix=prefix)
+ for ckey in curr_meta:
+ new_meta[ckey] = ""
+ uri = "/%s" % utils.get_name(container)
+ resp, resp_body = self.api.method_post(uri, headers=new_meta)
+ return 200 <= resp.status_code <= 299
+
+
+ @_handle_container_not_found
+ def get_cdn_metadata(self, container):
+ """
+ Returns a dictionary containing the CDN metadata for the container. If
+ the container does not exist, a NotFound exception is raised. If the
+ container exists, but is not CDN-enabled, a NotCDNEnabled exception is
+ raised.
+ """
+ uri = "%s/%s" % (self.uri_base, utils.get_name(container))
+ resp, resp_body = self.api.cdn_request(uri, "HEAD")
+ ret = dict(resp.headers)
+ # Remove non-CDN headers
+ ret.pop("content-length", None)
+ ret.pop("content-type", None)
+ ret.pop("date", None)
+ return ret
+
+
+ @_handle_container_not_found
+ def set_cdn_metadata(self, container, metadata):
+ """
+ Accepts a dictionary of metadata key/value pairs and updates
+ the specified container metadata with them.
+
+ NOTE: arbitrary metadata headers are not allowed. The only metadata
+ you can update are: X-Log-Retention, X-CDN-enabled, and X-TTL.
+ """
+ allowed = ("x-log-retention", "x-cdn-enabled", "x-ttl")
+ hdrs = {}
+ bad = []
+ for mkey, mval in six.iteritems(metadata):
+ if mkey.lower() not in allowed:
+ bad.append(mkey)
+ continue
+ hdrs[mkey] = str(mval)
+ if bad:
+ raise exc.InvalidCDNMetadata("The only CDN metadata you can "
+ "update are: X-Log-Retention, X-CDN-enabled, and X-TTL. "
+ "Received the following illegal item(s): %s" %
+ ", ".join(bad))
+ uri = "%s/%s" % (self.uri_base, utils.get_name(container))
+ resp, resp_body = self.api.cdn_request(uri, "POST", headers=hdrs)
+ return resp
+
+
+ def get_temp_url(self, container, obj, seconds, method="GET", key=None,
+ cached=True):
+ """
+ Given a storage object in a container, returns a URL that can be used
+ to access that object. The URL will expire after `seconds` seconds.
+
+ The only methods supported are GET and PUT. Anything else will raise
+ an `InvalidTemporaryURLMethod` exception.
+
+ If you have your Temporary URL key, you can pass it in directly and
+ potentially save an API call to retrieve it. If you don't pass in the
+ key, and don't wish to use any cached value, pass `cached=False`.
+ """
+ if not key:
+ key = self.api.get_temp_url_key(cached=cached)
+ if not key:
+ raise exc.MissingTemporaryURLKey("You must set the key for "
+ "Temporary URLs before you can generate them. This is "
+ "done via the `set_temp_url_key()` method.")
+ cname = utils.get_name(container)
+ oname = utils.get_name(obj)
+ mod_method = method.upper().strip()
+ if mod_method not in ("GET", "PUT"):
+ raise exc.InvalidTemporaryURLMethod("Method must be either 'GET' "
+ "or 'PUT'; received '%s'." % method)
+ mgt_url = self.api.management_url
+ mtch = re.search(r"/v\d/", mgt_url)
+ start = mtch.start()
+ base_url = mgt_url[:start]
+ path_parts = (mgt_url[start:], cname, oname)
+ cleaned = (part.strip("/\\") for part in path_parts)
+ pth = "/%s" % "/".join(cleaned)
+ if isinstance(pth, six.string_types):
+ pth = pth.encode(pyrax.get_encoding())
+ expires = int(time.time() + int(seconds))
+ hmac_body = "%s\n%s\n%s" % (mod_method, expires, pth)
+ try:
+ sig = hmac.new(key, hmac_body, hashlib.sha1).hexdigest()
+ except TypeError as e:
+ raise exc.UnicodePathError("Due to a bug in Python, the TempURL "
+ "function only works with ASCII object paths.")
+ temp_url = "%s%s?temp_url_sig=%s&temp_url_expires=%s" % (base_url, pth,
+ sig, expires)
+ return temp_url
+
+
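For clarity, a standalone sketch of the signature computed above, written in Python 3 syntax (the key and HMAC body must be bytes there); the storage host, account path, and key value are illustrative.

import hashlib
import hmac
import time

key = b"my-temp-url-key"
expires = int(time.time()) + 3600
path = "/v1/MossoCloudFS_0123abcd/photos/kitten.jpg"
hmac_body = "GET\n%s\n%s" % (expires, path)
sig = hmac.new(key, hmac_body.encode("utf-8"), hashlib.sha1).hexdigest()
temp_url = "%s%s?temp_url_sig=%s&temp_url_expires=%s" % (
        "https://storage101.example.com", path, sig, expires)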
+ def list_containers_info(self, limit=None, marker=None):
+ """Returns a list of info on Containers.
+
+ For each container, a dict containing the following keys is returned:
+
+ name - the name of the container
+ count - the number of objects in the container
+ bytes - the total bytes in the container
+ """
+ uri = ""
+ qs = utils.dict_to_qs({"limit": limit, "marker": marker})
+ if qs:
+ uri = "%s?%s" % (uri, qs)
+ resp, resp_body = self.api.method_get(uri)
+ return resp_body
+
+
+ def list_public_containers(self):
+ """
+ Returns a list of the names of all CDN-enabled containers.
+ """
+ resp, resp_body = self.api.cdn_request("", "GET")
+ return [cont["name"] for cont in resp_body]
+
+
+ def make_public(self, container, ttl=None):
+ """
+ Enables CDN access for the specified container, and optionally sets the
+ TTL for the container.
+ """
+ return self._set_cdn_access(container, public=True, ttl=ttl)
+
+
+ def make_private(self, container):
+ """
+ Disables CDN access to a container. It may still appear public until
+ its TTL expires.
+ """
+ return self._set_cdn_access(container, public=False)
+
+
+ @_handle_container_not_found
+ def _set_cdn_access(self, container, public, ttl=None):
+ """
+ Enables or disables CDN access for the specified container, and
+ optionally sets the TTL for the container when enabling access.
+ """
+ headers = {"X-Cdn-Enabled": "%s" % public}
+ if public and ttl:
+ headers["X-Ttl"] = ttl
+ self.api.cdn_request("/%s" % utils.get_name(container), method="PUT",
+ headers=headers)
+
+
+ @_handle_container_not_found
+ def get_cdn_log_retention(self, container):
+ """
+ Returns the status of the setting for CDN log retention for the
+ specified container.
+ """
+ resp, resp_body = self.api.cdn_request("/%s" %
+ utils.get_name(container), method="HEAD")
+ return resp.headers.get("x-log-retention").lower() == "true"
+
+
+ @_handle_container_not_found
+ def set_cdn_log_retention(self, container, enabled):
+ """
+ Enables or disables whether CDN access logs for the specified container
+ are collected and stored on Cloud Files.
+ """
+ headers = {"X-Log-Retention": "%s" % enabled}
+ self.api.cdn_request("/%s" % utils.get_name(container), method="PUT",
+ headers=headers)
+
+
+ @_handle_container_not_found
+ def get_container_streaming_uri(self, container):
+ """
+ Returns the URI for streaming content, or None if CDN is not enabled.
+ """
+ resp, resp_body = self.api.cdn_request("/%s" %
+ utils.get_name(container), method="HEAD")
+ return resp.headers.get("x-cdn-streaming-uri")
+
+
+ @_handle_container_not_found
+ def get_container_ios_uri(self, container):
+ """
+ Returns the iOS URI, or None if CDN is not enabled.
+ """
+ resp, resp_body = self.api.cdn_request("/%s" %
+ utils.get_name(container), method="HEAD")
+ return resp.headers.get("x-cdn-ios-uri")
+
+
+ @_handle_container_not_found
+ def set_web_index_page(self, container, page):
+ """
+ Sets the header indicating the index page in a container
+ when creating a static website.
+
+ Note: the container must be CDN-enabled for this to have
+ any effect.
+ """
+ headers = {"X-Container-Meta-Web-Index": "%s" % page}
+ self.api.cdn_request("/%s" % utils.get_name(container), method="POST",
+ headers=headers)
+
+
+ @_handle_container_not_found
+ def set_web_error_page(self, container, page):
+ """
+ Sets the header indicating the error page in a container
+ when creating a static website.
+
+ Note: the container must be CDN-enabled for this to have
+ any effect.
+ """
+ headers = {"X-Container-Meta-Web-Error": "%s" % page}
+ self.api.cdn_request("/%s" % utils.get_name(container), method="POST",
+ headers=headers)
+
+
+ @assure_container
+ def purge_cdn_object(self, container, obj, email_addresses=None):
+ """
+ Removes a CDN-enabled object from public access before the TTL expires.
+ Please note that there is a limit (at this time) of 25 such requests;
+ if you need to purge more than that, you must contact support.
+
+ If one or more email_addresses are included, an email confirming the
+ purge is sent to each address.
+ """
+ return container.purge_cdn_object(obj, email_addresses=email_addresses)
+
+
+ @assure_container
+ def list_objects(self, container, limit=None, marker=None, prefix=None,
+ delimiter=None, end_marker=None, full_listing=False):
+ """
+ Return a list of StorageObjects representing the objects in this
+ container. You can use the marker, end_marker, and limit params to
+ handle pagination, and the prefix and delimiter params to filter the
+ objects returned. By default only the first 10,000 objects are
+ returned; if you need to access more than that, set the 'full_listing'
+ parameter to True.
+ """
+ if full_listing:
+ return container.list_all(prefix=prefix)
+ return container.list(limit=limit, marker=marker, prefix=prefix,
+ delimiter=delimiter, end_marker=end_marker)
+
+
+ @assure_container
+ def list_object_names(self, container, marker=None, limit=None, prefix=None,
+ delimiter=None, end_marker=None, full_listing=False):
+ """
+ Return a list of the names of the objects in this container. You can
+ use the marker, end_marker, and limit params to handle pagination, and
+ the prefix and delimiter params to filter the objects returned. By
+ default only the first 10,000 objects are returned; if you need to
+ access more than that, set the 'full_listing' parameter to True.
+ """
+ return container.list_object_names(marker=marker, limit=limit,
+ prefix=prefix, delimiter=delimiter, end_marker=end_marker,
+ full_listing=full_listing)
+
+
+ @assure_container
+ def object_listing_iterator(self, container, prefix=None):
+ """
+ Returns an iterator that can be used to access the objects within this
+ container. They can be optionally limited by a prefix.
+ """
+ return StorageObjectIterator(container.object_manager, prefix=prefix)
+
+
+ @assure_container
+ def list_subdirs(self, container, marker=None, limit=None, prefix=None,
+ delimiter=None, full_listing=False):
+ """
+ Returns a list of StorageObjects representing the pseudo-subdirectories
+ in the specified container. You can use the marker and limit params to
+ handle pagination, and the prefix param to filter the objects returned.
+ The 'delimiter' parameter is ignored, as the only meaningful value is
+ '/'.
+ """
+ mthd = container.list_all if full_listing else container.list
+ objs = mthd(marker=marker, limit=limit, prefix=prefix, delimiter="/",
+ return_raw=True)
+ sdirs = [obj for obj in objs if "subdir" in obj]
+ for sdir in sdirs:
+ sdir["name"] = sdir["subdir"]
+ mgr = container.object_manager
+ return [StorageObject(mgr, sdir) for sdir in sdirs]
+
+
+ @assure_container
+ def get_object(self, container, obj):
+ """
+ Returns a StorageObject representing the requested object.
+ """
+ return container.get_object(obj)
+
+
+ @assure_container
+ def create_object(self, container, file_or_path=None, data=None,
+ obj_name=None, content_type=None, etag=None, content_encoding=None,
+ content_length=None, ttl=None, chunked=False, metadata=None,
+ chunk_size=None, headers=None, return_none=False):
+ """
+ Creates or replaces a storage object in the specified container.
+ A StorageObject reference will be returned, unless the
+ 'return_none' parameter is True.
+
+ The content of the object can either be a stream of bytes (`data`), or
+ a file on disk (`file_or_path`). The disk file can be either an open
+ file-like object, or an absolute path to the file on disk.
+
+ When creating an object from a data stream, you must specify the name of
+ the object to be created in the container via the `obj_name` parameter.
+ When working with a file, though, if no `obj_name` value is specified,
+ the file's name will be used.
+
+ You may optionally set the `content_type` and `content_encoding`
+ parameters; pyrax will create the appropriate headers when the object
+ is stored. If no `content_type` is specified, the object storage system
+ will make an intelligent guess based on the content of the object.
+
+ If the size of the file is known, it can be passed as `content_length`.
+
+ If you wish for the object to be temporary, specify the time it should
+ be stored in seconds in the `ttl` parameter. If this is specified, the
+ object will be deleted after that number of seconds.
+ """
+ return container.create(file_or_path=file_or_path, data=data,
+ obj_name=obj_name, content_type=content_type, etag=etag,
+ content_encoding=content_encoding,
+ content_length=content_length, ttl=ttl, chunked=chunked,
+ metadata=metadata, chunk_size=chunk_size, headers=headers,
+ return_none=return_none)
+
+
+ @assure_container
+ def fetch_object(self, container, obj, include_meta=False,
+ chunk_size=None, size=None, extra_info=None):
+ """
+ Fetches the object from storage.
+
+ If 'include_meta' is False, only the bytes representing the
+ stored object are returned.
+
+ Note: if 'chunk_size' is defined, you must fully read the object's
+ contents before making another request.
+
+ If 'size' is specified, only the first 'size' bytes of the object will
+ be returned. If the object is smaller than 'size', the entire object is
+ returned.
+
+ When 'include_meta' is True, what is returned from this method is a
+ 2-tuple:
+ Element 0: a dictionary containing metadata about the file.
+ Element 1: a stream of bytes representing the object's contents.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+ """
+ return container.fetch(obj, include_meta=include_meta,
+ chunk_size=chunk_size, size=size)
+
+
+ @assure_container
+ def fetch_partial(self, container, obj, size):
+ """
+ Returns the first 'size' bytes of an object. If the object is smaller
+ than the specified 'size' value, the entire object is returned.
+ """
+ return container.fetch_partial(obj, size)
+
+
+ @assure_container
+ def download_object(self, container, obj, directory, structure=True):
+ """
+ Fetches the object from storage, and writes it to the specified
+ directory. The directory must exist before calling this method.
+
+ If the object name represents a nested folder structure, such as
+ "foo/bar/baz.txt", that folder structure will be created in the target
+ directory by default. If you do not want the nested folders to be
+ created, pass `structure=False` in the parameters.
+ """
+ return container.download(obj, directory, structure=structure)
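+
+ # Usage sketch (illustrative only; names are hypothetical). Passing
+ # structure=False prevents the nested "foo/bar" folders from being
+ # created inside the target directory:
+ #
+ #     clt.download_object("example", "foo/bar/baz.txt", "/tmp/downloads",
+ #             structure=False)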
+
+
+ @assure_container
+ def delete_object(self, container, obj):
+ """
+ Deletes the object from the specified container.
+
+ The 'obj' parameter can either be the name of the object, or a
+ StorageObject representing the object to be deleted.
+ """
+ return container.delete_object(obj)
+
+
+ @_handle_container_not_found
+ def copy_object(self, container, obj, new_container, new_obj_name=None,
+ content_type=None):
+ """
+ Copies the object to the new container, optionally giving it a new name.
+ If you copy to the same container, you must supply a different name.
+
+ Returns the etag of the newly-copied object.
+
+ You can optionally change the content_type of the object by supplying
+ that in the 'content_type' parameter.
+ """
+ nm = new_obj_name or utils.get_name(obj)
+ uri = "/%s/%s" % (utils.get_name(new_container), nm)
+ copy_from = "/%s/%s" % (utils.get_name(container), utils.get_name(obj))
+ headers = {"X-Copy-From": copy_from,
+ "Content-Length": "0"}
+ if content_type:
+ headers["Content-Type"] = content_type
+ resp, resp_body = self.api.method_put(uri, headers=headers)
+ return resp.headers.get("etag")
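+
+ # Usage sketch (illustrative only; names are hypothetical). Copying
+ # within the same container requires a new object name; the etag of the
+ # new copy is returned:
+
+ #     etag = clt.copy_object("example", "report.txt", "example",
+ #             new_obj_name="report-copy.txt")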
+
+
+ def move_object(self, container, obj, new_container, new_obj_name=None,
+ new_reference=False, content_type=None):
+ """
+ Works just like copy_object, except that the source object is deleted
+ after a successful copy.
+
+ You can optionally change the content_type of the object by supplying
+ that in the 'content_type' parameter.
+
+ NOTE: any references to the original object will no longer be valid;
+ you will have to get a reference to the new object by passing True for
+ the 'new_reference' parameter. When this is True, a reference to the
+ newly moved object is returned. Otherwise, the etag for the moved
+ object is returned.
+ """
+ new_obj_etag = self.copy_object(container, obj, new_container,
+ new_obj_name=new_obj_name, content_type=content_type)
+ if not new_obj_etag:
+ return
+ # Copy succeeded; delete the original.
+ self.delete_object(container, obj)
+ if new_reference:
+ nm = new_obj_name or utils.get_name(obj)
+ return self.get_object(new_container, nm)
+ return new_obj_etag
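+
+ # Usage sketch (illustrative only; names are hypothetical). With
+ # new_reference=True the newly moved object is returned instead of its
+ # etag:
+ #
+ #     new_obj = clt.move_object("example", "report.txt", "archive",
+ #             new_reference=True)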
+
+
+ @assure_container
+ def change_object_content_type(self, container, obj, new_ctype,
+ guess=False):
+ """
+ Copies the object to itself, but applies a new content-type. The guess
+ feature requires the container to be CDN-enabled. If not, then the
+ content-type must be supplied. If using guess with a CDN-enabled
+ container, new_ctype can be set to None. Failure during the put will
+ result in an exception.
+ """
+ cname = utils.get_name(container)
+ oname = utils.get_name(obj)
+ if guess and container.cdn_enabled:
+ # Test against the CDN url to guess the content-type.
+ obj_url = "%s/%s" % (container.cdn_uri, oname)
+ new_ctype = mimetypes.guess_type(obj_url)[0]
+ return self.copy_object(container, obj, container,
+ content_type=new_ctype)
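+
+ # Usage sketch (illustrative only; names are hypothetical). With a
+ # CDN-enabled container the content-type can be guessed from the
+ # object's CDN URL, so new_ctype may be None:
+ #
+ #     clt.change_object_content_type("example", "report.txt", None,
+ #             guess=True)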
+
+
+ def delete_object_in_seconds(self, cont, obj, seconds, extra_info=None):
+ """
+ Sets the object in the specified container to be deleted after the
+ specified number of seconds.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+ """
+ meta = {"X-Delete-After": seconds}
+ self.set_object_metadata(cont, obj, meta, clear=True, prefix="")
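+
+ # Usage sketch (illustrative only; names are hypothetical). Marks the
+ # object for deletion one hour from now via the X-Delete-After header:
+ #
+ #     clt.delete_object_in_seconds("example", "report.txt", 3600)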
+
+
+ @assure_container
+ def get_object_metadata(self, container, obj):
+ """
+ Returns the metadata for the specified object as a dict.
+ """
+ return container.get_object_metadata(obj)
+
+
+ @assure_container
+ def set_object_metadata(self, container, obj, metadata, clear=False,
+ extra_info=None, prefix=None):
+ """
+ Accepts a dictionary of metadata key/value pairs and updates the
+ specified object metadata with them.
+
+ If 'clear' is True, any existing metadata is deleted and only the
+ passed metadata is retained. Otherwise, the values passed here update
+ the object's metadata.
+
+ The 'extra_info' parameter is included for backwards compatibility. It
+ is no longer used at all, and will not be modified with swiftclient
+ info, since swiftclient is not used any more.
+
+ By default, the standard object metadata prefix ('X-Object-Meta-') is
+ prepended to the header name if it isn't present. For non-standard
+ headers, you must include a non-None prefix, such as an empty string.
+ """
+ return container.set_object_metadata(obj, metadata, clear=clear,
+ prefix=prefix)
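+
+ # Usage sketch (illustrative only; names are hypothetical). Metadata keys
+ # are sent as X-Object-Meta-* headers unless a different prefix is
+ # supplied; clear=True replaces any existing metadata:
+ #
+ #     clt.set_object_metadata("example", "report.txt",
+ #             {"department": "sales"}, clear=True)
+ #     meta = clt.get_object_metadata("example", "report.txt")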
+
+
+
+class StorageObject(BaseResource):
+ """
+ This class represents an object stored in a Container.
+ """
+ def __init__(self, manager, info, *args, **kwargs):
+ self._container = None
+ return super(StorageObject, self).__init__(manager, info, *args,
+ **kwargs)
+
+
+ def __repr__(self):
+ return "