Merge pull request #4421 from chrismeyersfsu/improvement-inventory_bump

Improvement inventory bump
Chris Meyers 2016-12-14 10:24:40 -05:00 committed by GitHub
commit 34c3895768
8 changed files with 564 additions and 130 deletions


@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
@ -786,11 +786,11 @@ class AzureInventory(object):
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try 'pip install azure==2.0.0rc5') - {0}".format(HAS_AZURE_EXC))
sys.exit("The Azure python sdk is not installed (try 'pip install azure>=2.0.0rc5') - {0}".format(HAS_AZURE_EXC))
if LooseVersion(azure_compute_version) != LooseVersion(AZURE_MIN_VERSION):
if LooseVersion(azure_compute_version) < LooseVersion(AZURE_MIN_VERSION):
sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
"Do you have Azure == 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
"Do you have Azure >= 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
AzureInventory()


@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
@ -459,4 +459,3 @@ class CloudFormsInventory(object):
return json.dumps(data)
CloudFormsInventory()


@ -29,23 +29,41 @@ regions_exclude = us-gov-west-1,cn-north-1
# in the event of a collision.
destination_variable = public_dns_name
# This allows you to override the inventory_name with an ec2 variable, instead
# of using the destination_variable above. Addressing (aka ansible_ssh_host)
# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
#hostname_variable = tag_Name
# For servers inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory untill You set:
# vpc_destination_variable = 'private_ip_address'
# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until You set:
# vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address
# The following two settings allow flexible ansible host naming based on a
# python format string and a comma-separated list of ec2 tags. Note that:
#
# 1) If the tags referenced are not present for some instances, empty strings
# will be substituted in the format string.
# 2) This overrides both destination_variable and vpc_destination_variable.
#
#destination_format = {0}.{1}.example.com
#destination_format_tags = Name,environment
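# As a rough illustration (hypothetical tag values, not part of this file),
# the naming described above substitutes the listed tag values positionally
# into the format string, roughly like this Python sketch:
#
#   tags = {'Name': 'web01', 'environment': 'prod'}   # example instance tags
#   fmt = '{0}.{1}.example.com'                       # destination_format
#   fmt_tags = ['Name', 'environment']                # destination_format_tags
#   hostname = fmt.format(*[tags.get(t, '') for t in fmt_tags])
#   # -> 'web01.prod.example.com'; missing tags substitute empty strings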
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
# To exclude RDS instances from the inventory, uncomment and set to False.
#rds = False
rds = False
# To exclude ElastiCache instances from the inventory, uncomment and set to False.
elasticache = False
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
@ -55,10 +73,30 @@ route53 = False
# 'all_instances' to True to return all instances regardless of state.
all_instances = False
# By default, only EC2 instances in the 'running' state are returned. Specify
# EC2 instance states to return as a comma-separated list. This
# option is overridden when 'all_instances' is True.
# instance_states = pending, running, shutting-down, terminated, stopping, stopped
# By default, only RDS instances in the 'available' state are returned. Set
# 'all_rds_instances' to True to return all RDS instances regardless of state.
all_rds_instances = False
# Include RDS cluster information (Aurora etc.)
include_rds_clusters = False
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
# to True to return all ElastiCache clusters and nodes, regardless of state.
#
# Note that all_elasticache_nodes only applies to listed clusters. That means
# if you set all_elasticache_clusters to False, no nodes will be returned from
# unavailable clusters, regardless of their state and regardless of what you
# set for all_elasticache_nodes.
all_elasticache_replication_groups = False
all_elasticache_clusters = False
all_elasticache_nodes = False
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
@ -69,11 +107,18 @@ cache_path = ~/.ansible/tmp
# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
cache_max_age = 0
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace dashes ('-') in tags when creating groups to avoid issues with ansible
replace_dash_in_groups = True
# If set to true, any tag of the form "a,b,c" is expanded into a list
# and the results are used to create additional tag_* inventory groups.
expand_csv_tags = True
# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
@ -89,6 +134,10 @@ group_by_tag_none = True
group_by_route53_names = True
group_by_rds_engine = True
group_by_rds_parameter_group = True
group_by_elasticache_engine = True
group_by_elasticache_cluster = True
group_by_elasticache_parameter_group = True
group_by_elasticache_replication_group = True
# If you only want to include hosts that match a certain regular expression
# pattern_include = staging-*
@ -113,5 +162,28 @@ group_by_rds_parameter_group = True
# You can use wildcards in filter values also. Below will list instances whose
# tag Name value matches webservers1*
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
[credentials]
# The AWS credentials can optionally be specified here. Credentials specified
# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
# AWS_PROFILE is set, or if the boto_profile property above is set.
#
# Supplying AWS credentials here is not recommended, as it introduces
# non-trivial security concerns. When going down this route, please make sure
# to set access permissions for this file correctly, e.g. handle it the same
# way as you would a private SSH key.
#
# Unlike the boto and AWS configure files, this section does not support
# profiles.
#
# aws_access_key_id = AXXXXXXXXXXXXXX
# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
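# Illustration only (not part of this file): the precedence described above
# can be sketched roughly as follows, deferring to boto's own resolution
# whenever an environment variable or a boto profile is present:
#
#   import os
#   def use_ini_credentials(boto_profile=None):
#       if os.environ.get('AWS_ACCESS_KEY_ID') or os.environ.get('AWS_PROFILE') or boto_profile:
#           return False   # let boto resolve credentials itself
#       return True        # fall back to the aws_* values in this section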


@ -37,6 +37,7 @@ When run against a specific host, this script returns the following variables:
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
@ -131,6 +132,15 @@ from boto import elasticache
from boto import route53
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
pass
from six.moves import configparser
from collections import defaultdict
@ -265,6 +275,12 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include RDS cluster instances?
if config.has_option('ec2', 'include_rds_clusters'):
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
else:
self.include_rds_clusters = False
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
@ -474,6 +490,8 @@ class Ec2Inventory(object):
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
if self.include_rds_clusters:
self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
@ -527,6 +545,7 @@ class Ec2Inventory(object):
instance_ids = []
for reservation in reservations:
instance_ids.extend([instance.id for instance in reservation.instances])
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
@ -573,6 +592,65 @@ class Ec2Inventory(object):
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def include_rds_clusters_by_region(self, region):
if not HAS_BOTO3:
self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
"getting RDS clusters")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
marker, clusters = '', []
while marker is not None:
resp = client.describe_db_clusters(Marker=marker)
clusters.extend(resp["DBClusters"])
marker = resp.get('Marker', None)
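# The calling IAM user's ARN looks like arn:aws:iam::123456789012:user/SomeUser,
# so splitting on ':' and taking index 4 yields the AWS account id used below.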
account_id = boto.connect_iam().get_user().arn.split(':')[4]
c_dict = {}
for c in clusters:
# remove these datetime objects as there is no serialisation to json
# currently in place and we don't need the data yet
if 'EarliestRestorableTime' in c:
del c['EarliestRestorableTime']
if 'LatestRestorableTime' in c:
del c['LatestRestorableTime']
if self.ec2_instance_filters == {}:
matches_filter = True
else:
matches_filter = False
try:
# arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
tags = client.list_tags_for_resource(
ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
c['Tags'] = tags['TagList']
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
# get AWS tag key e.g. tag:env will be 'env'
tag_name = filter_key.split(":", 1)[1]
# Filter values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
if matches_filter:
# it matches a filter, so stop looking for further matches
break
except Exception as e:
if e.message.find('DBInstanceNotFound') >= 0:
# AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
# Ignore errors when trying to find tags for these
pass
# ignore empty clusters caused by AWS bug
if len(c['DBClusterMembers']) == 0:
continue
elif matches_filter:
c_dict[c['DBClusterIdentifier']] = c
self.inventory['db_clusters'] = c_dict
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to list the ElastiCache clusters (with
nodes' info) in a particular region.'''
@ -1235,7 +1313,7 @@ class Ec2Inventory(object):
elif key == 'ec2_tags':
for k, v in value.items():
if self.expand_csv_tags and ',' in v:
v = map(lambda x: x.strip(), v.split(','))
v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@ -1246,6 +1324,10 @@ class Ec2Inventory(object):
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {}
for k, v in value.items():
instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
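# Standalone illustration (not part of ec2.py): the list() wrapper added above
# matters on Python 3, where map() returns a lazy iterator rather than a list.
# A csv-style tag value then expands like this:
value = ' a, b ,c '
expanded = list(map(lambda x: x.strip(), value.split(',')))
print(expanded)  # -> ['a', 'b', 'c'], later used to build tag_* inventory groups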


@ -69,7 +69,8 @@ Examples:
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
@ -83,13 +84,19 @@ except ImportError:
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
USER_AGENT_VERSION="v2"
import sys
import os
import argparse
from time import time
import ConfigParser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
try:
import json
except ImportError:
@ -100,33 +107,103 @@ try:
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
print("GCE inventory script requires libcloud >= 0.13")
sys.exit(1)
sys.exit("GCE inventory script requires libcloud >= 0.13")
class CloudInventoryCache(object):
def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
cache_max_age=300):
cache_dir = os.path.expanduser(cache_path)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = os.path.join(cache_dir, cache_name)
self.cache_max_age = cache_max_age
def is_valid(self, max_age=None):
''' Determines if the cache file has expired or is still valid '''
if max_age is None:
max_age = self.cache_max_age
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + max_age) > current_time:
return True
return False
def get_all_data_from_cache(self, filename=''):
''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
data = ''
if not filename:
filename = self.cache_path_cache
with open(filename, 'r') as cache:
data = cache.read()
return json.loads(data)
def write_to_cache(self, data, filename=''):
''' Writes data to file as JSON. Returns True. '''
if not filename:
filename = self.cache_path_cache
json_data = json.dumps(data)
with open(filename, 'w') as cache:
cache.write(json_data)
return True
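# Usage sketch (illustration only; mirrors how GceInventory below drives the cache):
#   cache = CloudInventoryCache(cache_name='ansible-gce.cache',
#                               cache_path='~/.ansible/tmp', cache_max_age=300)
#   if cache.is_valid():
#       inventory = cache.get_all_data_from_cache()
#   else:
#       inventory = build_inventory_from_api()   # hypothetical refresh step
#       cache.write_to_cache(inventory)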
class GceInventory(object):
def __init__(self):
# Cache object
self.cache = None
# dictionary containing inventory read from disk
self.inventory = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.config = self.get_config()
self.driver = self.get_gce_driver()
self.ip_type = self.get_inventory_options()
if self.ip_type:
self.ip_type = self.ip_type.lower()
# Cache management
start_inventory_time = time()
cache_used = False
if self.args.refresh_cache or not self.cache.is_valid():
self.do_api_calls_update_cache()
else:
self.load_inventory_from_cache()
cache_used = True
self.inventory['_meta']['stats'] = {'use_cache': True}
self.inventory['_meta']['stats'] = {
'inventory_load_time': time() - start_inventory_time,
'cache_used': cache_used
}
# Just display data for specific host
if self.args.host:
print(self.json_format_dict(self.node_to_dict(
self.get_instance(self.args.host)),
pretty=self.args.pretty))
sys.exit(0)
zones = self.parse_env_zones()
# Otherwise, assume user wants all instances grouped
print(self.json_format_dict(self.group_instances(zones),
pretty=self.args.pretty))
print(self.json_format_dict(
self.inventory['_meta']['hostvars'][self.args.host],
pretty=self.args.pretty))
else:
# Otherwise, assume user wants all instances grouped
zones = self.parse_env_zones()
print(self.json_format_dict(self.inventory,
pretty=self.args.pretty))
sys.exit(0)
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
def get_config(self):
"""
Reads the settings from the gce.ini file.
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
@ -141,14 +218,57 @@ class GceInventory(object):
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
if 'inventory' not in config.sections():
config.add_section('inventory')
if 'cache' not in config.sections():
config.add_section('cache')
config.read(gce_ini_path)
#########
# Section added for processing ini settings
#########
# Set the instance_states filter based on config file options
self.instance_states = []
if config.has_option('gce', 'instance_states'):
states = config.get('gce', 'instance_states')
# Ignore if instance_states is an empty string.
if states:
self.instance_states = states.split(',')
# Caching
cache_path = config.get('cache', 'cache_path')
cache_max_age = config.getint('cache', 'cache_max_age')
# TODO(supertom): support project-specific caches
cache_name = 'ansible-gce.cache'
self.cache = CloudInventoryCache(cache_path=cache_path,
cache_max_age=cache_max_age,
cache_name=cache_name)
return config
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
"""
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = config.get('gce', 'libcloud_secrets')
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
@ -162,8 +282,7 @@ class GceInventory(object):
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
print(err)
sys.exit(1)
sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
@ -174,10 +293,10 @@ class GceInventory(object):
pass
if not secrets_found:
args = [
config.get('gce','gce_service_account_email_address'),
config.get('gce','gce_service_account_pem_file_path')
self.config.get('gce','gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path')
]
kwargs = {'project': config.get('gce', 'gce_project_id')}
kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
@ -211,6 +330,9 @@ class GceInventory(object):
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
@ -220,11 +342,17 @@ class GceInventory(object):
if inst is None:
return {}
if inst.extra['metadata'].has_key('items'):
if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
# default to external IP unless the user has specified they prefer internal
if self.ip_type == 'internal':
ssh_host = inst.private_ips[0]
else:
ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
@ -240,15 +368,36 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
'ansible_ssh_host': ssh_host
}
def get_instance(self, instance_name):
'''Gets details about a specific instance '''
def load_inventory_from_cache(self):
''' Loads inventory from JSON on disk. '''
try:
return self.driver.ex_get_node(instance_name)
self.inventory = self.cache.get_all_data_from_cache()
hosts = self.inventory['_meta']['hostvars']
except Exception as e:
return None
print(
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
% (self.cache.cache_path_cache))
raise
def do_api_calls_update_cache(self):
''' Do API calls and save data in cache. '''
zones = self.parse_env_zones()
data = self.group_instances(zones)
self.cache.write_to_cache(data)
self.inventory = data
def list_nodes(self):
all_nodes = []
params, more_results = {'maxResults': 500}, True
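# Pagination note: libcloud's GCE driver reads request parameters from
# connection.gce_params and (as used here) writes the response's pageToken
# back into the same dict, so the loop keeps fetching 500-node pages until
# no pageToken remains.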
while more_results:
self.driver.connection.gce_params=params
all_nodes.extend(self.driver.list_nodes())
more_results = 'pageToken' in params
return all_nodes
def group_instances(self, zones=None):
'''Group all instances'''
@ -256,7 +405,18 @@ class GceInventory(object):
meta = {}
meta["hostvars"] = {}
for node in self.driver.list_nodes():
for node in self.list_nodes():
# This check filters on the desired instance states defined in the
# config file with the instance_states config option.
#
# If the instance_states list is _empty_ then _ALL_ states are returned.
#
# If the instance_states list is _populated_ then check the current
# state against the instance_states list
if self.instance_states and not node.extra['status'] in self.instance_states:
continue
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
@ -268,7 +428,7 @@ class GceInventory(object):
if zones and zone not in zones:
continue
if groups.has_key(zone): groups[zone].append(name)
if zone in groups: groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
@ -277,25 +437,25 @@ class GceInventory(object):
tag = t[6:]
else:
tag = 'tag_%s' % t
if groups.has_key(tag): groups[tag].append(name)
if tag in groups: groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if groups.has_key(net): groups[net].append(name)
if net in groups: groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
if groups.has_key(machine_type): groups[machine_type].append(name)
if machine_type in groups: groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
if groups.has_key(image): groups[image].append(name)
if image in groups: groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if groups.has_key(stat): groups[stat].append(name)
if stat in groups: groups[stat].append(name)
else: groups[stat] = [name]
groups["_meta"] = meta
@ -311,6 +471,6 @@ class GceInventory(object):
else:
return json.dumps(data)
# Run the script
GceInventory()
if __name__ == '__main__':
GceInventory()


@ -2,7 +2,8 @@
# Copyright (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
# Copyright (c) 2013, Jesse Keating <jesse.keating@rackspace.com>
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
# Copyright (c) 2016, Rackspace Australia
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -18,7 +19,7 @@
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# The OpenStack Inventory module uses os-client-config for configuration.
# https://github.com/stackforge/os-client-config
# https://github.com/openstack/os-client-config
# This means it will either:
# - Respect normal OS_* environment variables like other OpenStack tools
# - Read values from a clouds.yaml file.
@ -32,12 +33,24 @@
# all of them and present them as one contiguous inventory.
#
# See the adjacent openstack.yml file for an example config file
# There are a few ansible inventory specific options that can be set in
# the inventory section.
# expand_hostvars controls whether or not the inventory will make extra API
# calls to fill out additional information about each server
# use_hostnames changes the behavior from registering every host with its UUID
# and making a group of its hostname to only doing this if the
# hostname in question has more than one server
# fail_on_errors causes the inventory to fail and return no hosts if one cloud
# has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True)
import argparse
import collections
import os
import sys
import time
from distutils.version import StrictVersion
try:
import json
@ -46,89 +59,137 @@ except:
import os_client_config
import shade
import shade.inventory
CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml']
class OpenStackInventory(object):
def get_groups_from_server(server_vars, namegroup=True):
groups = []
def __init__(self, private=False, refresh=False):
config_files = os_client_config.config.CONFIG_FILES
config_files.append('/etc/ansible/openstack.yml')
self.openstack_config = os_client_config.config.OpenStackConfig(
config_files)
self.clouds = shade.openstack_clouds(self.openstack_config)
self.private = private
self.refresh = refresh
region = server_vars['region']
cloud = server_vars['cloud']
metadata = server_vars.get('metadata', {})
self.cache_max_age = self.openstack_config.get_cache_max_age()
cache_path = self.openstack_config.get_cache_path()
# Create a group for the cloud
groups.append(cloud)
# Cache related
if not os.path.exists(cache_path):
os.makedirs(cache_path)
self.cache_file = os.path.join(cache_path, "ansible-inventory.cache")
# Create a group on region
groups.append(region)
def is_cache_stale(self):
''' Determines if cache file has expired, or if it is still valid '''
if os.path.isfile(self.cache_file):
mod_time = os.path.getmtime(self.cache_file)
current_time = time.time()
if (mod_time + self.cache_max_age) > current_time:
return False
return True
# And one by cloud_region
groups.append("%s_%s" % (cloud, region))
def get_host_groups(self):
if self.refresh or self.is_cache_stale():
groups = self.get_host_groups_from_cloud()
self.write_cache(groups)
# Check if group metadata key in servers' metadata
if 'group' in metadata:
groups.append(metadata['group'])
for extra_group in metadata.get('groups', '').split(','):
if extra_group:
groups.append(extra_group.strip())
groups.append('instance-%s' % server_vars['id'])
if namegroup:
groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(metadata.items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud, region, az))
return groups
def get_host_groups(inventory, refresh=False):
(cache_file, cache_expiration_time) = get_cache_settings()
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
open(cache_file, 'w').write(groups)
else:
groups = open(cache_file, 'r').read()
return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'],
openstack=server)
for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key)
def get_host_groups_from_cloud(inventory):
groups = collections.defaultdict(list)
firstpass = collections.defaultdict(list)
hostvars = {}
list_args = {}
if hasattr(inventory, 'extra_config'):
use_hostnames = inventory.extra_config['use_hostnames']
list_args['expand'] = inventory.extra_config['expand_hostvars']
if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"):
list_args['fail_on_cloud_config'] = \
inventory.extra_config['fail_on_errors']
else:
use_hostnames = False
for server in inventory.list_hosts(**list_args):
if 'interface_ip' not in server:
continue
firstpass[server['name']].append(server)
for name, servers in firstpass.items():
if len(servers) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
return json.load(open(self.cache_file, 'r'))
return groups
server_ids = set()
# Trap for duplicate results
for server in servers:
server_ids.add(server['id'])
if len(server_ids) == 1 and use_hostnames:
append_hostvars(hostvars, groups, name, servers[0])
else:
for server in servers:
append_hostvars(
hostvars, groups, server['id'], server,
namegroup=True)
groups['_meta'] = {'hostvars': hostvars}
return groups
def write_cache(self, groups):
with open(self.cache_file, 'w') as cache_file:
cache_file.write(self.json_format_dict(groups))
def get_host_groups_from_cloud(self):
groups = collections.defaultdict(list)
hostvars = collections.defaultdict(dict)
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
''' Determines if cache file has expired, or if it is still valid '''
if refresh:
return True
if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0:
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
if (mod_time + cache_expiration_time) > current_time:
return False
return True
for cloud in self.clouds:
cloud.private = cloud.private or self.private
# Cycle on servers
for server in cloud.list_servers():
def get_cache_settings():
config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
return (cache_file, cache_expiration_time)
meta = cloud.get_server_meta(server)
if 'interface_ip' not in meta['server_vars']:
# skip this host if it doesn't have a network address
continue
server_vars = meta['server_vars']
hostvars[server.name][
'ansible_ssh_host'] = server_vars['interface_ip']
hostvars[server.name]['openstack'] = server_vars
for group in meta['groups']:
groups[group].append(server.name)
if hostvars:
groups['_meta'] = {'hostvars': hostvars}
return groups
def json_format_dict(self, data):
return json.dumps(data, sort_keys=True, indent=2)
def list_instances(self):
groups = self.get_host_groups()
# Return server list
print(self.json_format_dict(groups))
def get_host(self, hostname):
groups = self.get_host_groups()
hostvars = groups['_meta']['hostvars']
if hostname in hostvars:
print(self.json_format_dict(hostvars[hostname]))
def to_json(in_dict):
return json.dumps(in_dict, sort_keys=True, indent=2)
def parse_args():
@ -138,21 +199,43 @@ def parse_args():
help='Use private address for ansible host')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
return parser.parse_args()
def main():
args = parse_args()
try:
inventory = OpenStackInventory(args.private, args.refresh)
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
shade.simple_logging(debug=args.debug)
inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
)
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
config_key='ansible',
config_defaults={
'use_hostnames': False,
'expand_hostvars': True,
'fail_on_errors': True,
}
))
inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list:
inventory.list_instances()
output = get_host_groups(inventory, refresh=args.refresh)
elif args.host:
inventory.get_host(args.host)
output = to_json(inventory.get_host(args.host))
print(output)
except shade.OpenStackCloudException as e:
sys.stderr.write('%s\n' % e.message)
sys.exit(1)


@ -26,3 +26,7 @@ clouds:
username: stack
password: stack
project_name: stack
ansible:
use_hostnames: False
expand_hostvars: True
fail_on_errors: True


@ -35,11 +35,12 @@ import json
import logging
import optparse
import os
import ssl
import sys
import time
import ConfigParser
from six import text_type
from six import text_type, string_types
# Disable logging messages triggered by pSphere/suds.
try:
@ -54,7 +55,7 @@ logging.getLogger('suds').addHandler(NullHandler())
from psphere.client import Client
from psphere.errors import ObjectNotFoundError
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network, ClusterComputeResource
from suds.sudsobject import Object as SudsObject
@ -90,6 +91,28 @@ class VMwareInventory(object):
auth_password = os.environ.get('VMWARE_PASSWORD')
if not auth_password and self.config.has_option('auth', 'password'):
auth_password = self.config.get('auth', 'password')
sslcheck = os.environ.get('VMWARE_SSLCHECK')
if not sslcheck and self.config.has_option('auth', 'sslcheck'):
sslcheck = self.config.get('auth', 'sslcheck')
if not sslcheck:
sslcheck = True
else:
if sslcheck.lower() in ['no', 'false']:
sslcheck = False
else:
sslcheck = True
# Limit the clusters being scanned
self.filter_clusters = os.environ.get('VMWARE_CLUSTERS')
if not self.filter_clusters and self.config.has_option('defaults', 'clusters'):
self.filter_clusters = self.config.get('defaults', 'clusters')
if self.filter_clusters:
self.filter_clusters = [x.strip() for x in self.filter_clusters.split(',') if x.strip()]
# Override certificate checks
if not sslcheck:
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
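# Note: swapping in the unverified context disables TLS certificate
# validation process-wide for this script, which is why it is gated on
# the sslcheck option above.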
# Create the VMware client connection.
self.client = Client(auth_host, auth_user, auth_password)
@ -137,7 +160,7 @@ class VMwareInventory(object):
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep).items())
elif isinstance(v, (list, tuple)):
if all([isinstance(x, basestring) for x in v]):
if all([isinstance(x, string_types) for x in v]):
items.append((new_key, v))
else:
items.append((new_key, v))
@ -185,7 +208,7 @@ class VMwareInventory(object):
if obj_info != ():
l.append(obj_info)
return l
elif isinstance(obj, (type(None), bool, int, long, float, basestring)):
elif isinstance(obj, (type(None), bool, int, long, float, string_types)):
return obj
else:
return ()
@ -314,8 +337,19 @@ class VMwareInventory(object):
else:
prefix_filter = None
if self.filter_clusters:
# Loop through clusters and find hosts:
hosts = []
for cluster in ClusterComputeResource.all(self.client):
if cluster.name in self.filter_clusters:
for host in cluster.host:
hosts.append(host)
else:
# Get list of all physical hosts
hosts = HostSystem.all(self.client)
# Loop through physical hosts:
for host in HostSystem.all(self.client):
for host in hosts:
if not self.guests_only:
self._add_host(inv, 'all', host.name)