Update inventory scripts

ec2
- added support for tags and instance attributes
- allow filtering RDS instances by tags
- add option to group by platform
- set missing defaults
- make cache unique to the script being run
- bug fixes
- implement AND'd filters
azure_rm
- minor python 3 upgrades
cloudforms
- minor regex fix
foreman
- several new configurables
- changes to caching
gce
- python 3 upgrades
- added gce_subnetwork param
openstack
- added `--cloud` parameter
ovirt4
- obtain defaults from env vars
vmware_inventory
- changed imports
- allow for custom filters
- changed host_filters
- error handling
- python 3 upgrades
AlanCoding 2018-02-26 12:45:54 -05:00
parent 9493b72f29
commit b878a844d0
11 changed files with 662 additions and 357 deletions

azure_rm.py

@ -187,14 +187,18 @@ Version: 1.0.0
'''
import argparse
import ConfigParser
import json
import os
import re
import sys
import inspect
import traceback
try:
# python2
import ConfigParser as cp
except ImportError:
# python3
import configparser as cp
from packaging.version import Version
@ -326,7 +330,7 @@ class AzureRM(object):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = ConfigParser.ConfigParser()
config = cp.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
@ -616,6 +620,7 @@ class AzureInventory(object):
# Add windows details
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
host_vars['ansible_connection'] = 'winrm'
host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
@ -795,7 +800,7 @@ class AzureInventory(object):
config = None
settings = None
try:
config = ConfigParser.ConfigParser()
config = cp.ConfigParser()
config.read(path)
except:
pass
@ -838,9 +843,9 @@ class AzureInventory(object):
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
regex += r"\-"
return re.sub(regex + "]", "_", word)
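The raw-string change above only silences Python 3.6+'s invalid-escape-sequence warning; matching behavior is unchanged. For illustration, a standalone sketch of the helper (the keyword argument is hypothetical and stands in for the script's replace_dash_in_groups attribute):

import re

def to_safe(word, replace_dash_in_groups=True):
    # Raw strings keep '\_' and '\-' from triggering the Python 3.6+
    # DeprecationWarning; the character class itself is identical.
    regex = r"[^A-Za-z0-9\_"
    if not replace_dash_in_groups:
        regex += r"\-"
    return re.sub(regex + "]", "_", word)

print(to_safe("rg-prod.web/01"))                                # rg_prod_web_01
print(to_safe("rg-prod.web/01", replace_dash_in_groups=False))  # rg-prod_web_01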

cloudforms.py

@ -468,7 +468,7 @@ class CloudFormsInventory(object):
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
"""
if self.cloudforms_clean_group_keys:
regex = "[^A-Za-z0-9\_]"
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
else:
return word

ec2.ini

@ -10,8 +10,9 @@
# AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1, us-west-1, us-west-2'
# 'auto' is AWS_REGION or AWS_DEFAULT_REGION environment variable.
# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not
# provide the 'regions_exclude' option. If this is set to 'auto', the AWS_REGION
# or AWS_DEFAULT_REGION environment variable will be read to determine the region.
regions = all
regions_exclude = us-gov-west-1, cn-north-1
@ -134,6 +135,7 @@ group_by_aws_account = False
group_by_ami_id = True
group_by_instance_type = True
group_by_instance_state = False
group_by_platform = True
group_by_key_pair = True
group_by_vpc_id = True
group_by_security_group = True
@ -157,7 +159,9 @@ group_by_elasticache_replication_group = True
# inventory. For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '=', to list multiple filters use
# a list separated by commas. See examples below.
# a list separated by commas. To "AND" criteria together, use "&". Note that
# "AND" filters are redundant with stack_filters, so combining the two is not allowed.
# See examples below.
# If you want to apply multiple filters simultaneously, set stack_filters to
# True. Default behaviour is to combine the results of all filters. Stacking
@ -179,6 +183,18 @@ stack_filters = False
# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# Retrieve only instances of type t1.micro that also have tag env=stage
# instance_filters = instance-type=t1.micro&tag:env=stage
# Retrieve instances of type t1.micro AND tag env=stage, as well as any instances
# that are of type m3.large, regardless of the env tag
# instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large
# An IAM role can be assumed, so all requests are run as that role.
# This can be useful for connecting across different accounts, or to limit user
# access
# iam_role = role-arn
# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
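For illustration, a minimal sketch of how an instance_filters value such as the examples above decomposes under the new syntax: commas separate independently applied filter sets, while '&' joins criteria that must all hold within one set. The function name is hypothetical; ec2.py performs the equivalent parsing inline.

def parse_instance_filters(raw):
    parsed = []
    for filter_set in [f for f in raw.split(',') if f]:
        filters = {}
        for item in filter_set.strip().split('&'):
            item = item.strip()
            if not item or '=' not in item:
                continue
            key, value = [x.strip() for x in item.split('=', 1)]
            if key:
                filters[key] = value
        parsed.append(filters)
    return parsed

print(parse_instance_filters('instance-type=t1.micro&tag:env=stage,instance-type=m3.large'))
# [{'instance-type': 't1.micro', 'tag:env': 'stage'}, {'instance-type': 'm3.large'}]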

ec2.py

@ -12,9 +12,9 @@ variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
optional region environement variable if region is 'auto'
Optional region environment variable if region is 'auto'
This script also assumes there is an ec2.ini file alongside it. To specify a
This script also assumes that there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
@ -95,12 +95,37 @@ consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
In addition, if an instance has AWS tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
When destination_format and destination_format_tags are specified,
the destination address is built from instance tags and attributes.
Each tag name is looked up first among the user-defined tags, then
among the instance attributes; if neither is found, 'nil' is used
instead. For example, given the host:
'my_instance': {
'region': 'us-east-1', # attribute
'availability_zone': 'us-east-1a', # attribute
'private_dns_name': '172.31.0.1', # attribute
'ec2_tag_deployment': 'blue', # tag
'ec2_tag_clusterid': 'ansible', # tag
'ec2_tag_Name': 'webserver', # tag
...
}
Inside the ec2.ini file, the following settings are specified:
...
destination_format: {0}-{1}-{2}-{3}
destination_format_tags: Name,clusterid,deployment,private_dns_name
...
These settings would produce the following destination:
'webserver-ansible-blue-172.31.0.1'
'''
# (c) 2012, Peter Sankauskas
@ -132,13 +157,14 @@ from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
from boto import sts
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3
import boto3 # noqa
HAS_BOTO3 = True
except ImportError:
pass
@ -151,11 +177,65 @@ try:
except ImportError:
import simplejson as json
DEFAULTS = {
'all_elasticache_clusters': 'False',
'all_elasticache_nodes': 'False',
'all_elasticache_replication_groups': 'False',
'all_instances': 'False',
'all_rds_instances': 'False',
'aws_access_key_id': None,
'aws_secret_access_key': None,
'aws_security_token': None,
'boto_profile': None,
'cache_max_age': '300',
'cache_path': '~/.ansible/tmp',
'destination_variable': 'public_dns_name',
'elasticache': 'True',
'eucalyptus': 'False',
'eucalyptus_host': None,
'expand_csv_tags': 'False',
'group_by_ami_id': 'True',
'group_by_availability_zone': 'True',
'group_by_aws_account': 'False',
'group_by_elasticache_cluster': 'True',
'group_by_elasticache_engine': 'True',
'group_by_elasticache_parameter_group': 'True',
'group_by_elasticache_replication_group': 'True',
'group_by_instance_id': 'True',
'group_by_instance_state': 'False',
'group_by_instance_type': 'True',
'group_by_key_pair': 'True',
'group_by_platform': 'True',
'group_by_rds_engine': 'True',
'group_by_rds_parameter_group': 'True',
'group_by_region': 'True',
'group_by_route53_names': 'True',
'group_by_security_group': 'True',
'group_by_tag_keys': 'True',
'group_by_tag_none': 'True',
'group_by_vpc_id': 'True',
'hostname_variable': None,
'iam_role': None,
'include_rds_clusters': 'False',
'nested_groups': 'False',
'pattern_exclude': None,
'pattern_include': None,
'rds': 'False',
'regions': 'all',
'regions_exclude': 'us-gov-west-1, cn-north-1',
'replace_dash_in_groups': 'True',
'route53': 'False',
'route53_excluded_zones': '',
'route53_hostnames': None,
'stack_filters': 'False',
'vpc_destination_variable': 'ip_address'
}
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
@ -204,7 +284,6 @@ class Ec2Inventory(object):
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
@ -217,7 +296,6 @@ class Ec2Inventory(object):
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
@ -225,35 +303,50 @@ class Ec2Inventory(object):
scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '')
defaults = {'ec2': {
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
defaults = {
'ec2': {
'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'),
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
}
}
if six.PY3:
config = configparser.ConfigParser()
config = configparser.ConfigParser(DEFAULTS)
else:
config = configparser.SafeConfigParser()
config = configparser.SafeConfigParser(DEFAULTS)
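# Seeding the parser with DEFAULTS means every config.get()/getboolean()
# call below succeeds even when ec2.ini omits an option, which is why the
# per-option has_option fallbacks that used to follow each read are gone.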
ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
config.read(ec2_ini_path)
if not os.path.isfile(ec2_ini_path):
ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback'])
if os.path.isfile(ec2_ini_path):
config.read(ec2_ini_path)
# Add empty sections if they don't exist
try:
config.add_section('ec2')
except configparser.DuplicateSectionError:
pass
try:
config.add_section('credentials')
except configparser.DuplicateSectionError:
pass
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
else:
configRegions_exclude = config.get('ec2', 'regions_exclude')
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
@ -263,16 +356,12 @@ class Ec2Inventory(object):
env_region = os.environ.get('AWS_REGION')
if env_region is None:
env_region = os.environ.get('AWS_DEFAULT_REGION')
self.regions = [ env_region ]
self.regions = [env_region]
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'hostname_variable'):
self.hostname_variable = config.get('ec2', 'hostname_variable')
else:
self.hostname_variable = None
self.hostname_variable = config.get('ec2', 'hostname_variable')
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
@ -284,36 +373,22 @@ class Ec2Inventory(object):
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
if config.has_option('ec2', 'route53_hostnames'):
self.route53_hostnames = config.get('ec2', 'route53_hostnames')
else:
self.route53_hostnames = None
self.route53_hostnames = config.get('ec2', 'route53_hostnames')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include RDS cluster instances?
if config.has_option('ec2', 'include_rds_clusters'):
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
else:
self.include_rds_clusters = False
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
self.all_instances = config.getboolean('ec2', 'all_instances')
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
@ -338,49 +413,30 @@ class Ec2Inventory(object):
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
# boto configuration profile (prefer CLI argument then environment variables then config file)
self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE')
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
self.boto_profile = self.args.boto_profile or \
os.environ.get('AWS_PROFILE') or \
config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')):
if config.has_option('credentials', 'aws_access_key_id'):
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
else:
aws_access_key_id = None
if config.has_option('credentials', 'aws_secret_access_key'):
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
else:
aws_secret_access_key = None
if config.has_option('credentials', 'aws_security_token'):
aws_security_token = config.get('credentials', 'aws_security_token')
else:
aws_security_token = None
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
aws_security_token = config.get('credentials', 'aws_security_token')
if aws_access_key_id:
self.credentials = {
'aws_access_key_id': aws_access_key_id,
@ -400,111 +456,79 @@ class Ec2Inventory(object):
cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
if cache_id:
cache_name = '%s-%s' % (cache_name, cache_id)
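# Append a short hash of this script's path so that multiple copies of
# the script keep separate cache files.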
cache_name += '-' + str(abs(hash(__file__)))[1:7]
self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if config.has_option('ec2', 'expand_csv_tags'):
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
else:
self.expand_csv_tags = False
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
self.nested_groups = config.getboolean('ec2', 'nested_groups')
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
# IAM role to assume for connection
self.iam_role = config.get('ec2', 'iam_role')
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_instance_state',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
'group_by_aws_account',
]
group_by_options = [a for a in DEFAULTS if a.startswith('group_by')]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
setattr(self, option, config.getboolean('ec2', option))
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
self.pattern_include = config.get('ec2', 'pattern_include')
if self.pattern_include:
self.pattern_include = re.compile(self.pattern_include)
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
self.pattern_exclude = config.get('ec2', 'pattern_exclude')
if self.pattern_exclude:
self.pattern_exclude = re.compile(self.pattern_exclude)
# Do we want to stack multiple filters?
if config.has_option('ec2', 'stack_filters'):
self.stack_filters = config.getboolean('ec2', 'stack_filters')
else:
self.stack_filters = False
self.stack_filters = config.getboolean('ec2', 'stack_filters')
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
self.ec2_instance_filters = []
if config.has_option('ec2', 'instance_filters'):
filters = config.get('ec2', 'instance_filters')
filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
if self.stack_filters and '&' in filters:
self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n")
for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
filter_sets = [f for f in filters.split(',') if f]
for filter_set in filter_sets:
filters = {}
filter_set = filter_set.strip()
for instance_filter in filter_set.split("&"):
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
filters[filter_key] = filter_value
self.ec2_instance_filters.append(filters.copy())
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
@ -548,6 +572,13 @@ class Ec2Inventory(object):
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
if self.iam_role:
sts_conn = sts.connect_to_region(region, **connect_args)
role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory')
connect_args['aws_access_key_id'] = role.credentials.access_key
connect_args['aws_secret_access_key'] = role.credentials.secret_key
connect_args['security_token'] = role.credentials.session_token
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
@ -564,12 +595,12 @@ class Ec2Inventory(object):
if self.ec2_instance_filters:
if self.stack_filters:
filters_dict = {}
for filter_key, filter_values in self.ec2_instance_filters.items():
filters_dict[filter_key] = filter_values
reservations.extend(conn.get_all_instances(filters = filters_dict))
for filters in self.ec2_instance_filters:
filters_dict.update(filters)
reservations.extend(conn.get_all_instances(filters=filters_dict))
else:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
for filters in self.ec2_instance_filters:
reservations.extend(conn.get_all_instances(filters=filters))
else:
reservations = conn.get_all_instances()
@ -583,7 +614,7 @@ class Ec2Inventory(object):
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
tags_by_instance_id = defaultdict(dict)
for tag in tags:
@ -605,10 +636,44 @@ class Ec2Inventory(object):
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def tags_match_filters(self, tags):
''' return True if given tags match configured filters '''
if not self.ec2_instance_filters:
return True
for filters in self.ec2_instance_filters:
for filter_name, filter_value in filters.items():
if filter_name[:4] != 'tag:':
continue
filter_name = filter_name[4:]
if filter_name not in tags:
if self.stack_filters:
return False
continue
if isinstance(filter_value, list):
if self.stack_filters and tags[filter_name] not in filter_value:
return False
if not self.stack_filters and tags[filter_name] in filter_value:
return True
if isinstance(filter_value, six.string_types):
if self.stack_filters and tags[filter_name] != filter_value:
return False
if not self.stack_filters and tags[filter_name] == filter_value:
return True
return self.stack_filters
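# Worked example (illustrative values): with filters
# [{'tag:env': 'stage'}, {'tag:role': 'db'}] and tags {'env': 'stage'},
# stack_filters=False matches (any one filter set suffices), while
# stack_filters=True fails because the 'role' tag filter is unsatisfied.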
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
if not HAS_BOTO3:
self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again",
"getting RDS instances")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
db_instances = client.describe_db_instances()
try:
conn = self.connect_to_aws(rds, region)
if conn:
@ -616,8 +681,15 @@ class Ec2Inventory(object):
while True:
instances = conn.get_all_dbinstances(marker=marker)
marker = instances.marker
for instance in instances:
self.add_rds_instance(instance, region)
for index, instance in enumerate(instances):
# Add tags to instances.
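# (pairs each boto2 RDS instance with boto3's describe_db_instances
# result by position to recover the ARN used for the tag lookup)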
instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']
instance.tags = {}
for tag in tags:
instance.tags[tag['Key']] = tag['Value']
if self.tags_match_filters(instance.tags):
self.add_rds_instance(instance, region)
if not marker:
break
except boto.exception.BotoServerError as e:
@ -625,7 +697,11 @@ class Ec2Inventory(object):
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
elif e.error_code == "OptInRequired":
error = "RDS hasn't been enabled for this account yet. " \
"You must either log in to the RDS service through the AWS console to enable it, " \
"or set 'rds = False' in ec2.ini"
elif not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
@ -652,7 +728,7 @@ class Ec2Inventory(object):
if 'LatestRestorableTime' in c:
del c['LatestRestorableTime']
if self.ec2_instance_filters == {}:
if not self.ec2_instance_filters:
matches_filter = True
else:
matches_filter = False
@ -664,14 +740,18 @@ class Ec2Inventory(object):
c['Tags'] = tags['TagList']
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
# get AWS tag key e.g. tag:env will be 'env'
tag_name = filter_key.split(":", 1)[1]
# Filter values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
for filters in self.ec2_instance_filters:
for filter_key, filter_values in filters.items():
# get AWS tag key e.g. tag:env will be 'env'
tag_name = filter_key.split(":", 1)[1]
# Filter values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
if matches_filter:
# it matches a filter, so stop looking for further matches
break
if matches_filter:
# it matches a filter, so stop looking for further matches
break
except Exception as e:
@ -692,7 +772,7 @@ class Ec2Inventory(object):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
@ -707,7 +787,11 @@ class Ec2Inventory(object):
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
elif e.error_code == "OptInRequired":
error = "ElastiCache hasn't been enabled for this account yet. " \
"You must either log in to the ElastiCache service through the AWS console to enable it, " \
"or set 'elasticache = False' in ec2.ini"
elif not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
@ -728,7 +812,7 @@ class Ec2Inventory(object):
''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
@ -767,7 +851,7 @@ class Ec2Inventory(object):
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))]
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
@ -800,8 +884,22 @@ class Ec2Inventory(object):
return
# Select the best destination address
# When destination_format and destination_format_tags are specified,
# the following code looks up each tag on the instance first, then
# falls back to instance attributes, and finally assigns 'nil' if
# neither is found.
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
dest_vars = []
inst_tags = getattr(instance, 'tags')
for tag in self.destination_format_tags:
if tag in inst_tags:
dest_vars.append(inst_tags[tag])
elif hasattr(instance, tag):
dest_vars.append(getattr(instance, tag))
else:
dest_vars.append('nil')
dest = self.destination_format.format(*dest_vars)
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
@ -891,6 +989,16 @@ class Ec2Inventory(object):
if self.nested_groups:
self.push_group(self.inventory, 'instance_states', state_name)
# Inventory: Group by platform
if self.group_by_platform:
if instance.platform:
platform = self.to_safe('platform_' + instance.platform)
else:
platform = self.to_safe('platform_undefined')
self.push(self.inventory, platform, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'platforms', platform)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
@ -915,11 +1023,11 @@ class Ec2Inventory(object):
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by AWS account ID
if self.group_by_aws_account:
self.push(self.inventory, self.aws_account_id, dest)
self.push(self.inventory, self.aws_account_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'accounts', self.aws_account_id)
@ -960,8 +1068,7 @@ class Ec2Inventory(object):
self.push(self.inventory, 'ec2', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
@ -1040,8 +1147,25 @@ class Ec2Inventory(object):
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by engine
if self.group_by_rds_engine:
@ -1055,11 +1179,17 @@ class Ec2Inventory(object):
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
@ -1310,8 +1440,7 @@ class Ec2Inventory(object):
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
self.route53_records = {}
@ -1328,14 +1457,13 @@ class Ec2Inventory(object):
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
instance_attributes = ['public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address']
name_list = set()
@ -1364,7 +1492,7 @@ class Ec2Inventory(object):
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
elif isinstance(value, (int, bool)):
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
@ -1391,13 +1519,13 @@ class Ec2Inventory(object):
elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {}
for k, v in value.items():
instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
# print key
# print type(value)
# print value
instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
@ -1441,9 +1569,9 @@ class Ec2Inventory(object):
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
@ -1469,7 +1597,7 @@ class Ec2Inventory(object):
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
elif isinstance(value, (int, bool)):
host_info[key] = value
# Target: Everything
@ -1495,10 +1623,10 @@ class Ec2Inventory(object):
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
@ -1553,9 +1681,9 @@ class Ec2Inventory(object):
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
regex += r"\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):

foreman.ini

@ -68,6 +68,21 @@
#
# foreman_hostgroup_myapp_webtier_datacenter1
#
# If the parameter want_hostcollections is set to true, the
# collections each host is in are created as Ansible groups with a
# foreman_hostcollection prefix, all lowercase and with problematic
# characters removed. So e.g. the Foreman host collection
#
# Patch Window Thursday
#
# would turn into the Ansible group:
#
# foreman_hostcollection_patchwindowthursday
#
# If the parameter host_filters is set, it will be used as the
# "search" parameter for the /api/v2/hosts call. This can be used to
# restrict the list of returned hosts, as shown below.
#
# Furthermore Ansible groups can be created on the fly using the
# *group_patterns* variable in *foreman.ini* so that you can build up
# hierarchies using parameters on the hostgroup and host variables.
@ -108,15 +123,38 @@ user = foreman
password = secret
ssl_verify = True
# Retrieve only hosts from the organization "Web Engineering".
# host_filters = organization="Web Engineering"
# Retrieve only hosts from the organization "Web Engineering" that are
# also in the host collection "Apache Servers".
# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
[ansible]
group_patterns = ["{app}-{tier}-{color}",
"{app}-{color}",
"{app}",
"{tier}"]
group_prefix = foreman_
# Whether to fetch facts from Foreman and store them on the host
want_facts = True
# Whether to create Ansible groups for host collections. Only tested
# with Katello (Red Hat Satellite). Disabled by default to not break
# the script for stand-alone Foreman.
want_hostcollections = False
# Whether to interpret global parameter values as JSON (if possible;
# otherwise they are taken as-is). Only tested with Katello (Red Hat Satellite).
# This allows lists, dictionaries and more complicated structures to be
# defined as variables by entering them as JSON strings in Foreman parameters.
# Disabled by default, as the change would otherwise not be backward compatible.
rich_params = False
[cache]
path = .
max_age = 60
# Whether to scan Foreman for recently created hosts and add them to the inventory cache
scan_new_hosts = True

foreman.py

@ -46,6 +46,7 @@ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
from requests.auth import HTTPBasicAuth
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string"""
@ -54,6 +55,7 @@ def json_format_dict(data, pretty=False):
else:
return json.dumps(data)
class ForemanInventory(object):
def __init__(self):
@ -62,6 +64,7 @@ class ForemanInventory(object):
self.params = dict() # Params of each host
self.facts = dict() # Facts of each host
self.hostgroups = dict() # host groups
self.hostcollections = dict() # host collections
self.session = None # Requests session
self.config_paths = [
"/etc/ansible/foreman.ini",
@ -105,6 +108,22 @@ class ForemanInventory(object):
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_facts = True
try:
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_hostcollections = False
# Do we want parameters to be interpreted if possible as JSON? (no by default)
try:
self.rich_params = config.getboolean('ansible', 'rich_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.rich_params = False
try:
self.host_filters = config.get('foreman', 'host_filters')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.host_filters = None
# Cache related
try:
cache_path = os.path.expanduser(config.get('cache', 'path'))
@ -115,10 +134,16 @@ class ForemanInventory(object):
self.cache_path_inventory = cache_path + "/%s.index" % script
self.cache_path_params = cache_path + "/%s.params" % script
self.cache_path_facts = cache_path + "/%s.facts" % script
self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
try:
self.cache_max_age = config.getint('cache', 'max_age')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.cache_max_age = 60
try:
self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.scan_new_hosts = False
return True
def parse_cli_args(self):
@ -138,12 +163,17 @@ class ForemanInventory(object):
self.session.verify = self.foreman_ssl_verify
return self.session
def _get_json(self, url, ignore_errors=None):
def _get_json(self, url, ignore_errors=None, params=None):
if params is None:
params = {}
params['per_page'] = 250
page = 1
results = []
s = self._get_session()
while True:
ret = s.get(url, params={'page': page, 'per_page': 250})
params['page'] = page
ret = s.get(url, params=params)
if ignore_errors and ret.status_code in ignore_errors:
break
ret.raise_for_status()
@ -156,7 +186,7 @@ class ForemanInventory(object):
return json['results']
# The list of all hosts is returned paginated
results = results + json['results']
if len(results) >= json['total']:
if len(results) >= json['subtotal']:
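# 'subtotal' is the count with the search filter applied; the unfiltered
# 'total' can be larger, so comparing against it would keep requesting
# pages beyond the filtered result set.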
break
page += 1
if len(json['results']) == 0:
@ -167,22 +197,35 @@ class ForemanInventory(object):
return results
def _get_hosts(self):
return self._get_json("%s/api/v2/hosts" % self.foreman_url)
url = "%s/api/v2/hosts" % self.foreman_url
def _get_all_params_by_id(self, hid):
params = {}
if self.host_filters:
params['search'] = self.host_filters
return self._get_json(url, params=params)
def _get_host_data_by_id(self, hid):
url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
ret = self._get_json(url, [404])
if ret == []:
ret = {}
return ret.get('all_parameters', {})
return self._get_json(url)
def _resolve_params(self, host):
"""Fetch host params and convert to dict"""
def _get_facts_by_id(self, hid):
url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
return self._get_json(url)
def _resolve_params(self, host_params):
"""Convert host params to dict"""
params = {}
for param in self._get_all_params_by_id(host['id']):
for param in host_params:
name = param['name']
params[name] = param['value']
if self.rich_params:
try:
params[name] = json.loads(param['value'])
except ValueError:
params[name] = param['value']
else:
params[name] = param['value']
return params
@ -216,6 +259,7 @@ class ForemanInventory(object):
self.write_to_cache(self.inventory, self.cache_path_inventory)
self.write_to_cache(self.params, self.cache_path_params)
self.write_to_cache(self.facts, self.cache_path_facts)
self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores
@ -224,18 +268,23 @@ class ForemanInventory(object):
>>> ForemanInventory.to_safe("foo-bar baz")
'foo_barbaz'
'''
regex = "[^A-Za-z0-9\_]"
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
def update_cache(self):
def update_cache(self, scan_only_new_hosts=False):
"""Make calls to foreman and save the output in a cache"""
self.groups = dict()
self.hosts = dict()
for host in self._get_hosts():
if host['name'] in self.cache.keys() and scan_only_new_hosts:
continue
dns_name = host['name']
host_data = self._get_host_data_by_id(host['id'])
host_params = host_data.get('all_parameters', {})
# Create ansible groups for hostgroup
group = 'hostgroup'
val = host.get('%s_title' % group) or host.get('%s_name' % group)
@ -256,16 +305,13 @@ class ForemanInventory(object):
safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower()))
self.inventory[safe_key].append(dns_name)
params = self._resolve_params(host)
params = self._resolve_params(host_params)
# Ansible groups by parameters in host groups and Foreman host
# attributes.
groupby = copy.copy(params)
for k, v in host.items():
if isinstance(v, str):
groupby[k] = self.to_safe(v)
elif isinstance(v, int):
groupby[k] = v
groupby = dict()
for k, v in params.items():
groupby[k] = self.to_safe(str(v))
# The name of the ansible groups is given by group_patterns:
for pattern in self.group_patterns:
@ -275,6 +321,17 @@ class ForemanInventory(object):
except KeyError:
pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host_data.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host
self.params[dns_name] = params
self.facts[dns_name] = self._get_facts(host)
@ -296,31 +353,36 @@ class ForemanInventory(object):
def load_inventory_from_cache(self):
"""Read the index from the cache file sets self.index"""
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
with open(self.cache_path_inventory, 'r') as fp:
self.inventory = json.load(fp)
def load_params_from_cache(self):
"""Read the index from the cache file sets self.index"""
cache = open(self.cache_path_params, 'r')
json_params = cache.read()
self.params = json.loads(json_params)
with open(self.cache_path_params, 'r') as fp:
self.params = json.load(fp)
def load_facts_from_cache(self):
"""Read the index from the cache file sets self.facts"""
if not self.want_facts:
return
cache = open(self.cache_path_facts, 'r')
json_facts = cache.read()
self.facts = json.loads(json_facts)
with open(self.cache_path_facts, 'r') as fp:
self.facts = json.load(fp)
def load_hostcollections_from_cache(self):
"""Read the index from the cache file sets self.hostcollections"""
if not self.want_hostcollections:
return
with open(self.cache_path_hostcollections, 'r') as fp:
self.hostcollections = json.load(fp)
def load_cache_from_cache(self):
"""Read the cache from the cache file sets self.cache"""
cache = open(self.cache_path_cache, 'r')
json_cache = cache.read()
self.cache = json.loads(json_cache)
with open(self.cache_path_cache, 'r') as fp:
self.cache = json.load(fp)
def get_inventory(self):
if self.args.refresh_cache or not self.is_cache_valid():
@ -329,7 +391,10 @@ class ForemanInventory(object):
self.load_inventory_from_cache()
self.load_params_from_cache()
self.load_facts_from_cache()
self.load_hostcollections_from_cache()
self.load_cache_from_cache()
if self.scan_new_hosts:
self.update_cache(True)
def get_host_info(self):
"""Get variables about a specific host"""

gce.py

@ -40,6 +40,7 @@ based on the data obtained from the libcloud Node object:
- gce_tags
- gce_metadata
- gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories:
- zone:
@ -73,7 +74,6 @@ Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
try:
import pkg_resources
except ImportError:
@ -83,8 +83,8 @@ except ImportError:
# library is used.
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v2"
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
@ -92,7 +92,10 @@ import argparse
from time import time
import ConfigParser
if sys.version_info >= (3, 0):
import configparser
else:
import ConfigParser as configparser
import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
@ -213,10 +216,11 @@ class GceInventory(object):
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
config = configparser.SafeConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'gce_zone': '',
'libcloud_secrets': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
@ -270,10 +274,11 @@ class GceInventory(object):
# exists.
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
args = list(secrets.GCE_PARAMS)
kwargs = secrets.GCE_KEYWORD_PARAMS
secrets_found = True
except:
pass
@ -291,18 +296,23 @@ class GceInventory(object):
secrets_found = True
except:
pass
if not secrets_found:
args = [
self.config.get('gce','gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path')
self.config.get('gce', 'gce_service_account_email_address'),
self.config.get('gce', 'gce_service_account_pem_file_path')
]
kwargs = {'project': self.config.get('gce', 'gce_project_id')}
kwargs = {'project': self.config.get('gce', 'gce_project_id'),
'datacenter': self.config.get('gce', 'gce_zone')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
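# GCE_CREDENTIALS_FILE_PATH is applied after GCE_PEM_FILE_PATH, so it
# takes precedence when both environment variables are set.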
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
@ -315,7 +325,7 @@ class GceInventory(object):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]]
@ -325,17 +335,16 @@ class GceInventory(object):
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
@ -347,6 +356,9 @@ class GceInventory(object):
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
subnet = None
if 'subnetwork' in inst.extra['networkInterfaces'][0]:
subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
# default to external IP unless the user has specified they prefer internal
if self.ip_type == 'internal':
ssh_host = inst.private_ips[0]
@ -367,6 +379,7 @@ class GceInventory(object):
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
'gce_subnetwork': subnet,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ssh_host
}
@ -394,7 +407,7 @@ class GceInventory(object):
all_nodes = []
params, more_results = {'maxResults': 500}, True
while more_results:
self.driver.connection.gce_params=params
self.driver.connection.gce_params = params
all_nodes.extend(self.driver.list_nodes())
more_results = 'pageToken' in params
return all_nodes
@ -470,6 +483,13 @@ class GceInventory(object):
else:
groups[stat] = [name]
for private_ip in node.private_ips:
groups[private_ip] = [name]
if len(node.public_ips) >= 1:
for public_ip in node.public_ips:
groups[public_ip] = [name]
groups["_meta"] = meta
return groups

openstack.py

@ -29,8 +29,11 @@
# - /etc/openstack/clouds.yaml
# - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will connect to
# all of them and present them as one contiguous inventory.
# regions of those clouds. If it does, this inventory module will by default
# connect to all of them and present them as one contiguous inventory. You
# can limit to one cloud by passing the `--cloud` parameter, or use the
# OS_CLOUD environment variable. If caching is enabled, and a cloud is
# selected, then per-cloud cache folders will be used.
#
# See the adjacent openstack.yml file for an example config file
# There are two ansible inventory specific options that can be set in
@ -44,6 +47,9 @@
# has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True)
#
# It is also possible to pass the correct login user by setting an
# ansible_user: $myuser metadata attribute on the server.
import argparse
import collections
@ -108,20 +114,28 @@ def get_groups_from_server(server_vars, namegroup=True):
return groups
def get_host_groups(inventory, refresh=False):
(cache_file, cache_expiration_time) = get_cache_settings()
def get_host_groups(inventory, refresh=False, cloud=None):
(cache_file, cache_expiration_time) = get_cache_settings(cloud)
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
open(cache_file, 'w').write(groups)
with open(cache_file, 'w') as f:
f.write(groups)
else:
groups = open(cache_file, 'r').read()
with open(cache_file, 'r') as f:
groups = f.read()
return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'],
ansible_host=server['interface_ip'],
openstack=server)
metadata = server.get('metadata', {})
if 'ansible_user' in metadata:
hostvars[key]['ansible_user'] = metadata['ansible_user']
for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key)
@ -176,12 +190,14 @@ def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
return True
def get_cache_settings():
def get_cache_settings(cloud=None):
config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
# For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path()
if cloud:
cache_path = '{0}_{1}'.format(cache_path, cloud)
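# e.g. --cloud mycloud turns a cache path of '/x/openstack' into
# '/x/openstack_mycloud' (illustrative path)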
if not os.path.exists(cache_path):
os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
@ -194,6 +210,8 @@ def to_json(in_dict):
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'),
help='Cloud name (default: None)')
parser.add_argument('--private',
action='store_true',
help='Use private address for ansible host')
@ -218,6 +236,7 @@ def main():
refresh=args.refresh,
config_files=config_files,
private=args.private,
cloud=args.cloud,
)
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict(
@ -232,7 +251,7 @@ def main():
inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list:
output = get_host_groups(inventory, refresh=args.refresh)
output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)

View File

@ -1,18 +1,10 @@
clouds:
mordred:
cloud: hp
vexxhost:
profile: vexxhost
auth:
username: mordred@example.com
password: my-wonderful-password
project_name: mordred-tenant
region_name: region-b.geo-1
monty:
cloud: hp
auth:
username: monty.taylor@example.com
password: another-wonderful-password
project_name: monty.taylor@example.com-default-tenant
region_name: region-b.geo-1
project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9
username: fb886a9b-c37b-442a-9be3-964bed961e04
password: fantastic-password1
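# `profile` pulls auth_url and other vendor defaults from
# os-client-config's built-in provider profiles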
rax:
cloud: rackspace
auth:
@ -22,7 +14,7 @@ clouds:
region_name: DFW,ORD,IAD
devstack:
auth:
auth_url: http://127.0.0.1:35357/v2.0/
auth_url: https://devstack.example.com
username: stack
password: stack
project_name: stack

View File

@ -124,10 +124,10 @@ def create_connection():
# Create parser and add ovirt section if it doesn't exist:
config = configparser.SafeConfigParser(
defaults={
'ovirt_url': None,
'ovirt_username': None,
'ovirt_password': None,
'ovirt_ca_file': None,
'ovirt_url': os.environ.get('OVIRT_URL'),
'ovirt_username': os.environ.get('OVIRT_USERNAME'),
'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
'ovirt_ca_file': os.environ.get('OVIRT_CAFILE'),
}
)
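# The environment variables above only supply defaults; explicit values in
# the [ovirt] ini section still take precedence. For example (hypothetical
# values):
#   export OVIRT_URL=https://engine.example.com/ovirt-engine/api
#   export OVIRT_USERNAME=admin@internal
#   export OVIRT_PASSWORD=secret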
if not config.has_section('ovirt'):

View File

@ -1,4 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C): 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Requirements
# - pyvmomi >= 6.0.0.2016.4
@ -23,42 +27,49 @@ $ jq '._meta.hostvars[].config' data.json | head
from __future__ import print_function
import argparse
import atexit
import datetime
import getpass
import jinja2
import itertools
import json
import os
import six
import re
import ssl
import sys
import uuid
from collections import defaultdict
from six.moves import configparser
from time import time
HAS_PYVMOMI = False
import six
from jinja2 import Environment
from six import integer_types, string_types
from six.moves import configparser
try:
from pyVmomi import vim
import argparse
except ImportError:
sys.exit('Error: This inventory script requires the "argparse" Python module. Please install it or upgrade to python-2.7')
try:
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
HAS_PYVMOMI = True
except ImportError:
pass
sys.exit("ERROR: This inventory script required 'pyVmomi' Python module, it was not able to load it")
try:
import json
except ImportError:
import simplejson as json
hasvcr = False
try:
import vcr
def regex_match(s, pattern):
'''Custom filter for regex matching'''
reg = re.compile(pattern)
if reg.match(s):
return True
else:
return False
hasvcr = True
except ImportError:
pass
def select_chain_match(inlist, key, pattern):
'''Get a key from a list of dicts, squash values to a single list, then filter'''
outlist = [x[key] for x in inlist]
outlist = list(itertools.chain(*outlist))
outlist = [x for x in outlist if regex_match(x, pattern)]
return outlist
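# Both helpers are registered as jinja2 filters on the class Environment
# below, so ini-defined patterns can use them, e.g. (a hypothetical filter
# expression):
#   host_filters = {{ guest.net | select_chain_match('ipaddress', '^192[.]168[.]') }}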
class VMwareMissingHostException(Exception):
@ -89,10 +100,7 @@ class VMWareInventory(object):
skip_keys = []
groupby_patterns = []
if sys.version_info > (3, 0):
safe_types = [int, bool, str, float, None]
else:
safe_types = [int, long, bool, str, float, None]
safe_types = [bool, str, float, None] + list(integer_types)
iter_types = [dict, list]
bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
@ -104,15 +112,18 @@ class VMWareInventory(object):
custom_fields = {}
# use jinja environments to allow for custom filters
env = Environment()
env.filters['regex_match'] = regex_match
env.filters['select_chain_match'] = select_chain_match
# translation table for attributes to fetch for known vim types
if not HAS_PYVMOMI:
vimTable = {}
else:
vimTable = {
vim.Datastore: ['_moId', 'name'],
vim.ResourcePool: ['_moId', 'name'],
vim.HostSystem: ['_moId', 'name'],
}
vimTable = {
vim.Datastore: ['_moId', 'name'],
vim.ResourcePool: ['_moId', 'name'],
vim.HostSystem: ['_moId', 'name'],
}
@staticmethod
def _empty_inventory():
@ -156,7 +167,6 @@ class VMWareInventory(object):
return json.dumps(data_to_print, indent=2)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
valid = False
@ -170,21 +180,16 @@ class VMWareInventory(object):
return valid
def do_api_calls_update_cache(self):
''' Get instances and cache the data '''
self.inventory = self.instances_to_inventory(self.get_instances())
self.write_to_cache(self.inventory)
def write_to_cache(self, data):
''' Dump inventory to json file '''
with open(self.cache_path_cache, 'wb') as f:
f.write(json.dumps(data))
def get_inventory_from_cache(self):
''' Read in jsonified inventory '''
jdata = None
@ -193,7 +198,6 @@ class VMWareInventory(object):
return json.loads(jdata)
def read_settings(self):
''' Reads the settings from the vmware_inventory.ini file '''
scriptbasename = __file__
@ -222,7 +226,7 @@ class VMWareInventory(object):
'resourceconfig',
'alias_pattern': '{{ config.name + "_" + config.uuid }}',
'host_pattern': '{{ guest.ipaddress }}',
'host_filters': '{{ guest.gueststate == "running" }}',
'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
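# (runtime.powerstate is reported by the host itself, unlike
# guest.gueststate, which needs VMware tools running in the guest)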
'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
'lower_var_keys': True,
'custom_field_group_prefix': 'vmware_tag_',
@ -239,6 +243,9 @@ class VMWareInventory(object):
vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
config.read(vmware_ini_path)
if 'vmware' not in config.sections():
config.add_section('vmware')
# apply defaults
for k, v in defaults['vmware'].items():
if not config.has_option('vmware', k):
@ -297,7 +304,6 @@ class VMWareInventory(object):
self.config = config
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
@ -314,7 +320,6 @@ class VMWareInventory(object):
self.args = parser.parse_args()
def get_instances(self):
''' Get a list of vm instances with pyvmomi '''
kwargs = {'host': self.server,
'user': self.username,
@ -329,17 +334,23 @@ class VMWareInventory(object):
return self._get_instances(kwargs)
def _get_instances(self, inkwargs):
''' Make API calls '''
instances = []
si = SmartConnect(**inkwargs)
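# connect, separating certificate failures from other connection errors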
try:
si = SmartConnect(**inkwargs)
except ssl.SSLError as connection_error:
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs:
sys.exit("Unable to connect to ESXi server due to %s, "
"please specify validate_certs=False and try again" % connection_error)
except Exception as exc:
self.debugl("Unable to connect to ESXi server due to %s" % exc)
sys.exit("Unable to connect to ESXi server due to %s" % exc)
self.debugl('retrieving all instances')
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
sys.exit("Could not connect to the specified host using specified "
"username and password")
atexit.register(Disconnect, si)
content = si.RetrieveContent()
@ -370,18 +381,22 @@ class VMWareInventory(object):
instance_tuples.append((instance, ifacts))
self.debugl('facts collected for all instances')
cfm = content.customFieldsManager
if cfm is not None and cfm.field:
for f in cfm.field:
if f.managedObjectType == vim.VirtualMachine:
self.custom_fields[f.key] = f.name
self.debugl('%d custom fieds collected' % len(self.custom_fields))
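# custom field lookups can fault on some vCenter setups, so guard them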
try:
cfm = content.customFieldsManager
if cfm is not None and cfm.field:
for f in cfm.field:
if f.managedObjectType == vim.VirtualMachine:
self.custom_fields[f.key] = f.name
self.debugl('%d custom fields collected' % len(self.custom_fields))
except vmodl.RuntimeFault as exc:
self.debugl("Unable to gather custom fields due to %s" % exc.msg)
except IndexError as exc:
self.debugl("Unable to gather custom fields due to %s" % exc)
return instance_tuples
def instances_to_inventory(self, instances):
''' Convert a list of vm objects into a json compliant inventory '''
self.debugl('re-indexing instances based on ini settings')
inventory = VMWareInventory._empty_inventory()
inventory['all'] = {}
@ -412,7 +427,7 @@ class VMWareInventory(object):
# Reset the inventory keys
for k, v in name_mapping.items():
if not host_mapping or not k in host_mapping:
if not host_mapping or k not in host_mapping:
continue
# set ansible_host (2.x)
@ -467,7 +482,7 @@ class VMWareInventory(object):
for k, v in inventory['_meta']['hostvars'].items():
if 'customvalue' in v:
for tv in v['customvalue']:
if not isinstance(tv['value'], str) and not isinstance(tv['value'], unicode):
if not isinstance(tv['value'], string_types):
continue
newkey = None
@ -493,12 +508,11 @@ class VMWareInventory(object):
return inventory
def create_template_mapping(self, inventory, pattern, dtype='string'):
''' Return a hash of uuid to templated string from pattern '''
mapping = {}
for k, v in inventory['_meta']['hostvars'].items():
t = jinja2.Template(pattern)
t = self.env.from_string(pattern)
newkey = None
try:
newkey = t.render(v)
@ -544,15 +558,27 @@ class VMWareInventory(object):
for idx, x in enumerate(parts):
# if the val wasn't set yet, get it from the parent
if not val:
val = getattr(vm, x)
if isinstance(val, dict):
if x in val:
val = val.get(x)
elif x.lower() in val:
val = val.get(x.lower())
else:
# in a subkey, get the subprop from the previous attrib
try:
val = getattr(val, x)
except AttributeError as e:
self.debugl(e)
# if the val wasn't set yet, get it from the parent
if not val:
try:
val = getattr(vm, x)
except AttributeError as e:
self.debugl(e)
else:
# in a subkey, get the subprop from the previous attrib
try:
val = getattr(val, x)
except AttributeError as e:
self.debugl(e)
# make sure it serializes
val = self._process_object_types(val)
# lowercase keys if requested
if self.lowerkeys:
@ -569,7 +595,6 @@ class VMWareInventory(object):
return rdata
def facts_from_vobj(self, vobj, level=0):
''' Traverse a VM object and return a json compliant data structure '''
# pyvmomi objects are not yet serializable, but may be one day ...
@ -616,7 +641,7 @@ class VMWareInventory(object):
return rdata
def _process_object_types(self, vobj, thisvm=None, inkey=None, level=0):
def _process_object_types(self, vobj, thisvm=None, inkey='', level=0):
''' Serialize an object '''
rdata = {}
@ -640,12 +665,10 @@ class VMWareInventory(object):
rdata = vobj.decode('ascii', 'ignore')
elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
rdata = vobj
elif issubclass(type(vobj), int) or isinstance(vobj, int):
elif issubclass(type(vobj), integer_types) or isinstance(vobj, integer_types):
rdata = vobj
elif issubclass(type(vobj), float) or isinstance(vobj, float):
rdata = vobj
elif issubclass(type(vobj), long) or isinstance(vobj, long):
rdata = vobj
elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
rdata = []
try:
@ -703,14 +726,13 @@ class VMWareInventory(object):
return rdata
def get_host_info(self, host):
''' Return hostvars for a single host '''
if host in self.inventory['_meta']['hostvars']:
return self.inventory['_meta']['hostvars'][host]
elif self.args.host and self.inventory['_meta']['hostvars']:
match = None
for k, v in self.inventory['_meta']['hostvars']:
for k, v in self.inventory['_meta']['hostvars'].items():
if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host:
match = k
break