Update inventory scripts

ec2
- added support for tags and instance attributes
- allow filtering RDS instances by tags
- add option to group by platform
- set missing defaults
- make cache unique to the script being run
- bug fixes
- implement AND'd filters
azure_rm
- minor python 3 upgrades
cloudforms
- minor regex fix
foreman
- several new configurables
- changes to caching
gce
- python 3 upgrades
- added gce_subnetwork param
openstack
- added `--cloud` parameter
ovirt4
- obtain defaults from env vars
vmware_inventory
- changed imports
- allow for custom filters
- changed host_filters
- error handling
- python 3 upgrades
This commit is contained in:
AlanCoding
2018-02-26 12:45:54 -05:00
parent 9493b72f29
commit b878a844d0
11 changed files with 662 additions and 357 deletions

View File

@@ -187,14 +187,18 @@ Version: 1.0.0
''' '''
import argparse import argparse
import ConfigParser
import json import json
import os import os
import re import re
import sys import sys
import inspect import inspect
import traceback
try:
# python2
import ConfigParser as cp
except ImportError:
# python3
import configparser as cp
from packaging.version import Version from packaging.version import Version
@@ -326,7 +330,7 @@ class AzureRM(object):
path = expanduser("~") path = expanduser("~")
path += "/.azure/credentials" path += "/.azure/credentials"
try: try:
config = ConfigParser.ConfigParser() config = cp.ConfigParser()
config.read(path) config.read(path)
except Exception as exc: except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read " self.fail("Failed to access {0}. Check that the file exists and you have read "
@@ -616,6 +620,7 @@ class AzureInventory(object):
# Add windows details # Add windows details
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None: if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
host_vars['ansible_connection'] = 'winrm'
host_vars['windows_auto_updates_enabled'] = \ host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
@@ -795,7 +800,7 @@ class AzureInventory(object):
config = None config = None
settings = None settings = None
try: try:
config = ConfigParser.ConfigParser() config = cp.ConfigParser()
config.read(path) config.read(path)
except: except:
pass pass
@@ -838,9 +843,9 @@ class AzureInventory(object):
def _to_safe(self, word): def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_" regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups: if not self.replace_dash_in_groups:
regex += "\-" regex += r"\-"
return re.sub(regex + "]", "_", word) return re.sub(regex + "]", "_", word)

View File

@@ -468,7 +468,7 @@ class CloudFormsInventory(object):
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
""" """
if self.cloudforms_clean_group_keys: if self.cloudforms_clean_group_keys:
regex = "[^A-Za-z0-9\_]" regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", "")) return re.sub(regex, "_", word.replace(" ", ""))
else: else:
return word return word

View File

@@ -10,8 +10,9 @@
# AWS regions to make calls to. Set this to 'all' to make request to all regions # AWS regions to make calls to. Set this to 'all' to make request to all regions
# in AWS and merge the results together. Alternatively, set this to a comma # in AWS and merge the results together. Alternatively, set this to a comma
# separated list of regions. E.g. 'us-east-1, us-west-1, us-west-2' # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not
# 'auto' is AWS_REGION or AWS_DEFAULT_REGION environment variable. # provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or
# AWS_DEFAULT_REGION environment variable will be read to determine the region.
regions = all regions = all
regions_exclude = us-gov-west-1, cn-north-1 regions_exclude = us-gov-west-1, cn-north-1
@@ -134,6 +135,7 @@ group_by_aws_account = False
group_by_ami_id = True group_by_ami_id = True
group_by_instance_type = True group_by_instance_type = True
group_by_instance_state = False group_by_instance_state = False
group_by_platform = True
group_by_key_pair = True group_by_key_pair = True
group_by_vpc_id = True group_by_vpc_id = True
group_by_security_group = True group_by_security_group = True
@@ -157,7 +159,9 @@ group_by_elasticache_replication_group = True
# inventory. For the full list of possible filters, please read the EC2 API # inventory. For the full list of possible filters, please read the EC2 API
# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
# Filters are key/value pairs separated by '=', to list multiple filters use # Filters are key/value pairs separated by '=', to list multiple filters use
# a list separated by commas. See examples below. # a list separated by commas. To "AND" criteria together, use "&". Note that
# the "AND" is not useful along with stack_filters and so such usage is not allowed.
# See examples below.
# If you want to apply multiple filters simultaneously, set stack_filters to # If you want to apply multiple filters simultaneously, set stack_filters to
# True. Default behaviour is to combine the results of all filters. Stacking # True. Default behaviour is to combine the results of all filters. Stacking
@@ -179,6 +183,18 @@ stack_filters = False
# (ex. webservers15, webservers1a, webservers123 etc) # (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1* # instance_filters = tag:Name=webservers1*
# Retrieve only instances of type t1.micro that also have tag env=stage
# instance_filters = instance-type=t1.micro&tag:env=stage
# Retrieve instances of type t1.micro AND tag env=stage, as well as any instance
# that are of type m3.large, regardless of env tag
# instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large
# An IAM role can be assumed, so all requests are run as that role.
# This can be useful for connecting across different accounts, or to limit user
# access
# iam_role = role-arn
# A boto configuration profile may be used to separate out credentials # A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html # see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name # boto_profile = some-boto-profile-name

View File

@@ -12,9 +12,9 @@ variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123' export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123' export AWS_SECRET_ACCESS_KEY='abc123'
optional region environement variable if region is 'auto' Optional region environment variable if region is 'auto'
This script also assumes there is an ec2.ini file alongside it. To specify a This script also assumes that there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable: different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini export EC2_INI_PATH=/path/to/my_ec2.ini
@@ -95,12 +95,37 @@ consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist. ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new In addition, if an instance has AWS tags associated with it, each tag is a new
variable named: variable named:
- ec2_tag_[Key] = [Value] - ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'. 'ec2_security_group_names'.
When destination_format and destination_format_tags are specified
the destination_format can be built from the instance tags and attributes.
The behavior will first check the user defined tags, then proceed to
check instance attributes, and finally if neither are found 'nil' will
be used instead.
'my_instance': {
'region': 'us-east-1', # attribute
'availability_zone': 'us-east-1a', # attribute
'private_dns_name': '172.31.0.1', # attribute
'ec2_tag_deployment': 'blue', # tag
'ec2_tag_clusterid': 'ansible', # tag
'ec2_tag_Name': 'webserver', # tag
...
}
Inside of the ec2.ini file the following settings are specified:
...
destination_format: {0}-{1}-{2}-{3}
destination_format_tags: Name,clusterid,deployment,private_dns_name
...
These settings would produce a destination_format as the following:
'webserver-ansible-blue-172.31.0.1'
''' '''
# (c) 2012, Peter Sankauskas # (c) 2012, Peter Sankauskas
@@ -132,13 +157,14 @@ from boto import ec2
from boto import rds from boto import rds
from boto import elasticache from boto import elasticache
from boto import route53 from boto import route53
from boto import sts
import six import six
from ansible.module_utils import ec2 as ec2_utils from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False HAS_BOTO3 = False
try: try:
import boto3 import boto3 # noqa
HAS_BOTO3 = True HAS_BOTO3 = True
except ImportError: except ImportError:
pass pass
@@ -151,11 +177,65 @@ try:
except ImportError: except ImportError:
import simplejson as json import simplejson as json
DEFAULTS = {
'all_elasticache_clusters': 'False',
'all_elasticache_nodes': 'False',
'all_elasticache_replication_groups': 'False',
'all_instances': 'False',
'all_rds_instances': 'False',
'aws_access_key_id': None,
'aws_secret_access_key': None,
'aws_security_token': None,
'boto_profile': None,
'cache_max_age': '300',
'cache_path': '~/.ansible/tmp',
'destination_variable': 'public_dns_name',
'elasticache': 'True',
'eucalyptus': 'False',
'eucalyptus_host': None,
'expand_csv_tags': 'False',
'group_by_ami_id': 'True',
'group_by_availability_zone': 'True',
'group_by_aws_account': 'False',
'group_by_elasticache_cluster': 'True',
'group_by_elasticache_engine': 'True',
'group_by_elasticache_parameter_group': 'True',
'group_by_elasticache_replication_group': 'True',
'group_by_instance_id': 'True',
'group_by_instance_state': 'False',
'group_by_instance_type': 'True',
'group_by_key_pair': 'True',
'group_by_platform': 'True',
'group_by_rds_engine': 'True',
'group_by_rds_parameter_group': 'True',
'group_by_region': 'True',
'group_by_route53_names': 'True',
'group_by_security_group': 'True',
'group_by_tag_keys': 'True',
'group_by_tag_none': 'True',
'group_by_vpc_id': 'True',
'hostname_variable': None,
'iam_role': None,
'include_rds_clusters': 'False',
'nested_groups': 'False',
'pattern_exclude': None,
'pattern_include': None,
'rds': 'False',
'regions': 'all',
'regions_exclude': 'us-gov-west-1, cn-north-1',
'replace_dash_in_groups': 'True',
'route53': 'False',
'route53_excluded_zones': '',
'route53_hostnames': None,
'stack_filters': 'False',
'vpc_destination_variable': 'ip_address'
}
class Ec2Inventory(object): class Ec2Inventory(object):
def _empty_inventory(self): def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}} return {"_meta": {"hostvars": {}}}
def __init__(self): def __init__(self):
''' Main execution path ''' ''' Main execution path '''
@@ -204,7 +284,6 @@ class Ec2Inventory(object):
print(data_to_print) print(data_to_print)
def is_cache_valid(self): def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid ''' ''' Determines if the cache files have expired, or if it is still valid '''
@@ -217,7 +296,6 @@ class Ec2Inventory(object):
return False return False
def read_settings(self): def read_settings(self):
''' Reads the settings from the ec2.ini file ''' ''' Reads the settings from the ec2.ini file '''
@@ -225,35 +303,50 @@ class Ec2Inventory(object):
scriptbasename = os.path.basename(scriptbasename) scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '') scriptbasename = scriptbasename.replace('.py', '')
defaults = {'ec2': { defaults = {
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) 'ec2': {
'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'),
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
} }
} }
if six.PY3: if six.PY3:
config = configparser.ConfigParser() config = configparser.ConfigParser(DEFAULTS)
else: else:
config = configparser.SafeConfigParser() config = configparser.SafeConfigParser(DEFAULTS)
ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
config.read(ec2_ini_path)
if not os.path.isfile(ec2_ini_path):
ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback'])
if os.path.isfile(ec2_ini_path):
config.read(ec2_ini_path)
# Add empty sections if they don't exist
try:
config.add_section('ec2')
except configparser.DuplicateSectionError:
pass
try:
config.add_section('credentials')
except configparser.DuplicateSectionError:
pass
# is eucalyptus? # is eucalyptus?
self.eucalyptus_host = None self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
self.eucalyptus = False self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions # Regions
self.regions = [] self.regions = []
configRegions = config.get('ec2', 'regions') configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'): if (configRegions == 'all'):
if self.eucalyptus_host: if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials) self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials)
else: else:
configRegions_exclude = config.get('ec2', 'regions_exclude')
for regionInfo in ec2.regions(): for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude: if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name) self.regions.append(regionInfo.name)
@@ -263,16 +356,12 @@ class Ec2Inventory(object):
env_region = os.environ.get('AWS_REGION') env_region = os.environ.get('AWS_REGION')
if env_region is None: if env_region is None:
env_region = os.environ.get('AWS_DEFAULT_REGION') env_region = os.environ.get('AWS_DEFAULT_REGION')
self.regions = [ env_region ] self.regions = [env_region]
# Destination addresses # Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable') self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
self.hostname_variable = config.get('ec2', 'hostname_variable')
if config.has_option('ec2', 'hostname_variable'):
self.hostname_variable = config.get('ec2', 'hostname_variable')
else:
self.hostname_variable = None
if config.has_option('ec2', 'destination_format') and \ if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'): config.has_option('ec2', 'destination_format_tags'):
@@ -284,36 +373,22 @@ class Ec2Inventory(object):
# Route53 # Route53
self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_enabled = config.getboolean('ec2', 'route53')
if config.has_option('ec2', 'route53_hostnames'): self.route53_hostnames = config.get('ec2', 'route53_hostnames')
self.route53_hostnames = config.get('ec2', 'route53_hostnames')
else:
self.route53_hostnames = None
self.route53_excluded_zones = [] self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Include RDS instances? # Include RDS instances?
self.rds_enabled = True self.rds_enabled = config.getboolean('ec2', 'rds')
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include RDS cluster instances? # Include RDS cluster instances?
if config.has_option('ec2', 'include_rds_clusters'): self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
else:
self.include_rds_clusters = False
# Include ElastiCache instances? # Include ElastiCache instances?
self.elasticache_enabled = True self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances? # Return all EC2 instances?
if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances')
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'. # Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option. # Setting 'all_instances' to 'yes' overrides this option.
@@ -338,49 +413,30 @@ class Ec2Inventory(object):
self.ec2_instance_states = ['running'] self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled) # Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled) # Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled) # Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled) # Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument then environment variables then config file) # boto configuration profile (prefer CLI argument then environment variables then config file)
self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE') self.boto_profile = self.args.boto_profile or \
if config.has_option('ec2', 'boto_profile') and not self.boto_profile: os.environ.get('AWS_PROFILE') or \
self.boto_profile = config.get('ec2', 'boto_profile') config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables) # AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')): os.environ.get('AWS_PROFILE')):
if config.has_option('credentials', 'aws_access_key_id'):
aws_access_key_id = config.get('credentials', 'aws_access_key_id') aws_access_key_id = config.get('credentials', 'aws_access_key_id')
else: aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
aws_access_key_id = None aws_security_token = config.get('credentials', 'aws_security_token')
if config.has_option('credentials', 'aws_secret_access_key'):
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
else:
aws_secret_access_key = None
if config.has_option('credentials', 'aws_security_token'):
aws_security_token = config.get('credentials', 'aws_security_token')
else:
aws_security_token = None
if aws_access_key_id: if aws_access_key_id:
self.credentials = { self.credentials = {
'aws_access_key_id': aws_access_key_id, 'aws_access_key_id': aws_access_key_id,
@@ -400,111 +456,79 @@ class Ec2Inventory(object):
cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
if cache_id: if cache_id:
cache_name = '%s-%s' % (cache_name, cache_id) cache_name = '%s-%s' % (cache_name, cache_id)
cache_name += '-' + str(abs(hash(__file__)))[1:7]
self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
self.cache_max_age = config.getint('ec2', 'cache_max_age') self.cache_max_age = config.getint('ec2', 'cache_max_age')
if config.has_option('ec2', 'expand_csv_tags'): self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
else:
self.expand_csv_tags = False
# Configure nested groups instead of flat namespace. # Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups')
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names # Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'): self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else: # IAM role to assume for connection
self.replace_dash_in_groups = True self.iam_role = config.get('ec2', 'iam_role')
# Configure which groups should be created. # Configure which groups should be created.
group_by_options = [
'group_by_instance_id', group_by_options = [a for a in DEFAULTS if a.startswith('group_by')]
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_instance_state',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
'group_by_aws_account',
]
for option in group_by_options: for option in group_by_options:
if config.has_option('ec2', option): setattr(self, option, config.getboolean('ec2', option))
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern? # Do we need to just include hosts that match a pattern?
try: self.pattern_include = config.get('ec2', 'pattern_include')
pattern_include = config.get('ec2', 'pattern_include') if self.pattern_include:
if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(self.pattern_include)
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern? # Do we need to exclude hosts that match a pattern?
try: self.pattern_exclude = config.get('ec2', 'pattern_exclude')
pattern_exclude = config.get('ec2', 'pattern_exclude') if self.pattern_exclude:
if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(self.pattern_exclude)
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Do we want to stack multiple filters? # Do we want to stack multiple filters?
if config.has_option('ec2', 'stack_filters'): self.stack_filters = config.getboolean('ec2', 'stack_filters')
self.stack_filters = config.getboolean('ec2', 'stack_filters')
else:
self.stack_filters = False
# Instance filters (see boto and EC2 API docs). Ignore invalid filters. # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list) self.ec2_instance_filters = []
if config.has_option('ec2', 'instance_filters'): if config.has_option('ec2', 'instance_filters'):
filters = config.get('ec2', 'instance_filters')
filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] if self.stack_filters and '&' in filters:
self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n")
for instance_filter in filters: filter_sets = [f for f in filters.split(',') if f]
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter: for filter_set in filter_sets:
continue filters = {}
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] filter_set = filter_set.strip()
if not filter_key: for instance_filter in filter_set.split("&"):
continue instance_filter = instance_filter.strip()
self.ec2_instance_filters[filter_key].append(filter_value) if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
filters[filter_key] = filter_value
self.ec2_instance_filters.append(filters.copy())
def parse_cli_args(self): def parse_cli_args(self):
''' Command line argument processing ''' ''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True, parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)') help='List instances (default: True)')
parser.add_argument('--host', action='store', parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance') help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False, parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2') help='Use boto profile for connections to EC2')
self.args = parser.parse_args() self.args = parser.parse_args()
def do_api_calls_update_cache(self): def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files ''' ''' Do API calls to each region, and save data in cache files '''
@@ -548,6 +572,13 @@ class Ec2Inventory(object):
connect_args['profile_name'] = self.boto_profile connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args) self.boto_fix_security_token_in_profile(connect_args)
if self.iam_role:
sts_conn = sts.connect_to_region(region, **connect_args)
role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory')
connect_args['aws_access_key_id'] = role.credentials.access_key
connect_args['aws_secret_access_key'] = role.credentials.secret_key
connect_args['security_token'] = role.credentials.session_token
conn = module.connect_to_region(region, **connect_args) conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None: if conn is None:
@@ -564,12 +595,12 @@ class Ec2Inventory(object):
if self.ec2_instance_filters: if self.ec2_instance_filters:
if self.stack_filters: if self.stack_filters:
filters_dict = {} filters_dict = {}
for filter_key, filter_values in self.ec2_instance_filters.items(): for filters in self.ec2_instance_filters:
filters_dict[filter_key] = filter_values filters_dict.update(filters)
reservations.extend(conn.get_all_instances(filters = filters_dict)) reservations.extend(conn.get_all_instances(filters=filters_dict))
else: else:
for filter_key, filter_values in self.ec2_instance_filters.items(): for filters in self.ec2_instance_filters:
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) reservations.extend(conn.get_all_instances(filters=filters))
else: else:
reservations = conn.get_all_instances() reservations = conn.get_all_instances()
@@ -583,7 +614,7 @@ class Ec2Inventory(object):
max_filter_value = 199 max_filter_value = 199
tags = [] tags = []
for i in range(0, len(instance_ids), max_filter_value): for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]})) tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
tags_by_instance_id = defaultdict(dict) tags_by_instance_id = defaultdict(dict)
for tag in tags: for tag in tags:
@@ -605,10 +636,44 @@ class Ec2Inventory(object):
error = "Error connecting to %s backend.\n%s" % (backend, e.message) error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances') self.fail_with_error(error, 'getting EC2 instances')
def tags_match_filters(self, tags):
''' return True if given tags match configured filters '''
if not self.ec2_instance_filters:
return True
for filters in self.ec2_instance_filters:
for filter_name, filter_value in filters.items():
if filter_name[:4] != 'tag:':
continue
filter_name = filter_name[4:]
if filter_name not in tags:
if self.stack_filters:
return False
continue
if isinstance(filter_value, list):
if self.stack_filters and tags[filter_name] not in filter_value:
return False
if not self.stack_filters and tags[filter_name] in filter_value:
return True
if isinstance(filter_value, six.string_types):
if self.stack_filters and tags[filter_name] != filter_value:
return False
if not self.stack_filters and tags[filter_name] == filter_value:
return True
return self.stack_filters
def get_rds_instances_by_region(self, region): def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular ''' Makes an AWS API call to the list of RDS instances in a particular
region ''' region '''
if not HAS_BOTO3:
self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again",
"getting RDS instances")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
db_instances = client.describe_db_instances()
try: try:
conn = self.connect_to_aws(rds, region) conn = self.connect_to_aws(rds, region)
if conn: if conn:
@@ -616,8 +681,15 @@ class Ec2Inventory(object):
while True: while True:
instances = conn.get_all_dbinstances(marker=marker) instances = conn.get_all_dbinstances(marker=marker)
marker = instances.marker marker = instances.marker
for instance in instances: for index, instance in enumerate(instances):
self.add_rds_instance(instance, region) # Add tags to instances.
instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']
instance.tags = {}
for tag in tags:
instance.tags[tag['Key']] = tag['Value']
if self.tags_match_filters(instance.tags):
self.add_rds_instance(instance, region)
if not marker: if not marker:
break break
except boto.exception.BotoServerError as e: except boto.exception.BotoServerError as e:
@@ -625,7 +697,11 @@ class Ec2Inventory(object):
if e.error_code == 'AuthFailure': if e.error_code == 'AuthFailure':
error = self.get_auth_error_message() error = self.get_auth_error_message()
if not e.reason == "Forbidden": elif e.error_code == "OptInRequired":
error = "RDS hasn't been enabled for this account yet. " \
"You must either log in to the RDS service through the AWS console to enable it, " \
"or set 'rds = False' in ec2.ini"
elif not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances') self.fail_with_error(error, 'getting RDS instances')
@@ -652,7 +728,7 @@ class Ec2Inventory(object):
if 'LatestRestorableTime' in c: if 'LatestRestorableTime' in c:
del c['LatestRestorableTime'] del c['LatestRestorableTime']
if self.ec2_instance_filters == {}: if not self.ec2_instance_filters:
matches_filter = True matches_filter = True
else: else:
matches_filter = False matches_filter = False
@@ -664,14 +740,18 @@ class Ec2Inventory(object):
c['Tags'] = tags['TagList'] c['Tags'] = tags['TagList']
if self.ec2_instance_filters: if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items(): for filters in self.ec2_instance_filters:
# get AWS tag key e.g. tag:env will be 'env' for filter_key, filter_values in filters.items():
tag_name = filter_key.split(":", 1)[1] # get AWS tag key e.g. tag:env will be 'env'
# Filter values is a list (if you put multiple values for the same tag name) tag_name = filter_key.split(":", 1)[1]
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) # Filter values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
if matches_filter:
# it matches a filter, so stop looking for further matches
break
if matches_filter: if matches_filter:
# it matches a filter, so stop looking for further matches
break break
except Exception as e: except Exception as e:
@@ -692,7 +772,7 @@ class Ec2Inventory(object):
''' Makes an AWS API call to the list of ElastiCache clusters (with ''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.''' nodes' info) in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method, # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by # that's why we need to call describe directly (it would be called by
# the shorthand method anyway...) # the shorthand method anyway...)
try: try:
@@ -707,7 +787,11 @@ class Ec2Inventory(object):
if e.error_code == 'AuthFailure': if e.error_code == 'AuthFailure':
error = self.get_auth_error_message() error = self.get_auth_error_message()
if not e.reason == "Forbidden": elif e.error_code == "OptInRequired":
error = "ElastiCache hasn't been enabled for this account yet. " \
"You must either log in to the ElastiCache service through the AWS console to enable it, " \
"or set 'elasticache = False' in ec2.ini"
elif not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters') self.fail_with_error(error, 'getting ElastiCache clusters')
@@ -728,7 +812,7 @@ class Ec2Inventory(object):
''' Makes an AWS API call to the list of ElastiCache replication groups ''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.''' in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method, # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by # that's why we need to call describe directly (it would be called by
# the shorthand method anyway...) # the shorthand method anyway...)
try: try:
@@ -767,7 +851,7 @@ class Ec2Inventory(object):
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))]
if len(boto_config_found) > 0: if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else: else:
@@ -800,8 +884,22 @@ class Ec2Inventory(object):
return return
# Select the best destination address # Select the best destination address
# When destination_format and destination_format_tags are specified
# the following code will attempt to find the instance tags first,
# then the instance attributes next, and finally if neither are found
# assign nil for the desired destination format attribute.
if self.destination_format and self.destination_format_tags: if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ]) dest_vars = []
inst_tags = getattr(instance, 'tags')
for tag in self.destination_format_tags:
if tag in inst_tags:
dest_vars.append(inst_tags[tag])
elif hasattr(instance, tag):
dest_vars.append(getattr(instance, tag))
else:
dest_vars.append('nil')
dest = self.destination_format.format(*dest_vars)
elif instance.subnet_id: elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None) dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None: if dest is None:
@@ -891,6 +989,16 @@ class Ec2Inventory(object):
if self.nested_groups: if self.nested_groups:
self.push_group(self.inventory, 'instance_states', state_name) self.push_group(self.inventory, 'instance_states', state_name)
# Inventory: Group by platform
if self.group_by_platform:
if instance.platform:
platform = self.to_safe('platform_' + instance.platform)
else:
platform = self.to_safe('platform_undefined')
self.push(self.inventory, platform, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'platforms', platform)
# Inventory: Group by key pair # Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name: if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name) key_name = self.to_safe('key_' + instance.key_name)
@@ -915,11 +1023,11 @@ class Ec2Inventory(object):
self.push_group(self.inventory, 'security_groups', key) self.push_group(self.inventory, 'security_groups', key)
except AttributeError: except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.', self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.'])) 'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by AWS account ID # Inventory: Group by AWS account ID
if self.group_by_aws_account: if self.group_by_aws_account:
self.push(self.inventory, self.aws_account_id, dest) self.push(self.inventory, self.aws_account_id, hostname)
if self.nested_groups: if self.nested_groups:
self.push_group(self.inventory, 'accounts', self.aws_account_id) self.push_group(self.inventory, 'accounts', self.aws_account_id)
@@ -960,8 +1068,7 @@ class Ec2Inventory(object):
self.push(self.inventory, 'ec2', hostname) self.push(self.inventory, 'ec2', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
def add_rds_instance(self, instance, region): def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is ''' Adds an RDS instance to the inventory and index, as long as it is
@@ -1040,8 +1147,25 @@ class Ec2Inventory(object):
except AttributeError: except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.', self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.'])) 'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by engine # Inventory: Group by engine
if self.group_by_rds_engine: if self.group_by_rds_engine:
@@ -1055,11 +1179,17 @@ class Ec2Inventory(object):
if self.nested_groups: if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: all RDS instances # Global Tag: all RDS instances
self.push(self.inventory, 'rds', hostname) self.push(self.inventory, 'rds', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
def add_elasticache_cluster(self, cluster, region): def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as ''' Adds an ElastiCache cluster to the inventory and index, as long as
@@ -1310,8 +1440,7 @@ class Ec2Inventory(object):
r53_conn = route53.Route53Connection() r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones() all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1] route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
not in self.route53_excluded_zones ]
self.route53_records = {} self.route53_records = {}
@@ -1328,14 +1457,13 @@ class Ec2Inventory(object):
self.route53_records.setdefault(resource, set()) self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name) self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance): def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from ''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. ''' instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name', instance_attributes = ['public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ] 'ip_address', 'private_ip_address']
name_list = set() name_list = set()
@@ -1364,7 +1492,7 @@ class Ec2Inventory(object):
elif key == 'ec2__previous_state': elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or '' instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]: elif isinstance(value, (int, bool)):
instance_vars[key] = value instance_vars[key] = value
elif isinstance(value, six.string_types): elif isinstance(value, six.string_types):
instance_vars[key] = value.strip() instance_vars[key] = value.strip()
@@ -1391,13 +1519,13 @@ class Ec2Inventory(object):
elif key == 'ec2_block_device_mapping': elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {} instance_vars["ec2_block_devices"] = {}
for k, v in value.items(): for k, v in value.items():
instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
else: else:
pass pass
# TODO Product codes if someone finds them useful # TODO Product codes if someone finds them useful
#print key # print key
#print type(value) # print type(value)
#print value # print value
instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
@@ -1441,9 +1569,9 @@ class Ec2Inventory(object):
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica': elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
replica_count += 1 replica_count += 1
# Target: Redis Replication Groups # Target: Redis Replication Groups
@@ -1469,7 +1597,7 @@ class Ec2Inventory(object):
# Target: Everything # Target: Everything
# Preserve booleans and integers # Preserve booleans and integers
elif type(value) in [int, bool]: elif isinstance(value, (int, bool)):
host_info[key] = value host_info[key] = value
# Target: Everything # Target: Everything
@@ -1495,10 +1623,10 @@ class Ec2Inventory(object):
# Need to load index from cache # Need to load index from cache
self.load_index_from_cache() self.load_index_from_cache()
if not self.args.host in self.index: if self.args.host not in self.index:
# try updating the cache # try updating the cache
self.do_api_calls_update_cache() self.do_api_calls_update_cache()
if not self.args.host in self.index: if self.args.host not in self.index:
# host might not exist anymore # host might not exist anymore
return self.json_format_dict({}, True) return self.json_format_dict({}, True)
@@ -1553,9 +1681,9 @@ class Ec2Inventory(object):
def to_safe(self, word): def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_" regex = r"[^A-Za-z0-9\_"
if not self.replace_dash_in_groups: if not self.replace_dash_in_groups:
regex += "\-" regex += r"\-"
return re.sub(regex + "]", "_", word) return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False): def json_format_dict(self, data, pretty=False):

View File

@@ -68,6 +68,21 @@
# #
# foreman_hostgroup_myapp_webtier_datacenter1 # foreman_hostgroup_myapp_webtier_datacenter1
# #
# If the parameter want_hostcollections is set to true, the
# collections each host is in are created as Ansible groups with a
# foreman_hostcollection prefix, all lowercase and problematic
# parameters removed. So e.g. the Foreman host collection
#
# Patch Window Thursday
#
# would turn into the Ansible group:
#
# foreman_hostcollection_patchwindowthursday
#
# If the parameter host_filters is set, it will be used as the
# "search" parameter for the /api/v2/hosts call. This can be used to
# restrict the list of returned host, as shown below.
#
# Furthermore Ansible groups can be created on the fly using the # Furthermore Ansible groups can be created on the fly using the
# *group_patterns* variable in *foreman.ini* so that you can build up # *group_patterns* variable in *foreman.ini* so that you can build up
# hierarchies using parameters on the hostgroup and host variables. # hierarchies using parameters on the hostgroup and host variables.
@@ -108,15 +123,38 @@ user = foreman
password = secret password = secret
ssl_verify = True ssl_verify = True
# Retrieve only hosts from the organization "Web Engineering".
# host_filters = organization="Web Engineering"
# Retrieve only hosts from the organization "Web Engineering" that are
# also in the host collection "Apache Servers".
# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
[ansible] [ansible]
group_patterns = ["{app}-{tier}-{color}", group_patterns = ["{app}-{tier}-{color}",
"{app}-{color}", "{app}-{color}",
"{app}", "{app}",
"{tier}"] "{tier}"]
group_prefix = foreman_ group_prefix = foreman_
# Whether to fetch facts from Foreman and store them on the host # Whether to fetch facts from Foreman and store them on the host
want_facts = True want_facts = True
# Whether to create Ansible groups for host collections. Only tested
# with Katello (Red Hat Satellite). Disabled by default to not break
# the script for stand-alone Foreman.
want_hostcollections = False
# Whether to interpret global parameters value as JSON (if possible, else
# take as is). Only tested with Katello (Red Hat Satellite).
# This allows to define lists and dictionaries (and more complicated structures)
# variables by entering them as JSON string in Foreman parameters.
# Disabled by default as the change would else not be backward compatible.
rich_params = False
[cache] [cache]
path = . path = .
max_age = 60 max_age = 60
# Whether to scan foreman to add recently created hosts in inventory cache
scan_new_hosts = True

View File

@@ -46,6 +46,7 @@ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
from requests.auth import HTTPBasicAuth from requests.auth import HTTPBasicAuth
def json_format_dict(data, pretty=False): def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string""" """Converts a dict to a JSON object and dumps it as a formatted string"""
@@ -54,6 +55,7 @@ def json_format_dict(data, pretty=False):
else: else:
return json.dumps(data) return json.dumps(data)
class ForemanInventory(object): class ForemanInventory(object):
def __init__(self): def __init__(self):
@@ -62,6 +64,7 @@ class ForemanInventory(object):
self.params = dict() # Params of each host self.params = dict() # Params of each host
self.facts = dict() # Facts of each host self.facts = dict() # Facts of each host
self.hostgroups = dict() # host groups self.hostgroups = dict() # host groups
self.hostcollections = dict() # host collections
self.session = None # Requests session self.session = None # Requests session
self.config_paths = [ self.config_paths = [
"/etc/ansible/foreman.ini", "/etc/ansible/foreman.ini",
@@ -105,6 +108,22 @@ class ForemanInventory(object):
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_facts = True self.want_facts = True
try:
self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.want_hostcollections = False
# Do we want parameters to be interpreted if possible as JSON? (no by default)
try:
self.rich_params = config.getboolean('ansible', 'rich_params')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.rich_params = False
try:
self.host_filters = config.get('foreman', 'host_filters')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.host_filters = None
# Cache related # Cache related
try: try:
cache_path = os.path.expanduser(config.get('cache', 'path')) cache_path = os.path.expanduser(config.get('cache', 'path'))
@@ -115,10 +134,16 @@ class ForemanInventory(object):
self.cache_path_inventory = cache_path + "/%s.index" % script self.cache_path_inventory = cache_path + "/%s.index" % script
self.cache_path_params = cache_path + "/%s.params" % script self.cache_path_params = cache_path + "/%s.params" % script
self.cache_path_facts = cache_path + "/%s.facts" % script self.cache_path_facts = cache_path + "/%s.facts" % script
self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
try: try:
self.cache_max_age = config.getint('cache', 'max_age') self.cache_max_age = config.getint('cache', 'max_age')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.cache_max_age = 60 self.cache_max_age = 60
try:
self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
self.scan_new_hosts = False
return True return True
def parse_cli_args(self): def parse_cli_args(self):
@@ -138,12 +163,17 @@ class ForemanInventory(object):
self.session.verify = self.foreman_ssl_verify self.session.verify = self.foreman_ssl_verify
return self.session return self.session
def _get_json(self, url, ignore_errors=None): def _get_json(self, url, ignore_errors=None, params=None):
if params is None:
params = {}
params['per_page'] = 250
page = 1 page = 1
results = [] results = []
s = self._get_session() s = self._get_session()
while True: while True:
ret = s.get(url, params={'page': page, 'per_page': 250}) params['page'] = page
ret = s.get(url, params=params)
if ignore_errors and ret.status_code in ignore_errors: if ignore_errors and ret.status_code in ignore_errors:
break break
ret.raise_for_status() ret.raise_for_status()
@@ -156,7 +186,7 @@ class ForemanInventory(object):
return json['results'] return json['results']
# List of all hosts is returned paginaged # List of all hosts is returned paginaged
results = results + json['results'] results = results + json['results']
if len(results) >= json['total']: if len(results) >= json['subtotal']:
break break
page += 1 page += 1
if len(json['results']) == 0: if len(json['results']) == 0:
@@ -167,22 +197,35 @@ class ForemanInventory(object):
return results return results
def _get_hosts(self): def _get_hosts(self):
return self._get_json("%s/api/v2/hosts" % self.foreman_url) url = "%s/api/v2/hosts" % self.foreman_url
def _get_all_params_by_id(self, hid): params = {}
if self.host_filters:
params['search'] = self.host_filters
return self._get_json(url, params=params)
def _get_host_data_by_id(self, hid):
url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
ret = self._get_json(url, [404]) return self._get_json(url)
if ret == []:
ret = {}
return ret.get('all_parameters', {})
def _resolve_params(self, host): def _get_facts_by_id(self, hid):
"""Fetch host params and convert to dict""" url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
return self._get_json(url)
def _resolve_params(self, host_params):
"""Convert host params to dict"""
params = {} params = {}
for param in self._get_all_params_by_id(host['id']): for param in host_params:
name = param['name'] name = param['name']
params[name] = param['value'] if self.rich_params:
try:
params[name] = json.loads(param['value'])
except ValueError:
params[name] = param['value']
else:
params[name] = param['value']
return params return params
@@ -216,6 +259,7 @@ class ForemanInventory(object):
self.write_to_cache(self.inventory, self.cache_path_inventory) self.write_to_cache(self.inventory, self.cache_path_inventory)
self.write_to_cache(self.params, self.cache_path_params) self.write_to_cache(self.params, self.cache_path_params)
self.write_to_cache(self.facts, self.cache_path_facts) self.write_to_cache(self.facts, self.cache_path_facts)
self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
def to_safe(self, word): def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores '''Converts 'bad' characters in a string to underscores
@@ -224,18 +268,23 @@ class ForemanInventory(object):
>>> ForemanInventory.to_safe("foo-bar baz") >>> ForemanInventory.to_safe("foo-bar baz")
'foo_barbaz' 'foo_barbaz'
''' '''
regex = "[^A-Za-z0-9\_]" regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", "")) return re.sub(regex, "_", word.replace(" ", ""))
def update_cache(self): def update_cache(self, scan_only_new_hosts=False):
"""Make calls to foreman and save the output in a cache""" """Make calls to foreman and save the output in a cache"""
self.groups = dict() self.groups = dict()
self.hosts = dict() self.hosts = dict()
for host in self._get_hosts(): for host in self._get_hosts():
if host['name'] in self.cache.keys() and scan_only_new_hosts:
continue
dns_name = host['name'] dns_name = host['name']
host_data = self._get_host_data_by_id(host['id'])
host_params = host_data.get('all_parameters', {})
# Create ansible groups for hostgroup # Create ansible groups for hostgroup
group = 'hostgroup' group = 'hostgroup'
val = host.get('%s_title' % group) or host.get('%s_name' % group) val = host.get('%s_title' % group) or host.get('%s_name' % group)
@@ -256,16 +305,13 @@ class ForemanInventory(object):
safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower())) safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower()))
self.inventory[safe_key].append(dns_name) self.inventory[safe_key].append(dns_name)
params = self._resolve_params(host) params = self._resolve_params(host_params)
# Ansible groups by parameters in host groups and Foreman host # Ansible groups by parameters in host groups and Foreman host
# attributes. # attributes.
groupby = copy.copy(params) groupby = dict()
for k, v in host.items(): for k, v in params.items():
if isinstance(v, str): groupby[k] = self.to_safe(str(v))
groupby[k] = self.to_safe(v)
elif isinstance(v, int):
groupby[k] = v
# The name of the ansible groups is given by group_patterns: # The name of the ansible groups is given by group_patterns:
for pattern in self.group_patterns: for pattern in self.group_patterns:
@@ -275,6 +321,17 @@ class ForemanInventory(object):
except KeyError: except KeyError:
pass # Host not part of this group pass # Host not part of this group
if self.want_hostcollections:
hostcollections = host_data.get('host_collections')
if hostcollections:
# Create Ansible groups for host collections
for hostcollection in hostcollections:
safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
self.inventory[safe_key].append(dns_name)
self.hostcollections[dns_name] = hostcollections
self.cache[dns_name] = host self.cache[dns_name] = host
self.params[dns_name] = params self.params[dns_name] = params
self.facts[dns_name] = self._get_facts(host) self.facts[dns_name] = self._get_facts(host)
@@ -296,31 +353,36 @@ class ForemanInventory(object):
def load_inventory_from_cache(self): def load_inventory_from_cache(self):
"""Read the index from the cache file sets self.index""" """Read the index from the cache file sets self.index"""
cache = open(self.cache_path_inventory, 'r') with open(self.cache_path_inventory, 'r') as fp:
json_inventory = cache.read() self.inventory = json.load(fp)
self.inventory = json.loads(json_inventory)
def load_params_from_cache(self): def load_params_from_cache(self):
"""Read the index from the cache file sets self.index""" """Read the index from the cache file sets self.index"""
cache = open(self.cache_path_params, 'r') with open(self.cache_path_params, 'r') as fp:
json_params = cache.read() self.params = json.load(fp)
self.params = json.loads(json_params)
def load_facts_from_cache(self): def load_facts_from_cache(self):
"""Read the index from the cache file sets self.facts""" """Read the index from the cache file sets self.facts"""
if not self.want_facts: if not self.want_facts:
return return
cache = open(self.cache_path_facts, 'r') with open(self.cache_path_facts, 'r') as fp:
json_facts = cache.read() self.facts = json.load(fp)
self.facts = json.loads(json_facts)
def load_hostcollections_from_cache(self):
"""Read the index from the cache file sets self.hostcollections"""
if not self.want_hostcollections:
return
with open(self.cache_path_hostcollections, 'r') as fp:
self.hostcollections = json.load(fp)
def load_cache_from_cache(self): def load_cache_from_cache(self):
"""Read the cache from the cache file sets self.cache""" """Read the cache from the cache file sets self.cache"""
cache = open(self.cache_path_cache, 'r') with open(self.cache_path_cache, 'r') as fp:
json_cache = cache.read() self.cache = json.load(fp)
self.cache = json.loads(json_cache)
def get_inventory(self): def get_inventory(self):
if self.args.refresh_cache or not self.is_cache_valid(): if self.args.refresh_cache or not self.is_cache_valid():
@@ -329,7 +391,10 @@ class ForemanInventory(object):
self.load_inventory_from_cache() self.load_inventory_from_cache()
self.load_params_from_cache() self.load_params_from_cache()
self.load_facts_from_cache() self.load_facts_from_cache()
self.load_hostcollections_from_cache()
self.load_cache_from_cache() self.load_cache_from_cache()
if self.scan_new_hosts:
self.update_cache(True)
def get_host_info(self): def get_host_info(self):
"""Get variables about a specific host""" """Get variables about a specific host"""

View File

@@ -40,6 +40,7 @@ based on the data obtained from the libcloud Node object:
- gce_tags - gce_tags
- gce_metadata - gce_metadata
- gce_network - gce_network
- gce_subnetwork
When run in --list mode, instances are grouped by the following categories: When run in --list mode, instances are grouped by the following categories:
- zone: - zone:
@@ -73,7 +74,6 @@ Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
Version: 0.0.3 Version: 0.0.3
''' '''
__requires__ = ['pycrypto>=2.6']
try: try:
import pkg_resources import pkg_resources
except ImportError: except ImportError:
@@ -83,8 +83,8 @@ except ImportError:
# library is used. # library is used.
pass pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v2" USER_AGENT_VERSION = "v2"
import sys import sys
import os import os
@@ -92,7 +92,10 @@ import argparse
from time import time from time import time
import ConfigParser if sys.version_info >= (3, 0):
import configparser
else:
import ConfigParser as configparser
import logging import logging
logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
@@ -213,10 +216,11 @@ class GceInventory(object):
# This provides empty defaults to each key, so that environment # This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able # variable configuration (as opposed to INI configuration) is able
# to work. # to work.
config = ConfigParser.SafeConfigParser(defaults={ config = configparser.SafeConfigParser(defaults={
'gce_service_account_email_address': '', 'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '', 'gce_service_account_pem_file_path': '',
'gce_project_id': '', 'gce_project_id': '',
'gce_zone': '',
'libcloud_secrets': '', 'libcloud_secrets': '',
'inventory_ip_type': '', 'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp', 'cache_path': '~/.ansible/tmp',
@@ -270,10 +274,11 @@ class GceInventory(object):
# exists. # exists.
secrets_path = self.config.get('gce', 'libcloud_secrets') secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False secrets_found = False
try: try:
import secrets import secrets
args = list(getattr(secrets, 'GCE_PARAMS', [])) args = list(secrets.GCE_PARAMS)
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) kwargs = secrets.GCE_KEYWORD_PARAMS
secrets_found = True secrets_found = True
except: except:
pass pass
@@ -291,18 +296,23 @@ class GceInventory(object):
secrets_found = True secrets_found = True
except: except:
pass pass
if not secrets_found: if not secrets_found:
args = [ args = [
self.config.get('gce','gce_service_account_email_address'), self.config.get('gce', 'gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path') self.config.get('gce', 'gce_service_account_pem_file_path')
] ]
kwargs = {'project': self.config.get('gce', 'gce_project_id')} kwargs = {'project': self.config.get('gce', 'gce_project_id'),
'datacenter': self.config.get('gce', 'gce_zone')}
# If the appropriate environment variables are set, they override # If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs. # other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0]) args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
# Retrieve and return the GCE driver. # Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs) gce = get_driver(Provider.GCE)(*args, **kwargs)
@@ -315,7 +325,7 @@ class GceInventory(object):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable. '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call''' If provided, this will be used to filter the results of the grouped_instances call'''
import csv import csv
reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True) reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader] zones = [r for r in reader]
return [z for z in zones[0]] return [z for z in zones[0]]
@@ -325,17 +335,16 @@ class GceInventory(object):
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE') description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True, parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)') help='List instances (default: True)')
parser.add_argument('--host', action='store', parser.add_argument('--host', action='store',
help='Get all information about an instance') help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False, parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)') help='Pretty format (default: False)')
parser.add_argument( parser.add_argument(
'--refresh-cache', action='store_true', default=False, '--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)') help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args() self.args = parser.parse_args()
def node_to_dict(self, inst): def node_to_dict(self, inst):
md = {} md = {}
@@ -347,6 +356,9 @@ class GceInventory(object):
md[entry['key']] = entry['value'] md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
subnet = None
if 'subnetwork' in inst.extra['networkInterfaces'][0]:
subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
# default to exernal IP unless user has specified they prefer internal # default to exernal IP unless user has specified they prefer internal
if self.ip_type == 'internal': if self.ip_type == 'internal':
ssh_host = inst.private_ips[0] ssh_host = inst.private_ips[0]
@@ -367,6 +379,7 @@ class GceInventory(object):
'gce_tags': inst.extra['tags'], 'gce_tags': inst.extra['tags'],
'gce_metadata': md, 'gce_metadata': md,
'gce_network': net, 'gce_network': net,
'gce_subnetwork': subnet,
# Hosts don't have a public name, so we add an IP # Hosts don't have a public name, so we add an IP
'ansible_ssh_host': ssh_host 'ansible_ssh_host': ssh_host
} }
@@ -394,7 +407,7 @@ class GceInventory(object):
all_nodes = [] all_nodes = []
params, more_results = {'maxResults': 500}, True params, more_results = {'maxResults': 500}, True
while more_results: while more_results:
self.driver.connection.gce_params=params self.driver.connection.gce_params = params
all_nodes.extend(self.driver.list_nodes()) all_nodes.extend(self.driver.list_nodes())
more_results = 'pageToken' in params more_results = 'pageToken' in params
return all_nodes return all_nodes
@@ -470,6 +483,13 @@ class GceInventory(object):
else: else:
groups[stat] = [name] groups[stat] = [name]
for private_ip in node.private_ips:
groups[private_ip] = [name]
if len(node.public_ips) >= 1:
for public_ip in node.public_ips:
groups[public_ip] = [name]
groups["_meta"] = meta groups["_meta"] = meta
return groups return groups

View File

@@ -29,8 +29,11 @@
# - /etc/openstack/clouds.yaml # - /etc/openstack/clouds.yaml
# - /etc/ansible/openstack.yml # - /etc/ansible/openstack.yml
# The clouds.yaml file can contain entries for multiple clouds and multiple # The clouds.yaml file can contain entries for multiple clouds and multiple
# regions of those clouds. If it does, this inventory module will connect to # regions of those clouds. If it does, this inventory module will by default
# all of them and present them as one contiguous inventory. # connect to all of them and present them as one contiguous inventory. You
# can limit to one cloud by passing the `--cloud` parameter, or use the
# OS_CLOUD environment variable. If caching is enabled, and a cloud is
# selected, then per-cloud cache folders will be used.
# #
# See the adjacent openstack.yml file for an example config file # See the adjacent openstack.yml file for an example config file
# There are two ansible inventory specific options that can be set in # There are two ansible inventory specific options that can be set in
@@ -44,6 +47,9 @@
# has failed (for example, bad credentials or being offline). # has failed (for example, bad credentials or being offline).
# When set to False, the inventory will return hosts from # When set to False, the inventory will return hosts from
# whichever other clouds it can contact. (Default: True) # whichever other clouds it can contact. (Default: True)
#
# Also it is possible to pass the correct user by setting an ansible_user: $myuser
# metadata attribute.
import argparse import argparse
import collections import collections
@@ -108,20 +114,28 @@ def get_groups_from_server(server_vars, namegroup=True):
return groups return groups
def get_host_groups(inventory, refresh=False): def get_host_groups(inventory, refresh=False, cloud=None):
(cache_file, cache_expiration_time) = get_cache_settings() (cache_file, cache_expiration_time) = get_cache_settings(cloud)
if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory)) groups = to_json(get_host_groups_from_cloud(inventory))
open(cache_file, 'w').write(groups) with open(cache_file, 'w') as f:
f.write(groups)
else: else:
groups = open(cache_file, 'r').read() with open(cache_file, 'r') as f:
groups = f.read()
return groups return groups
def append_hostvars(hostvars, groups, key, server, namegroup=False): def append_hostvars(hostvars, groups, key, server, namegroup=False):
hostvars[key] = dict( hostvars[key] = dict(
ansible_ssh_host=server['interface_ip'], ansible_ssh_host=server['interface_ip'],
ansible_host=server['interface_ip'],
openstack=server) openstack=server)
metadata = server.get('metadata', {})
if 'ansible_user' in metadata:
hostvars[key]['ansible_user'] = metadata['ansible_user']
for group in get_groups_from_server(server, namegroup=namegroup): for group in get_groups_from_server(server, namegroup=namegroup):
groups[group].append(key) groups[group].append(key)
@@ -176,12 +190,14 @@ def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
return True return True
def get_cache_settings(): def get_cache_settings(cloud=None):
config = os_client_config.config.OpenStackConfig( config = os_client_config.config.OpenStackConfig(
config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES)
# For inventory-wide caching # For inventory-wide caching
cache_expiration_time = config.get_cache_expiration_time() cache_expiration_time = config.get_cache_expiration_time()
cache_path = config.get_cache_path() cache_path = config.get_cache_path()
if cloud:
cache_path = '{0}_{1}'.format(cache_path, cloud)
if not os.path.exists(cache_path): if not os.path.exists(cache_path):
os.makedirs(cache_path) os.makedirs(cache_path)
cache_file = os.path.join(cache_path, 'ansible-inventory.cache') cache_file = os.path.join(cache_path, 'ansible-inventory.cache')
@@ -194,6 +210,8 @@ def to_json(in_dict):
def parse_args(): def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module') parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'),
help='Cloud name (default: None')
parser.add_argument('--private', parser.add_argument('--private',
action='store_true', action='store_true',
help='Use private address for ansible host') help='Use private address for ansible host')
@@ -218,6 +236,7 @@ def main():
refresh=args.refresh, refresh=args.refresh,
config_files=config_files, config_files=config_files,
private=args.private, private=args.private,
cloud=args.cloud,
) )
if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
inventory_args.update(dict( inventory_args.update(dict(
@@ -232,7 +251,7 @@ def main():
inventory = shade.inventory.OpenStackInventory(**inventory_args) inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list: if args.list:
output = get_host_groups(inventory, refresh=args.refresh) output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud)
elif args.host: elif args.host:
output = to_json(inventory.get_host(args.host)) output = to_json(inventory.get_host(args.host))
print(output) print(output)

View File

@@ -1,18 +1,10 @@
clouds: clouds:
mordred: vexxhost:
cloud: hp profile: vexxhost
auth: auth:
username: mordred@example.com project_name: 39e296b2-fc96-42bf-8091-cb742fa13da9
password: my-wonderful-password username: fb886a9b-c37b-442a-9be3-964bed961e04
project_name: mordred-tenant password: fantastic-password1
region_name: region-b.geo-1
monty:
cloud: hp
auth:
username: monty.taylor@example.com
password: another-wonderful-password
project_name: monty.taylor@example.com-default-tenant
region_name: region-b.geo-1
rax: rax:
cloud: rackspace cloud: rackspace
auth: auth:
@@ -22,7 +14,7 @@ clouds:
region_name: DFW,ORD,IAD region_name: DFW,ORD,IAD
devstack: devstack:
auth: auth:
auth_url: http://127.0.0.1:35357/v2.0/ auth_url: https://devstack.example.com
username: stack username: stack
password: stack password: stack
project_name: stack project_name: stack

View File

@@ -124,10 +124,10 @@ def create_connection():
# Create parser and add ovirt section if it doesn't exist: # Create parser and add ovirt section if it doesn't exist:
config = configparser.SafeConfigParser( config = configparser.SafeConfigParser(
defaults={ defaults={
'ovirt_url': None, 'ovirt_url': os.environ.get('OVIRT_URL'),
'ovirt_username': None, 'ovirt_username': os.environ.get('OVIRT_USERNAME'),
'ovirt_password': None, 'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
'ovirt_ca_file': None, 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE'),
} }
) )
if not config.has_section('ovirt'): if not config.has_section('ovirt'):

View File

@@ -1,4 +1,8 @@
#!/usr/bin/env python #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C): 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Requirements # Requirements
# - pyvmomi >= 6.0.0.2016.4 # - pyvmomi >= 6.0.0.2016.4
@@ -23,42 +27,49 @@ $ jq '._meta.hostvars[].config' data.json | head
from __future__ import print_function from __future__ import print_function
import argparse
import atexit import atexit
import datetime import datetime
import getpass import itertools
import jinja2 import json
import os import os
import six import re
import ssl import ssl
import sys import sys
import uuid import uuid
from collections import defaultdict
from six.moves import configparser
from time import time from time import time
HAS_PYVMOMI = False import six
from jinja2 import Environment
from six import integer_types, string_types
from six.moves import configparser
try: try:
from pyVmomi import vim import argparse
except ImportError:
sys.exit('Error: This inventory script required "argparse" python module. Please install it or upgrade to python-2.7')
try:
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect from pyVim.connect import SmartConnect, Disconnect
HAS_PYVMOMI = True
except ImportError: except ImportError:
pass sys.exit("ERROR: This inventory script required 'pyVmomi' Python module, it was not able to load it")
try:
import json
except ImportError:
import simplejson as json
hasvcr = False def regex_match(s, pattern):
try: '''Custom filter for regex matching'''
import vcr reg = re.compile(pattern)
if reg.match(s):
return True
else:
return False
hasvcr = True
except ImportError: def select_chain_match(inlist, key, pattern):
pass '''Get a key from a list of dicts, squash values to a single list, then filter'''
outlist = [x[key] for x in inlist]
outlist = list(itertools.chain(*outlist))
outlist = [x for x in outlist if regex_match(x, pattern)]
return outlist
class VMwareMissingHostException(Exception): class VMwareMissingHostException(Exception):
@@ -89,10 +100,7 @@ class VMWareInventory(object):
skip_keys = [] skip_keys = []
groupby_patterns = [] groupby_patterns = []
if sys.version_info > (3, 0): safe_types = [bool, str, float, None] + list(integer_types)
safe_types = [int, bool, str, float, None]
else:
safe_types = [int, long, bool, str, float, None]
iter_types = [dict, list] iter_types = [dict, list]
bad_types = ['Array', 'disabledMethod', 'declaredAlarmState'] bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
@@ -104,15 +112,18 @@ class VMWareInventory(object):
custom_fields = {} custom_fields = {}
# use jinja environments to allow for custom filters
env = Environment()
env.filters['regex_match'] = regex_match
env.filters['select_chain_match'] = select_chain_match
# translation table for attributes to fetch for known vim types # translation table for attributes to fetch for known vim types
if not HAS_PYVMOMI:
vimTable = {} vimTable = {
else: vim.Datastore: ['_moId', 'name'],
vimTable = { vim.ResourcePool: ['_moId', 'name'],
vim.Datastore: ['_moId', 'name'], vim.HostSystem: ['_moId', 'name'],
vim.ResourcePool: ['_moId', 'name'], }
vim.HostSystem: ['_moId', 'name'],
}
@staticmethod @staticmethod
def _empty_inventory(): def _empty_inventory():
@@ -156,7 +167,6 @@ class VMWareInventory(object):
return json.dumps(data_to_print, indent=2) return json.dumps(data_to_print, indent=2)
def is_cache_valid(self): def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid ''' ''' Determines if the cache files have expired, or if it is still valid '''
valid = False valid = False
@@ -170,21 +180,16 @@ class VMWareInventory(object):
return valid return valid
def do_api_calls_update_cache(self): def do_api_calls_update_cache(self):
''' Get instances and cache the data ''' ''' Get instances and cache the data '''
self.inventory = self.instances_to_inventory(self.get_instances()) self.inventory = self.instances_to_inventory(self.get_instances())
self.write_to_cache(self.inventory) self.write_to_cache(self.inventory)
def write_to_cache(self, data): def write_to_cache(self, data):
''' Dump inventory to json file ''' ''' Dump inventory to json file '''
with open(self.cache_path_cache, 'wb') as f: with open(self.cache_path_cache, 'wb') as f:
f.write(json.dumps(data)) f.write(json.dumps(data))
def get_inventory_from_cache(self): def get_inventory_from_cache(self):
''' Read in jsonified inventory ''' ''' Read in jsonified inventory '''
jdata = None jdata = None
@@ -193,7 +198,6 @@ class VMWareInventory(object):
return json.loads(jdata) return json.loads(jdata)
def read_settings(self): def read_settings(self):
''' Reads the settings from the vmware_inventory.ini file ''' ''' Reads the settings from the vmware_inventory.ini file '''
scriptbasename = __file__ scriptbasename = __file__
@@ -222,7 +226,7 @@ class VMWareInventory(object):
'resourceconfig', 'resourceconfig',
'alias_pattern': '{{ config.name + "_" + config.uuid }}', 'alias_pattern': '{{ config.name + "_" + config.uuid }}',
'host_pattern': '{{ guest.ipaddress }}', 'host_pattern': '{{ guest.ipaddress }}',
'host_filters': '{{ guest.gueststate == "running" }}', 'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}', 'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
'lower_var_keys': True, 'lower_var_keys': True,
'custom_field_group_prefix': 'vmware_tag_', 'custom_field_group_prefix': 'vmware_tag_',
@@ -239,6 +243,9 @@ class VMWareInventory(object):
vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path)) vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
config.read(vmware_ini_path) config.read(vmware_ini_path)
if 'vmware' not in config.sections():
config.add_section('vmware')
# apply defaults # apply defaults
for k, v in defaults['vmware'].items(): for k, v in defaults['vmware'].items():
if not config.has_option('vmware', k): if not config.has_option('vmware', k):
@@ -297,7 +304,6 @@ class VMWareInventory(object):
self.config = config self.config = config
def parse_cli_args(self): def parse_cli_args(self):
''' Command line argument processing ''' ''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi') parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
@@ -314,7 +320,6 @@ class VMWareInventory(object):
self.args = parser.parse_args() self.args = parser.parse_args()
def get_instances(self): def get_instances(self):
''' Get a list of vm instances with pyvmomi ''' ''' Get a list of vm instances with pyvmomi '''
kwargs = {'host': self.server, kwargs = {'host': self.server,
'user': self.username, 'user': self.username,
@@ -329,17 +334,23 @@ class VMWareInventory(object):
return self._get_instances(kwargs) return self._get_instances(kwargs)
def _get_instances(self, inkwargs): def _get_instances(self, inkwargs):
''' Make API calls ''' ''' Make API calls '''
instances = [] instances = []
si = SmartConnect(**inkwargs) try:
si = SmartConnect(**inkwargs)
except ssl.SSLError as connection_error:
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs:
sys.exit("Unable to connect to ESXi server due to %s, "
"please specify validate_certs=False and try again" % connection_error)
except Exception as exc:
self.debugl("Unable to connect to ESXi server due to %s" % exc)
sys.exit("Unable to connect to ESXi server due to %s" % exc)
self.debugl('retrieving all instances') self.debugl('retrieving all instances')
if not si: if not si:
print("Could not connect to the specified host using specified " sys.exit("Could not connect to the specified host using specified "
"username and password") "username and password")
return -1
atexit.register(Disconnect, si) atexit.register(Disconnect, si)
content = si.RetrieveContent() content = si.RetrieveContent()
@@ -370,18 +381,22 @@ class VMWareInventory(object):
instance_tuples.append((instance, ifacts)) instance_tuples.append((instance, ifacts))
self.debugl('facts collected for all instances') self.debugl('facts collected for all instances')
cfm = content.customFieldsManager try:
if cfm is not None and cfm.field: cfm = content.customFieldsManager
for f in cfm.field: if cfm is not None and cfm.field:
if f.managedObjectType == vim.VirtualMachine: for f in cfm.field:
self.custom_fields[f.key] = f.name if f.managedObjectType == vim.VirtualMachine:
self.debugl('%d custom fieds collected' % len(self.custom_fields)) self.custom_fields[f.key] = f.name
self.debugl('%d custom fields collected' % len(self.custom_fields))
except vmodl.RuntimeFault as exc:
self.debugl("Unable to gather custom fields due to %s" % exc.msg)
except IndexError as exc:
self.debugl("Unable to gather custom fields due to %s" % exc)
return instance_tuples return instance_tuples
def instances_to_inventory(self, instances): def instances_to_inventory(self, instances):
''' Convert a list of vm objects into a json compliant inventory ''' ''' Convert a list of vm objects into a json compliant inventory '''
self.debugl('re-indexing instances based on ini settings') self.debugl('re-indexing instances based on ini settings')
inventory = VMWareInventory._empty_inventory() inventory = VMWareInventory._empty_inventory()
inventory['all'] = {} inventory['all'] = {}
@@ -412,7 +427,7 @@ class VMWareInventory(object):
# Reset the inventory keys # Reset the inventory keys
for k, v in name_mapping.items(): for k, v in name_mapping.items():
if not host_mapping or not k in host_mapping: if not host_mapping or k not in host_mapping:
continue continue
# set ansible_host (2.x) # set ansible_host (2.x)
@@ -467,7 +482,7 @@ class VMWareInventory(object):
for k, v in inventory['_meta']['hostvars'].items(): for k, v in inventory['_meta']['hostvars'].items():
if 'customvalue' in v: if 'customvalue' in v:
for tv in v['customvalue']: for tv in v['customvalue']:
if not isinstance(tv['value'], str) and not isinstance(tv['value'], unicode): if not isinstance(tv['value'], string_types):
continue continue
newkey = None newkey = None
@@ -493,12 +508,11 @@ class VMWareInventory(object):
return inventory return inventory
def create_template_mapping(self, inventory, pattern, dtype='string'): def create_template_mapping(self, inventory, pattern, dtype='string'):
''' Return a hash of uuid to templated string from pattern ''' ''' Return a hash of uuid to templated string from pattern '''
mapping = {} mapping = {}
for k, v in inventory['_meta']['hostvars'].items(): for k, v in inventory['_meta']['hostvars'].items():
t = jinja2.Template(pattern) t = self.env.from_string(pattern)
newkey = None newkey = None
try: try:
newkey = t.render(v) newkey = t.render(v)
@@ -544,15 +558,27 @@ class VMWareInventory(object):
for idx, x in enumerate(parts): for idx, x in enumerate(parts):
# if the val wasn't set yet, get it from the parent if isinstance(val, dict):
if not val: if x in val:
val = getattr(vm, x) val = val.get(x)
elif x.lower() in val:
val = val.get(x.lower())
else: else:
# in a subkey, get the subprop from the previous attrib # if the val wasn't set yet, get it from the parent
try: if not val:
val = getattr(val, x) try:
except AttributeError as e: val = getattr(vm, x)
self.debugl(e) except AttributeError as e:
self.debugl(e)
else:
# in a subkey, get the subprop from the previous attrib
try:
val = getattr(val, x)
except AttributeError as e:
self.debugl(e)
# make sure it serializes
val = self._process_object_types(val)
# lowercase keys if requested # lowercase keys if requested
if self.lowerkeys: if self.lowerkeys:
@@ -569,7 +595,6 @@ class VMWareInventory(object):
return rdata return rdata
def facts_from_vobj(self, vobj, level=0): def facts_from_vobj(self, vobj, level=0):
''' Traverse a VM object and return a json compliant data structure ''' ''' Traverse a VM object and return a json compliant data structure '''
# pyvmomi objects are not yet serializable, but may be one day ... # pyvmomi objects are not yet serializable, but may be one day ...
@@ -616,7 +641,7 @@ class VMWareInventory(object):
return rdata return rdata
def _process_object_types(self, vobj, thisvm=None, inkey=None, level=0): def _process_object_types(self, vobj, thisvm=None, inkey='', level=0):
''' Serialize an object ''' ''' Serialize an object '''
rdata = {} rdata = {}
@@ -640,12 +665,10 @@ class VMWareInventory(object):
rdata = vobj.decode('ascii', 'ignore') rdata = vobj.decode('ascii', 'ignore')
elif issubclass(type(vobj), bool) or isinstance(vobj, bool): elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
rdata = vobj rdata = vobj
elif issubclass(type(vobj), int) or isinstance(vobj, int): elif issubclass(type(vobj), integer_types) or isinstance(vobj, integer_types):
rdata = vobj rdata = vobj
elif issubclass(type(vobj), float) or isinstance(vobj, float): elif issubclass(type(vobj), float) or isinstance(vobj, float):
rdata = vobj rdata = vobj
elif issubclass(type(vobj), long) or isinstance(vobj, long):
rdata = vobj
elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple): elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
rdata = [] rdata = []
try: try:
@@ -703,14 +726,13 @@ class VMWareInventory(object):
return rdata return rdata
def get_host_info(self, host): def get_host_info(self, host):
''' Return hostvars for a single host ''' ''' Return hostvars for a single host '''
if host in self.inventory['_meta']['hostvars']: if host in self.inventory['_meta']['hostvars']:
return self.inventory['_meta']['hostvars'][host] return self.inventory['_meta']['hostvars'][host]
elif self.args.host and self.inventory['_meta']['hostvars']: elif self.args.host and self.inventory['_meta']['hostvars']:
match = None match = None
for k, v in self.inventory['_meta']['hostvars']: for k, v in self.inventory['_meta']['hostvars'].items():
if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host: if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host:
match = k match = k
break break