From bc5881ad219f042c06f163152b5f3eedf3b8f732 Mon Sep 17 00:00:00 2001 From: AlanCoding Date: Tue, 29 Jan 2019 14:59:16 -0500 Subject: [PATCH] Primary development of inventory plugins, partial compat layer Initialize some inventory plugin test data files Implement openstack inventory plugin This may be removed later: - port non-JSON line strip method from core Dupliate effort with AWX mainline devel - Produce ansible_version related to venv Refactor some of injector management, moving more of this overhead into tasks.py, when it comes to managing injector kwargs Upgrade and move openstack inventory script sync up parameters Add extremely detailed logic to inventory file creation for ec2, Azure, and gce so that they are closer to a genuine superset of what the contrib script used to give. --- .../management/commands/inventory_import.py | 14 +- awx/main/models/inventory.py | 318 +++++++++++++++--- awx/main/tasks.py | 42 ++- .../inventory/plugins/azure_rm/azure_rm.yml | 4 + .../data/inventory/plugins/ec2/aws_ec2.yml | 4 + .../plugins/openstack/file_reference | 13 + .../inventory/plugins/openstack/openstack.yml | 6 + .../test_inventory_source_injectors.py | 7 +- awx/main/tests/unit/utils/test_ansible.py | 8 + awx/main/utils/ansible.py | 66 +++- awx/main/utils/common.py | 11 +- .../{openstack.py => openstack_inventory.py} | 36 +- requirements/requirements_ansible.in | 2 +- requirements/requirements_ansible.txt | 7 +- 14 files changed, 443 insertions(+), 95 deletions(-) create mode 100644 awx/main/tests/data/inventory/plugins/azure_rm/azure_rm.yml create mode 100644 awx/main/tests/data/inventory/plugins/ec2/aws_ec2.yml create mode 100644 awx/main/tests/data/inventory/plugins/openstack/file_reference create mode 100644 awx/main/tests/data/inventory/plugins/openstack/openstack.yml rename awx/plugins/inventory/{openstack.py => openstack_inventory.py} (90%) diff --git a/awx/main/management/commands/inventory_import.py b/awx/main/management/commands/inventory_import.py index 83f37949dc..c15d7c82c6 100644 --- a/awx/main/management/commands/inventory_import.py +++ b/awx/main/management/commands/inventory_import.py @@ -27,6 +27,7 @@ from awx.main.models.inventory import ( Host ) from awx.main.utils.mem_inventory import MemInventory, dict_to_mem_data +from awx.main.utils.ansible import filter_non_json_lines # other AWX imports from awx.main.models.rbac import batch_role_ancestor_rebuilding @@ -173,15 +174,21 @@ class AnsibleInventoryLoader(object): cmd = self.get_proot_args(cmd, env) proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) - stdout, stderr = proc.communicate() - stdout = smart_text(stdout) + raw_stdout, stderr = proc.communicate() + raw_stdout = smart_text(raw_stdout) stderr = smart_text(stderr) if self.tmp_private_dir: shutil.rmtree(self.tmp_private_dir, True) if proc.returncode != 0: raise RuntimeError('%s failed (rc=%d) with stdout:\n%s\nstderr:\n%s' % ( - self.method, proc.returncode, stdout, stderr)) + self.method, proc.returncode, raw_stdout, stderr)) + + # Openstack inventory plugin gives non-JSON lines + # Also, running with higher verbosity gives non-JSON lines + stdout = filter_non_json_lines(raw_stdout) + if stdout is not raw_stdout: + logger.warning('Output had lines stripped to obtain JSON format.') for line in stderr.splitlines(): logger.error(line) @@ -313,6 +320,7 @@ class Command(BaseCommand): source = source.replace('rhv.py', 'ovirt4.py') source = source.replace('satellite6.py', 'foreman.py') source = source.replace('vmware.py', 
'vmware_inventory.py') + source = source.replace('openstack.py', 'openstack_inventory.py') if not os.path.exists(source): raise IOError('Source does not exist: %s' % source) source = os.path.join(os.getcwd(), os.path.dirname(source), diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index ec19ebb6ab..4551dfc4a0 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -1219,12 +1219,21 @@ class InventorySourceOptions(BaseModel): ('ami_id', _('Image ID')), ('availability_zone', _('Availability Zone')), ('aws_account', _('Account')), + # These should have been added, but plugins do not support them + # so we will avoid introduction, because it would regress anyway + # ('elasticache_cluster', _('ElastiCache Cluster')), + # ('elasticache_engine', _('ElastiCache Engine')), + # ('elasticache_parameter_group', _('ElastiCache Parameter Group')), + # ('elasticache_replication_group', _('ElastiCache Replication Group')), ('instance_id', _('Instance ID')), ('instance_state', _('Instance State')), ('platform', _('Platform')), ('instance_type', _('Instance Type')), ('key_pair', _('Key Name')), + # ('rds_engine', _('RDS Engine')), + # ('rds_parameter_group', _('RDP Parameter Group')), ('region', _('Region')), + # ('route53_names', _('Route53 Names')), ('security_group', _('Security Group')), ('tag_keys', _('Tags')), ('tag_none', _('Tag None')), @@ -1315,16 +1324,6 @@ class InventorySourceOptions(BaseModel): ) return None - def get_inventory_plugin_name(self, ansible_version): - if self.source in InventorySourceOptions.injectors: - return InventorySourceOptions.injectors[self.source](ansible_version).use_plugin_name() - if self.source in CLOUD_PROVIDERS or self.source == 'custom': - # TODO: today, all vendored sources are scripts - # in future release inventory plugins will replace these - return 'script' - # in other cases we do not specify which plugin to use - return None - def get_cloud_credential(self): """Return the credential which is directly tied to the inventory source type. 
""" @@ -1844,13 +1843,6 @@ class PluginFileInjector(object): Version(self.ansible_version) >= Version(self.initial_version) ) - def use_plugin_name(self): - if self.should_use_plugin() and self.plugin_name is not None: - return self.plugin_name - else: - # By default, if the plugin cannot be used, then we use old vendored scripts - return 'script' - @staticmethod def get_builtin_injector(source): from awx.main.models.credential import injectors as builtin_injectors @@ -1867,10 +1859,7 @@ class PluginFileInjector(object): env.update(injector_env) return env - def get_plugin_env(self, inventory_update, private_data_dir, private_data_files, safe=False): - return self.get_script_env(inventory_update, private_data_dir, private_data_files, safe) - - def get_script_env(self, inventory_update, private_data_dir, private_data_files, safe=False): + def _get_shared_env(self, inventory_update, private_data_dir, private_data_files, safe=False): """By default, we will apply the standard managed_by_tower injectors for the script injection """ @@ -1882,9 +1871,17 @@ class PluginFileInjector(object): if safe: from awx.main.models.credential import build_safe_env injected_env = build_safe_env(injected_env) + return injected_env - # Put in env var reference to private data files, if relevant + def get_plugin_env(self, inventory_update, private_data_dir, private_data_files, safe=False): + return self._get_shared_env(inventory_update, private_data_dir, private_data_files, safe) + + def get_script_env(self, inventory_update, private_data_dir, private_data_files, safe=False): + injected_env = self._get_shared_env(inventory_update, private_data_dir, private_data_files, safe) + + # Put in env var reference to private ini data files, if relevant if self.ini_env_reference: + credential = inventory_update.get_cloud_credential() cred_data = private_data_files.get('credentials', '') injected_env[self.ini_env_reference] = cred_data[credential] @@ -1922,14 +1919,29 @@ class azure_rm(PluginFileInjector): initial_version = '2.7' ini_env_reference = 'AZURE_INI_PATH' - def inventory_as_dict(self, inventory_source): + def inventory_as_dict(self, inventory_update, private_data_dir): ret = dict( - plugin='azure_rm', + plugin=self.plugin_name, + # By default the script did not filter hosts + default_host_filters=[], + # Groups that the script returned + keyed_groups=[ + {'prefix': '', 'separator': '', 'key': 'location'}, + {'prefix': '', 'separator': '', 'key': 'powerstate'}, + {'prefix': '', 'separator': '', 'key': 'name'} + ], + hostvar_expressions={ + 'provisioning_state': 'provisioning_state | title', + 'computer_name': 'name', + 'type': 'resource_type', + 'private_ip': 'private_ipv4_addresses | json_query("[0]")' + } ) + # TODO: all regions currently failing due to: # https://github.com/ansible/ansible/pull/48079 - if inventory_source.source_regions and 'all' not in inventory_source.source_regions: - ret['regions'] = inventory_source.source_regions.split(',') + if inventory_update.source_regions and 'all' not in inventory_update.source_regions: + ret['regions'] = inventory_update.source_regions.split(',') return ret def build_script_private_data(self, inventory_update, private_data_dir): @@ -1955,17 +1967,132 @@ class azure_rm(PluginFileInjector): class ec2(PluginFileInjector): plugin_name = 'aws_ec2' - initial_version = '2.5' + initial_version = '2.6' # 2.5 has bugs forming keyed groups ini_env_reference = 'EC2_INI_PATH' - def inventory_as_dict(self, inventory_source): + def _compat_compose_vars(self): + # 
https://gist.github.com/s-hertel/089c613914c051f443b53ece6995cc77 + return { + # vars that change + 'ec2_block_devices': ( + "dict(block_device_mappings | map(attribute='device_name') | list | zip(block_device_mappings " + "| map(attribute='ebs.volume_id') | list))" + ), + 'ec2_dns_name': 'public_dns_name', + 'ec2_group_name': 'placement.group_name', + 'ec2_instance_profile': 'iam_instance_profile | default("")', + 'ec2_ip_address': 'public_ip_address', + 'ec2_kernel': 'kernel_id | default("")', + 'ec2_monitored': "monitoring.state in ['enabled', 'pending']", + 'ec2_monitoring_state': 'monitoring.state', + 'ec2_placement': 'placement.availability_zone', + 'ec2_ramdisk': 'ramdisk_id | default("")', + 'ec2_reason': 'state_transition_reason', + 'ec2_security_group_ids': "security_groups | map(attribute='group_id') | list | join(',')", + 'ec2_security_group_names': "security_groups | map(attribute='group_name') | list | join(',')", + 'ec2_state': 'state.name', + 'ec2_state_code': 'state.code', + 'ec2_state_reason': 'state_reason.message if state_reason is defined else ""', + 'ec2_sourceDestCheck': 'source_dest_check | lower | string', # butchered snake_case case not a typo. + 'ec2_account_id': 'network_interfaces | json_query("[0].owner_id")', + # vars that just need ec2_ prefix + 'ec2_ami_launch_index': 'ami_launch_index | string', + 'ec2_architecture': 'architecture', + 'ec2_client_token': 'client_token', + 'ec2_ebs_optimized': 'ebs_optimized', + 'ec2_hypervisor': 'hypervisor', + 'ec2_image_id': 'image_id', + 'ec2_instance_type': 'instance_type', + 'ec2_key_name': 'key_name', + 'ec2_launch_time': 'launch_time', + 'ec2_platform': 'platform | default("")', + 'ec2_private_dns_name': 'private_dns_name', + 'ec2_private_ip_address': 'private_ip_address', + 'ec2_public_dns_name': 'public_dns_name', + 'ec2_region': 'placement.region', + 'ec2_root_device_name': 'root_device_name', + 'ec2_root_device_type': 'root_device_type', + 'ec2_spot_instance_request_id': 'spot_instance_request_id', + 'ec2_subnet_id': 'subnet_id', + 'ec2_virtualization_type': 'virtualization_type', + 'ec2_vpc_id': 'vpc_id' + } + + def inventory_as_dict(self, inventory_update, private_data_dir): + keyed_groups = [] + group_by_hostvar = { + 'ami_id': {'prefix': '', 'separator': '', 'key': 'image_id'}, + 'availability_zone': {'prefix': '', 'separator': '', 'key': 'placement.availability_zone'}, + 'aws_account': None, # not an option with plugin + 'instance_id': {'prefix': '', 'separator': '', 'key': 'instance_id'}, # normally turned off + 'instance_state': {'prefix': 'instance_state', 'key': 'state.name'}, + 'platform': {'prefix': 'platform', 'key': 'platform'}, + 'instance_type': {'prefix': 'type', 'key': 'instance_type'}, + 'key_pair': {'prefix': 'key', 'key': 'key_name'}, + 'region': {'prefix': '', 'separator': '', 'key': 'placement.region'}, + # Security requires some ninja jinja2 syntax, credit to s-hertel + 'security_group': {'prefix': 'security_group', 'key': 'security_groups | json_query("[].group_name")'}, + 'tag_keys': {'prefix': 'tag', 'key': 'tags'}, + 'tag_none': None, # grouping by no tags isn't a different thing with plugin + # naming is redundant, like vpc_id_vpc_8c412cea, but intended + 'vpc_id': {'prefix': 'vpc_id', 'key': 'vpc_id'}, + } + # -- same as script here -- + group_by = [x.strip().lower() for x in inventory_update.group_by.split(',') if x.strip()] + for choice in inventory_update.get_ec2_group_by_choices(): + value = bool((group_by and choice[0] in group_by) or (not group_by and choice[0] != 
'instance_id')) + # -- end sameness to script -- + if value: + this_keyed_group = group_by_hostvar.get(choice[0], None) + # If a keyed group syntax does not exist, there is nothing we can do to get this group + if this_keyed_group is not None: + keyed_groups.append(this_keyed_group) + + # Instance ID not part of compat vars, because of settings.EC2_INSTANCE_ID_VAR + # remove this variable at your own peril, there be dragons + compose_dict = {'ec2_id': 'instance_id'} + # TODO: add an ability to turn this off + compose_dict.update(self._compat_compose_vars()) + + inst_filters = { + # The script returned all states by default, the plugin does not + # https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options + # options: pending | running | shutting-down | terminated | stopping | stopped + 'instance-state-name': [ + 'running' + # 'pending', 'running', 'shutting-down', 'terminated', 'stopping', 'stopped' + ] + } + if inventory_update.instance_filters: + # logic used to live in ec2.py, now it belongs to us. Yay more code? + filter_sets = [f for f in inventory_update.instance_filters.split(',') if f] + + for instance_filter in filter_sets: + # AND logic not supported, unclear how to... + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + inst_filters[filter_key] = filter_value + ret = dict( - plugin='aws_ec2', + plugin=self.plugin_name, + hostnames=[ + 'network-interface.addresses.association.public-ip', # non-default + 'dns-name', + 'private-dns-name' + ], + keyed_groups=keyed_groups, + groups={'ec2': True}, # plugin provides "aws_ec2", but not this + compose=compose_dict, + filters=inst_filters ) # TODO: all regions currently failing due to: # https://github.com/ansible/ansible/pull/48079 - if inventory_source.source_regions and 'all' not in inventory_source.source_regions: - ret['regions'] = inventory_source.source_regions.split(',') + if inventory_update.source_regions and 'all' not in inventory_update.source_regions: + ret['regions'] = inventory_update.source_regions.split(',') return ret def build_script_private_data(self, inventory_update, private_data_dir): @@ -2023,19 +2150,57 @@ class gce(PluginFileInjector): env['GCE_INI_PATH'] = path return env + def _compat_compose_vars(self): + # missing: gce_image, gce_uuid + # https://github.com/ansible/ansible/issues/51884 + return { + 'gce_id': 'id', + 'gce_description': 'description | default(None)', + 'gce_machine_type': 'machineType', + 'gce_name': 'name', + 'gce_network': 'networkInterfaces | json_query("[0].network.name")', + 'gce_private_ip': 'networkInterfaces | json_query("[0].networkIP")', + 'gce_public_ip': 'networkInterfaces | json_query("[0].accessConfigs[0].natIP")', + 'gce_status': 'status', + 'gce_subnetwork': 'networkInterfaces | json_query("[0].subnetwork.name")', + 'gce_tags': 'tags | json_query("items")', + 'gce_zone': 'zone' + } + def inventory_as_dict(self, inventory_update, private_data_dir): - # NOTE: generalizing this to be use templating like credential types would be nice - # but with YAML content that need to inject list parameters into the YAML, - # it is hard to see any clean way we can possibly do this credential = inventory_update.get_cloud_credential() builtin_injector = self.get_builtin_injector(inventory_update.source) creds_path = builtin_injector(credential, {}, private_data_dir) + + # gce never processed ther 
group_by options, if it had, we would selectively + # apply those options here, but it didn't, so they are added here + # and we may all hope that one day they can die, and rest in peace + keyed_groups = [ + # the jinja2 syntax is duplicated with compose + # https://github.com/ansible/ansible/issues/51883 + {'prefix': '', 'separator': '', 'key': 'networkInterfaces | json_query("[0].networkIP")'}, # gce_private_ip + {'prefix': '', 'separator': '', 'key': 'networkInterfaces | json_query("[0].accessConfigs[0].natIP")'}, # gce_public_ip + {'prefix': '', 'separator': '', 'key': 'machineType'}, + {'prefix': '', 'separator': '', 'key': 'zone'}, + {'prefix': 'tag', 'key': 'tags | json_query("items")'}, # gce_tags + {'prefix': 'status', 'key': 'status | lower'} + ] + + # We need this as long as hostnames is non-default, otherwise hosts + # will not be addressed correctly, so not considered a "compat" change + compose_dict = {'ansible_ssh_host': 'networkInterfaces | json_query("[0].accessConfigs[0].natIP")'} + # These are only those necessary to emulate old hostvars + compose_dict.update(self._compat_compose_vars()) + ret = dict( - plugin='gcp_compute', + plugin=self.plugin_name, projects=[credential.get_input('project', default='')], filters=None, # necessary cruft, see: https://github.com/ansible/ansible/pull/50025 service_account_file=creds_path, - auth_kind="serviceaccount" + auth_kind="serviceaccount", + hostnames=['name', 'public_ip', 'private_ip'], # need names to match with script + keyed_groups=keyed_groups, + compose=compose_dict, ) if inventory_update.source_regions and 'all' not in inventory_update.source_regions: ret['zones'] = inventory_update.source_regions.split(',') @@ -2076,11 +2241,10 @@ class vmware(PluginFileInjector): class openstack(PluginFileInjector): ini_env_reference = 'OS_CLIENT_CONFIG_FILE' + plugin_name = 'openstack' + initial_version = '2.5' - def build_script_private_data(self, inventory_update, private_data_dir): - credential = inventory_update.get_cloud_credential() - private_data = {'credentials': {}} - + def _get_clouds_dict(self, inventory_update, credential, private_data_dir, mk_cache=True): openstack_auth = dict(auth_url=credential.get_input('host', default=''), username=credential.get_input('username', default=''), password=credential.get_input('password', default=''), @@ -2090,14 +2254,6 @@ class openstack(PluginFileInjector): private_state = inventory_update.source_vars_dict.get('private', True) verify_state = credential.get_input('verify_ssl', default=True) - # Retrieve cache path from inventory update vars if available, - # otherwise create a temporary cache path only for this update. - cache = inventory_update.source_vars_dict.get('cache', {}) - if not isinstance(cache, dict): - cache = {} - if not cache.get('path', ''): - cache_path = tempfile.mkdtemp(prefix='openstack_cache', dir=private_data_dir) - cache['path'] = cache_path openstack_data = { 'clouds': { 'devstack': { @@ -2106,8 +2262,17 @@ class openstack(PluginFileInjector): 'auth': openstack_auth, }, }, - 'cache': cache, } + if mk_cache: + # Retrieve cache path from inventory update vars if available, + # otherwise create a temporary cache path only for this update. 
+ cache = inventory_update.source_vars_dict.get('cache', {}) + if not isinstance(cache, dict): + cache = {} + if not cache.get('path', ''): + cache_path = tempfile.mkdtemp(prefix='openstack_cache', dir=private_data_dir) + cache['path'] = cache_path + openstack_data['cache'] = cache ansible_variables = { 'use_hostnames': True, 'expand_hostvars': False, @@ -2119,12 +2284,67 @@ class openstack(PluginFileInjector): ansible_variables[var_name] = inventory_update.source_vars_dict[var_name] provided_count += 1 if provided_count: + # Must we provide all 3 because the user provides any 1 of these?? + # this probably results in some incorrect mangling of the defaults openstack_data['ansible'] = ansible_variables + return openstack_data + + def build_script_private_data(self, inventory_update, private_data_dir): + credential = inventory_update.get_cloud_credential() + private_data = {'credentials': {}} + + openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir) private_data['credentials'][credential] = yaml.safe_dump( openstack_data, default_flow_style=False, allow_unicode=True ) return private_data + def inventory_as_dict(self, inventory_update, private_data_dir): + credential = inventory_update.get_cloud_credential() + + openstack_data = self._get_clouds_dict(inventory_update, credential, private_data_dir, mk_cache=False) + handle, path = tempfile.mkstemp(dir=private_data_dir) + f = os.fdopen(handle, 'w') + yaml.dump(openstack_data, f, default_flow_style=False) + f.close() + os.chmod(path, stat.S_IRUSR | stat.S_IWUSR) + + def use_host_name_for_name(a_bool_maybe): + if not isinstance(a_bool_maybe, bool): + # Could be specified by user via "host" or "uuid" + return a_bool_maybe + elif a_bool_maybe: + return 'name' # plugin default + else: + return 'uuid' + + ret = dict( + plugin=self.plugin_name, + fail_on_errors=True, + expand_hostvars=True, + inventory_hostname=use_host_name_for_name(False), + clouds_yaml_path=[path] # why a list? it just is + ) + # Note: mucking with defaults will break import integrity + # For the plugin, we need to use the same defaults as the old script + # or else imports will conflict. To find script defaults you have + # to read source code of the script. 
+ # + # Script Defaults Plugin Defaults + # 'use_hostnames': False, 'name' (True) + # 'expand_hostvars': True, 'no' (False) + # 'fail_on_errors': True, 'no' (False) + # + # These are, yet again, different from ansible_variables in script logic + # but those are applied inconsistently + source_vars = inventory_update.source_vars_dict + for var_name in ['expand_hostvars', 'fail_on_errors']: + if var_name in source_vars: + ret[var_name] = source_vars[var_name] + if 'use_hostnames' in source_vars: + ret['inventory_hostname'] = use_host_name_for_name(source_vars['use_hostnames']) + return ret + class rhv(PluginFileInjector): diff --git a/awx/main/tasks.py b/awx/main/tasks.py index fe5563445a..7775362a6b 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -1983,13 +1983,19 @@ class RunInventoryUpdate(BaseTask): env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id) env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk) env.update(STANDARD_INVENTORY_UPDATE_ENV) - plugin_name = inventory_update.get_inventory_plugin_name(self.get_ansible_version(inventory_update)) - if plugin_name is not None: - env['ANSIBLE_INVENTORY_ENABLED'] = plugin_name + injector = None if inventory_update.source in InventorySource.injectors: injector = InventorySource.injectors[inventory_update.source](self.get_ansible_version(inventory_update)) - env = injector.build_env(inventory_update, env, private_data_dir, private_data_files) + + env = injector.build_env(inventory_update, env, private_data_dir, private_data_files) + + if injector is not None: + # All CLOUD_PROVIDERS sources implement as either script or auto plugin + if injector.should_use_plugin(): + env['ANSIBLE_INVENTORY_ENABLED'] = 'auto' + else: + env['ANSIBLE_INVENTORY_ENABLED'] = 'script' if inventory_update.source in ['scm', 'custom']: for env_k in inventory_update.source_vars_dict: @@ -2069,21 +2075,21 @@ class RunInventoryUpdate(BaseTask): def build_inventory(self, inventory_update, private_data_dir): src = inventory_update.source - if src in CLOUD_PROVIDERS: - if src in InventorySource.injectors: - injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update)) - if injector.should_use_plugin(): - content = injector.inventory_contents(inventory_update, private_data_dir) - # must be a statically named file - inventory_path = os.path.join(private_data_dir, injector.filename) - with open(inventory_path, 'w') as f: - f.write(content) - os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) - else: - # Use the vendored script path - inventory_path = self.get_path_to('..', 'plugins', 'inventory', '%s.py' % src) + + injector = None + if inventory_update.source in InventorySource.injectors: + injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update)) + + if injector is not None: + if injector.should_use_plugin(): + content = injector.inventory_contents(inventory_update, private_data_dir) + # must be a statically named file + inventory_path = os.path.join(private_data_dir, injector.filename) + with open(inventory_path, 'w') as f: + f.write(content) + os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) else: - # TODO: get rid of this else after all CLOUD_PROVIDERS have injectors written + # Use the vendored script path inventory_path = self.get_path_to('..', 'plugins', 'inventory', '%s.py' % src) elif src == 'scm': inventory_path = inventory_update.get_actual_source_path() diff --git a/awx/main/tests/data/inventory/plugins/azure_rm/azure_rm.yml 
b/awx/main/tests/data/inventory/plugins/azure_rm/azure_rm.yml new file mode 100644 index 0000000000..188a7cbb6a --- /dev/null +++ b/awx/main/tests/data/inventory/plugins/azure_rm/azure_rm.yml @@ -0,0 +1,4 @@ +plugin: azure_rm +regions: +- southcentralus +- westus diff --git a/awx/main/tests/data/inventory/plugins/ec2/aws_ec2.yml b/awx/main/tests/data/inventory/plugins/ec2/aws_ec2.yml new file mode 100644 index 0000000000..10237c8077 --- /dev/null +++ b/awx/main/tests/data/inventory/plugins/ec2/aws_ec2.yml @@ -0,0 +1,4 @@ +plugin: aws_ec2 +regions: +- us-east-2 +- ap-south-1 diff --git a/awx/main/tests/data/inventory/plugins/openstack/file_reference b/awx/main/tests/data/inventory/plugins/openstack/file_reference new file mode 100644 index 0000000000..4023ac2b9d --- /dev/null +++ b/awx/main/tests/data/inventory/plugins/openstack/file_reference @@ -0,0 +1,13 @@ +ansible: + expand_hostvars: true + fail_on_errors: true + use_hostnames: false +clouds: + devstack: + auth: + auth_url: https://foo.invalid + domain_name: fooo + password: fooo + project_name: fooo + username: fooo + private: false diff --git a/awx/main/tests/data/inventory/plugins/openstack/openstack.yml b/awx/main/tests/data/inventory/plugins/openstack/openstack.yml new file mode 100644 index 0000000000..69a9c3c982 --- /dev/null +++ b/awx/main/tests/data/inventory/plugins/openstack/openstack.yml @@ -0,0 +1,6 @@ +clouds_yaml_path: +- {{ file_reference }} +expand_hostvars: true +fail_on_errors: true +inventory_hostname: name +plugin: openstack diff --git a/awx/main/tests/functional/test_inventory_source_injectors.py b/awx/main/tests/functional/test_inventory_source_injectors.py index 9c17659594..d47ccde831 100644 --- a/awx/main/tests/functional/test_inventory_source_injectors.py +++ b/awx/main/tests/functional/test_inventory_source_injectors.py @@ -236,7 +236,12 @@ def test_inventory_script_structure(this_kind, script_or_plugin, inventory): create_reference_data(ref_dir, content) pytest.skip('You set MAKE_INVENTORY_REFERENCE_FILES, so this created files, unset to run actual test.') else: - expected_file_list = os.listdir(ref_dir) + try: + expected_file_list = os.listdir(ref_dir) + except FileNotFoundError as e: + raise FileNotFoundError( + 'Maybe you never made reference files? 
' + 'MAKE_INVENTORY_REFERENCE_FILES=true py.test ...\noriginal: {}'.format(e)) assert set(expected_file_list) == set(content.keys()), ( 'Inventory update runtime environment does not have expected files' ) diff --git a/awx/main/tests/unit/utils/test_ansible.py b/awx/main/tests/unit/utils/test_ansible.py index 40b3e6ac1e..7c0cb4f101 100644 --- a/awx/main/tests/unit/utils/test_ansible.py +++ b/awx/main/tests/unit/utils/test_ansible.py @@ -1,5 +1,6 @@ import os import os.path +import json import pytest @@ -31,3 +32,10 @@ def test_could_be_inventory(filename): def test_is_not_inventory(filename): path = os.path.join(DATA, 'inventories', 'invalid') assert could_be_inventory(DATA, path, filename) is None + + +def test_filter_non_json_lines(): + data = {'foo': 'bar', 'bar': 'foo'} + dumped_data = json.dumps(data, indent=2) + output = 'Openstack does this\nOh why oh why\n{}\ntrailing lines\nneed testing too'.format(dumped_data) + assert filter_non_json_lines(output) == dumped_data diff --git a/awx/main/utils/ansible.py b/awx/main/utils/ansible.py index 7e68d88189..4854163d36 100644 --- a/awx/main/utils/ansible.py +++ b/awx/main/utils/ansible.py @@ -11,7 +11,7 @@ from itertools import islice from django.utils.encoding import smart_str -__all__ = ['skip_directory', 'could_be_playbook', 'could_be_inventory'] +__all__ = ['skip_directory', 'could_be_playbook', 'could_be_inventory', 'filter_non_json_lines'] valid_playbook_re = re.compile(r'^\s*?-?\s*?(?:hosts|include|import_playbook):\s*?.*?$') @@ -97,3 +97,67 @@ def could_be_inventory(project_path, dir_path, filename): except IOError: return None return inventory_rel_path + + +# This method is copied directly from Ansible core code base +# lib/ansible/module_utils/json_utils.py +# For purpose, see: https://github.com/ansible/ansible/issues/50100 +# Any patches to this method should sync from that version +# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any +# changes are propagated there. +def _filter_non_json_lines(data): + ''' + Used to filter unrelated output around module JSON output, like messages from + tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). + Filters leading lines before first line-starting occurrence of '{' or '[', and filter all + trailing lines after matching close character (working from the bottom of output). + ''' + warnings = [] + + # Filter initial junk + lines = data.splitlines() + + for start, line in enumerate(lines): + line = line.strip() + if line.startswith(u'{'): + endchar = u'}' + break + elif line.startswith(u'['): + endchar = u']' + break + else: + raise ValueError('No start of json char found') + + # Filter trailing junk + lines = lines[start:] + + for reverse_end_offset, line in enumerate(reversed(lines)): + if line.strip().endswith(endchar): + break + else: + raise ValueError('No end of json char found') + + if reverse_end_offset > 0: + # Trailing junk is uncommon and can point to things the user might + # want to change. 
So print a warning if we find any + trailing_junk = lines[len(lines) - reverse_end_offset:] + for line in trailing_junk: + if line.strip(): + warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk)) + break + + lines = lines[:(len(lines) - reverse_end_offset)] + + # NOTE: warnings are undesired (would prevent JSON parsing) + # so this change diverges from the source by not using the warnings + # original: + # return ('\n'.join(lines), warnings) + return '\n'.join(lines) + + +def filter_non_json_lines(data): + # Optimization on top of Ansible's method to avoid operations on large + # strings when it is given in standard ansible-inventory form + if data.startswith(u'{') and data.endswith(u'}'): + return data + return _filter_non_json_lines(data) diff --git a/awx/main/utils/common.py b/awx/main/utils/common.py index fa225fb36e..a3279aa094 100644 --- a/awx/main/utils/common.py +++ b/awx/main/utils/common.py @@ -153,13 +153,13 @@ def memoize_delete(function_name): return cache.delete(function_name) -@memoize() -def get_ansible_version(): +def _get_ansible_version(ansible_path): ''' Return Ansible version installed. + Ansible path needs to be provided to account for custom virtual environments ''' try: - proc = subprocess.Popen(['ansible', '--version'], + proc = subprocess.Popen([ansible_path, '--version'], stdout=subprocess.PIPE) result = smart_str(proc.communicate()[0]) return result.split('\n')[0].replace('ansible', '').strip() @@ -167,6 +167,11 @@ def get_ansible_version(): return 'unknown' +@memoize() +def get_ansible_version(ansible_path='ansible'): + return _get_ansible_version(ansible_path) + + @memoize() def get_ssh_version(): ''' diff --git a/awx/plugins/inventory/openstack.py b/awx/plugins/inventory/openstack_inventory.py similarity index 90% rename from awx/plugins/inventory/openstack.py rename to awx/plugins/inventory/openstack_inventory.py index 05894c7bf3..ab2d96cb8b 100755 --- a/awx/plugins/inventory/openstack.py +++ b/awx/plugins/inventory/openstack_inventory.py @@ -57,15 +57,13 @@ import os import sys import time from distutils.version import StrictVersion +from io import StringIO -try: - import json -except: - import simplejson as json +import json -import os_client_config -import shade -import shade.inventory +import openstack as sdk +from openstack.cloud import inventory as sdk_inventory +from openstack.config import loader as cloud_config CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] @@ -149,7 +147,7 @@ def get_host_groups_from_cloud(inventory): if hasattr(inventory, 'extra_config'): use_hostnames = inventory.extra_config['use_hostnames'] list_args['expand'] = inventory.extra_config['expand_hostvars'] - if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"): + if StrictVersion(sdk.version.__version__) >= StrictVersion("0.13.0"): list_args['fail_on_cloud_config'] = \ inventory.extra_config['fail_on_errors'] else: @@ -192,8 +190,13 @@ def is_cache_stale(cache_file, cache_expiration_time, refresh=False): def get_cache_settings(cloud=None): - config = os_client_config.config.OpenStackConfig( - config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) + config_files = cloud_config.CONFIG_FILES + CONFIG_FILES + if cloud: + config = cloud_config.OpenStackConfig( + config_files=config_files).get_one(cloud=cloud) + else: + config = cloud_config.OpenStackConfig( + config_files=config_files).get_all()[0] # For inventory-wide caching cache_expiration_time = config.get_cache_expiration_time() 
cache_path = config.get_cache_path() @@ -231,15 +234,17 @@ def parse_args(): def main(): args = parse_args() try: - config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES - shade.simple_logging(debug=args.debug) + # openstacksdk library may write to stdout, so redirect this + sys.stdout = StringIO() + config_files = cloud_config.CONFIG_FILES + CONFIG_FILES + sdk.enable_logging(debug=args.debug) inventory_args = dict( refresh=args.refresh, config_files=config_files, private=args.private, cloud=args.cloud, ) - if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + if hasattr(sdk_inventory.OpenStackInventory, 'extra_config'): inventory_args.update(dict( config_key='ansible', config_defaults={ @@ -249,14 +254,15 @@ def main(): } )) - inventory = shade.inventory.OpenStackInventory(**inventory_args) + inventory = sdk_inventory.OpenStackInventory(**inventory_args) + sys.stdout = sys.__stdout__ if args.list: output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud) elif args.host: output = to_json(inventory.get_host(args.host)) print(output) - except shade.OpenStackCloudException as e: + except sdk.exceptions.OpenStackCloudException as e: sys.stderr.write('%s\n' % e.message) sys.exit(1) sys.exit(0) diff --git a/requirements/requirements_ansible.in b/requirements/requirements_ansible.in index c28d53280b..78cd44124d 100644 --- a/requirements/requirements_ansible.in +++ b/requirements/requirements_ansible.in @@ -50,4 +50,4 @@ pywinrm[kerberos]==0.3.0 requests requests-credssp==0.1.0 # For windows authentication awx/issues/1144 # OpenStack -shade==1.27.0 +openstacksdk==0.23.0 diff --git a/requirements/requirements_ansible.txt b/requirements/requirements_ansible.txt index 36f346bc61..b575a21a09 100644 --- a/requirements/requirements_ansible.txt +++ b/requirements/requirements_ansible.txt @@ -74,13 +74,12 @@ netaddr==0.7.19 netifaces==0.10.6 # via openstacksdk ntlm-auth==1.0.6 # via requests-credssp, requests-ntlm oauthlib==2.0.6 # via requests-oauthlib -openstacksdk==0.12.0 # via shade -os-client-config==1.29.0 # via shade +openstacksdk==0.23.0 os-service-types==1.2.0 # via openstacksdk ovirt-engine-sdk-python==4.2.4 packaging==17.1 paramiko==2.4.0 # via azure-cli-core, ncclient -pbr==3.1.1 # via keystoneauth1, openstacksdk, os-service-types, shade, stevedore +pbr==3.1.1 # via keystoneauth1, openstacksdk, os-service-types, stevedore pexpect==4.6.0 psutil==5.4.3 ptyprocess==0.5.2 # via pexpect @@ -108,7 +107,7 @@ rsa==4.0 # via google-auth s3transfer==0.1.13 # via boto3 secretstorage==2.3.1 # via keyring selectors2==2.0.1 # via ncclient -shade==1.27.0 + six==1.11.0 # via azure-cli-core, bcrypt, cryptography, google-auth, isodate, keystoneauth1, knack, munch, ncclient, ntlm-auth, openstacksdk, ovirt-engine-sdk-python, packaging, pynacl, pyopenssl, python-dateutil, pyvmomi, pywinrm, stevedore stevedore==1.28.0 # via keystoneauth1 tabulate==0.7.7 # via azure-cli-core, knack
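
The filter_non_json_lines() helper added to awx/main/utils/ansible.py, and called from AnsibleInventoryLoader in inventory_import.py, strips the banner lines that the openstack inventory plugin (or any source run at higher verbosity) prints around the JSON emitted by ansible-inventory. A short usage sketch against the patched module; the noise lines are invented examples:

    # Usage sketch for the helper added above; the surrounding noise lines are
    # made-up examples of the non-JSON output the loader now tolerates.
    import json

    from awx.main.utils.ansible import filter_non_json_lines

    raw_stdout = '\n'.join([
        'plugin banner line that is not JSON',            # leading junk is dropped
        json.dumps({'_meta': {'hostvars': {}}}, indent=2),
        'trailing warning line',                          # trailing junk is dropped too
    ])
    clean = filter_non_json_lines(raw_stdout)
    parsed = json.loads(clean)                            # parses once the junk is gone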
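
The build_env() change in tasks.py sets ANSIBLE_INVENTORY_ENABLED to 'auto' or 'script' based on PluginFileInjector.should_use_plugin(), which gates on each injector's initial_version (2.5 for openstack, 2.6 for aws_ec2, 2.7 for azure_rm). A minimal sketch of that gate, assuming a LooseVersion-style comparison; the function below is illustrative, not the AWX method:

    # Minimal sketch of the plugin-vs-script decision; LooseVersion is an
    # assumption standing in for the Version comparison used in the patch.
    from distutils.version import LooseVersion


    def ansible_inventory_enabled(initial_version, ansible_version):
        """Return the ANSIBLE_INVENTORY_ENABLED value for an inventory update."""
        if initial_version and LooseVersion(ansible_version) >= LooseVersion(initial_version):
            return 'auto'    # the generated YAML config is handed to the core plugin
        return 'script'      # fall back to the vendored contrib-style script


    print(ansible_inventory_enabled('2.6', '2.7.5'))  # -> auto   (aws_ec2 plugin)
    print(ansible_inventory_enabled('2.6', '2.5.3'))  # -> script (old ec2.py path)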
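
When should_use_plugin() is true, build_inventory() writes injector.inventory_contents() to a statically named file that ansible-inventory reads through the 'auto' plugin routing. Assuming inventory_contents() simply YAML-serializes inventory_as_dict() (that method is not shown in this diff), the azure_rm case reduces to something like the sketch below, which reproduces the awx/main/tests/data/inventory/plugins/azure_rm/azure_rm.yml fixture:

    # Sketch only: the serialization step is an assumption; the dict mirrors a
    # trimmed-down azure_rm.inventory_as_dict() result with two source_regions.
    import yaml

    config = dict(
        plugin='azure_rm',
        regions=['southcentralus', 'westus'],   # from source_regions on the update
    )
    print(yaml.safe_dump(config, default_flow_style=False))
    # plugin: azure_rm
    # regions:
    # - southcentralus
    # - westus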
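
The ec2 injector also absorbs the instance_filters parsing that previously lived in the contrib ec2.py script ("logic used to live in ec2.py, now it belongs to us"). A standalone restatement of that loop, with a hypothetical filter string, shows what lands in the aws_ec2 'filters' dictionary on top of the running-only default:

    # Standalone restatement of the instance_filters handling in
    # ec2.inventory_as_dict(); the example filter string is hypothetical.
    def parse_instance_filters(instance_filters):
        filters = {'instance-state-name': ['running']}  # plugin default chosen above
        for instance_filter in [f for f in instance_filters.split(',') if f]:
            instance_filter = instance_filter.strip()
            if not instance_filter or '=' not in instance_filter:
                continue                                # AND logic is not supported
            filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
            if not filter_key:
                continue
            filters[filter_key] = filter_value
        return filters


    print(parse_instance_filters('tag:Name=web*, instance.group-id=sg-0123456789'))
    # {'instance-state-name': ['running'], 'tag:Name': 'web*', 'instance.group-id': 'sg-0123456789'}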