mirror of https://github.com/ansible/awx.git

Merge pull request #6283 from AlanCoding/vendoring_collections
Use vendored collections for inventory imports
Reviewed-by: https://github.com/apps/softwarefactory-project-zuul
commit c0e07198cf
@@ -10,6 +10,7 @@ recursive-include awx/playbooks *.yml
 recursive-include awx/lib/site-packages *
 recursive-include awx/plugins *.ps1
 recursive-include requirements *.txt
+recursive-include requirements *.yml
 recursive-include config *
 recursive-include docs/licenses *
 recursive-exclude awx devonly.py*
Makefile
@@ -209,7 +209,11 @@ requirements_awx: virtualenv_awx
 requirements_awx_dev:
 	$(VENV_BASE)/awx/bin/pip install -r requirements/requirements_dev.txt
 
-requirements: requirements_ansible requirements_awx
+requirements_collections:
+	mkdir -p $(COLLECTION_BASE)
+	ansible-galaxy collection install -r requirements/collections_requirements.yml -p $(COLLECTION_BASE)
+
+requirements: requirements_ansible requirements_awx requirements_collections
 
 requirements_dev: requirements_awx requirements_ansible_py3 requirements_awx_dev requirements_ansible_dev
@@ -169,7 +169,7 @@ class AnsibleInventoryLoader(object):
             self.tmp_private_dir = build_proot_temp_dir()
             logger.debug("Using fresh temporary directory '{}' for isolation.".format(self.tmp_private_dir))
             kwargs['proot_temp_dir'] = self.tmp_private_dir
-            kwargs['proot_show_paths'] = [functioning_dir(self.source)]
+            kwargs['proot_show_paths'] = [functioning_dir(self.source), settings.INVENTORY_COLLECTIONS_ROOT]
         logger.debug("Running from `{}` working directory.".format(cwd))
 
         if self.venv_path != settings.ANSIBLE_VENV_PATH:
@@ -1612,6 +1612,11 @@ class PluginFileInjector(object):
     # base injector should be one of None, "managed", or "template"
     # this dictates which logic to borrow from playbook injectors
     base_injector = None
+    # every source should have a collection, but these are set here
+    # so that a source without a collection will have null values
+    namespace = None
+    collection = None
+    collection_migration = '2.9'  # Starting with this version, we use collections
 
     def __init__(self, ansible_version):
         # This is InventoryOptions instance, could be source or inventory update
@@ -1638,7 +1643,11 @@ class PluginFileInjector(object):
         """
         if self.plugin_name is None:
             raise NotImplementedError('At minimum the plugin name is needed for inventory plugin use.')
-        return {'plugin': self.plugin_name}
+        if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
+            proper_name = f'{self.namespace}.{self.collection}.{self.plugin_name}'
+        else:
+            proper_name = self.plugin_name
+        return {'plugin': proper_name}
 
     def inventory_contents(self, inventory_update, private_data_dir):
         """Returns a string that is the content for the inventory file for the inventory plugin
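A minimal standalone sketch of the version gate above (illustrative only, not AWX code; `LooseVersion` stands in for whatever version-comparison helper the module imports):

```python
# Sketch of the FQCN gating in inventory_as_dict; all names are illustrative.
from distutils.version import LooseVersion as Version


def plugin_file_reference(plugin_name, namespace, collection,
                          ansible_version, initial_version,
                          collection_migration='2.9'):
    """Sources that never shipped a core plugin (initial_version is None),
    or runs on Ansible >= collection_migration, reference the plugin by
    its fully-qualified collection name (FQCN)."""
    if initial_version is None or Version(ansible_version) >= Version(collection_migration):
        return {'plugin': f'{namespace}.{collection}.{plugin_name}'}
    return {'plugin': plugin_name}


assert plugin_file_reference('aws_ec2', 'amazon', 'aws', '2.9', '2.9') == {'plugin': 'amazon.aws.aws_ec2'}
assert plugin_file_reference('openstack', 'openstack', 'cloud', '2.8', '2.8') == {'plugin': 'openstack'}
```

Sources that never had a core plugin can only come from a collection, which is why `initial_version is None` short-circuits straight to the FQCN.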
@@ -1693,7 +1702,10 @@ class PluginFileInjector(object):
         return injected_env
 
     def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
-        return self._get_shared_env(inventory_update, private_data_dir, private_data_files)
+        env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
+        if self.initial_version is None or Version(self.ansible_version) >= Version(self.collection_migration):
+            env['ANSIBLE_COLLECTIONS_PATHS'] = settings.INVENTORY_COLLECTIONS_ROOT
+        return env
 
     def get_script_env(self, inventory_update, private_data_dir, private_data_files):
        injected_env = self._get_shared_env(inventory_update, private_data_dir, private_data_files)
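The `ANSIBLE_COLLECTIONS_PATHS` value is what lets `ansible-inventory` resolve the FQCN to a vendored plugin. A hypothetical reproduction of that effect outside AWX (the path and inventory file name are examples only):

```python
# Run ansible-inventory against a generated plugin file, pointing it at the
# vendored collection tree, as the env injection above does.
import os
import subprocess

env = dict(os.environ)
env['ANSIBLE_COLLECTIONS_PATHS'] = '/var/lib/awx/vendor/inventory_collections'

subprocess.run(
    ['ansible-inventory', '-i', 'inventory.yml', '--list'],
    env=env, check=True,
)
```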
@@ -1738,6 +1750,8 @@ class azure_rm(PluginFileInjector):
     initial_version = '2.8'  # Driven by unsafe group names issue, hostvars, host names
     ini_env_reference = 'AZURE_INI_PATH'
     base_injector = 'managed'
+    namespace = 'azure'
+    collection = 'azcollection'
 
     def get_plugin_env(self, *args, **kwargs):
         ret = super(azure_rm, self).get_plugin_env(*args, **kwargs)
@@ -1869,9 +1883,11 @@ class azure_rm(PluginFileInjector):
 class ec2(PluginFileInjector):
     plugin_name = 'aws_ec2'
     # blocked by https://github.com/ansible/ansible/issues/54059
-    # initial_version = '2.8'  # Driven by unsafe group names issue, parent_group templating, hostvars
+    initial_version = '2.9'  # Driven by unsafe group names issue, parent_group templating, hostvars
     ini_env_reference = 'EC2_INI_PATH'
     base_injector = 'managed'
+    namespace = 'amazon'
+    collection = 'aws'
 
     def get_plugin_env(self, *args, **kwargs):
         ret = super(ec2, self).get_plugin_env(*args, **kwargs)
@@ -2011,6 +2027,9 @@ class ec2(PluginFileInjector):
             grouping_data['key'] += ' | regex_replace("{rx}", "_")'.format(rx=legacy_regex)
         # end compatibility content
 
+        if source_vars.get('iam_role_arn', None):
+            ret['iam_role_arn'] = source_vars['iam_role_arn']
+
         # This was an allowed ec2.ini option, also plugin option, so pass through
         if source_vars.get('boto_profile', None):
             ret['boto_profile'] = source_vars['boto_profile']
@@ -2019,6 +2038,10 @@ class ec2(PluginFileInjector):
         # Using the plugin, but still want dashes whitelisted
         ret['use_contrib_script_compatible_sanitization'] = True
 
+        if source_vars.get('nested_groups') is False:
+            for this_keyed_group in keyed_groups:
+                this_keyed_group.pop('parent_group', None)
+
         if keyed_groups:
             ret['keyed_groups'] = keyed_groups
 
@@ -2030,18 +2053,35 @@ class ec2(PluginFileInjector):
             compose_dict.update(self._compat_compose_vars())
             # plugin provides "aws_ec2", but not this which the script gave
             ret['groups'] = {'ec2': True}
-            # public_ip as hostname is non-default plugin behavior, script behavior
-            ret['hostnames'] = [
-                'network-interface.addresses.association.public-ip',
-                'dns-name',
-                'private-dns-name'
-            ]
+            if source_vars.get('hostname_variable') is not None:
+                hnames = []
+                for expr in source_vars.get('hostname_variable').split(','):
+                    if expr == 'public_dns_name':
+                        hnames.append('dns-name')
+                    elif not expr.startswith('tag:') and '_' in expr:
+                        hnames.append(expr.replace('_', '-'))
+                    else:
+                        hnames.append(expr)
+                ret['hostnames'] = hnames
+            else:
+                # public_ip as hostname is non-default plugin behavior, script behavior
+                ret['hostnames'] = [
+                    'network-interface.addresses.association.public-ip',
+                    'dns-name',
+                    'private-dns-name'
+                ]
             # The script returned only running state by default, the plugin does not
             # https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
             # options: pending | running | shutting-down | terminated | stopping | stopped
             inst_filters['instance-state-name'] = ['running']
         # end compatibility content
 
+        if source_vars.get('destination_variable') or source_vars.get('vpc_destination_variable'):
+            for fd in ('destination_variable', 'vpc_destination_variable'):
+                if source_vars.get(fd):
+                    compose_dict['ansible_host'] = source_vars.get(fd)
+                    break
+
         if compose_dict:
             ret['compose'] = compose_dict
 
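A minimal trace of the `hostname_variable` compatibility translation above, with hypothetical script-style inputs:

```python
# Mirrors the loop above: translate script-era hostname_variable values
# into the filter names the aws_ec2 plugin expects.
def translate_hostnames(hostname_variable):
    hnames = []
    for expr in hostname_variable.split(','):
        if expr == 'public_dns_name':
            hnames.append('dns-name')  # script alias for the dns-name filter
        elif not expr.startswith('tag:') and '_' in expr:
            hnames.append(expr.replace('_', '-'))  # script used underscores
        else:
            hnames.append(expr)  # tag:Name expressions pass through untouched
    return hnames


assert translate_hostnames('public_dns_name') == ['dns-name']
assert translate_hostnames('tag:Name,private_ip_address') == ['tag:Name', 'private-ip-address']
```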
@@ -2108,6 +2148,8 @@ class gce(PluginFileInjector):
     initial_version = '2.8'  # Driven by unsafe group names issue, hostvars
     ini_env_reference = 'GCE_INI_PATH'
     base_injector = 'managed'
+    namespace = 'google'
+    collection = 'cloud'
 
     def get_plugin_env(self, *args, **kwargs):
         ret = super(gce, self).get_plugin_env(*args, **kwargs)
@@ -2208,14 +2250,112 @@
 
 
 class vmware(PluginFileInjector):
-    # plugin_name = 'vmware_vm_inventory'  # FIXME: implement me
+    plugin_name = 'vmware_vm_inventory'
+    # initial_version = '2.9'  # Ready 4/22/2020, waiting for release
     ini_env_reference = 'VMWARE_INI_PATH'
     base_injector = 'managed'
+    namespace = 'community'
+    collection = 'vmware'
 
     @property
     def script_name(self):
         return 'vmware_inventory.py'  # exception
 
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        ret = super(vmware, self).inventory_as_dict(inventory_update, private_data_dir)
+        ret['strict'] = False
+        # Documentation of props, see
+        # https://github.com/ansible/ansible/blob/devel/docs/docsite/rst/scenario_guides/vmware_scenarios/vmware_inventory_vm_attributes.rst
+        UPPERCASE_PROPS = [
+            "ansible_ssh_host",
+            "ansible_host",
+            "ansible_uuid",
+            "availableField",
+            "configIssue",
+            "configStatus",
+            "customValue",  # optional
+            "datastore",
+            "effectiveRole",
+            "guestHeartbeatStatus",  # optional
+            "layout",  # optional
+            "layoutEx",  # optional
+            "name",
+            "network",
+            "overallStatus",
+            "parentVApp",  # optional
+            "permission",
+            "recentTask",
+            "resourcePool",
+            "rootSnapshot",
+            "snapshot",  # optional
+            "tag",
+            "triggeredAlarmState",
+            "value"
+        ]
+        NESTED_PROPS = [
+            "capability",
+            "config",
+            "guest",
+            "runtime",
+            "storage",
+            "summary",  # repeat of other properties
+        ]
+        ret['properties'] = UPPERCASE_PROPS + NESTED_PROPS
+        ret['compose'] = {'ansible_host': 'guest.ipAddress'}  # default value
+        ret['compose']['ansible_ssh_host'] = ret['compose']['ansible_host']
+        # the ansible_uuid was unique for every host, every import, from the script
+        ret['compose']['ansible_uuid'] = '99999999 | random | to_uuid'
+        for prop in UPPERCASE_PROPS:
+            if prop == prop.lower():
+                continue
+            ret['compose'][prop.lower()] = prop
+        ret['with_nested_properties'] = True
+        # ret['property_name_format'] = 'lower_case'  # only dacrystal/topic/vmware-inventory-plugin-property-format
+
+        # process custom options
+        vmware_opts = dict(inventory_update.source_vars_dict.items())
+        if inventory_update.instance_filters:
+            vmware_opts.setdefault('host_filters', inventory_update.instance_filters)
+        if inventory_update.group_by:
+            vmware_opts.setdefault('groupby_patterns', inventory_update.group_by)
+
+        alias_pattern = vmware_opts.get('alias_pattern')
+        if alias_pattern:
+            ret.setdefault('hostnames', [])
+            for alias in alias_pattern.split(','):  # make best effort
+                stripped_alias = alias.replace('{', '').replace('}', '').strip()  # make best effort
+                if not stripped_alias:
+                    continue
+                ret['hostnames'].append(stripped_alias)
+
+        host_pattern = vmware_opts.get('host_pattern')  # not working in script
+        if host_pattern:
+            stripped_hp = host_pattern.replace('{', '').replace('}', '').strip()  # make best effort
+            ret['compose']['ansible_host'] = stripped_hp
+            ret['compose']['ansible_ssh_host'] = stripped_hp
+
+        host_filters = vmware_opts.get('host_filters')
+        if host_filters:
+            ret.setdefault('filters', [])
+            for hf in host_filters.split(','):
+                stripped_hf = hf.replace('{', '').replace('}', '').strip()  # make best effort
+                if not stripped_hf:
+                    continue
+                ret['filters'].append(stripped_hf)
+
+        groupby_patterns = vmware_opts.get('groupby_patterns')
+        if groupby_patterns:
+            ret.setdefault('keyed_groups', [])
+            for pattern in groupby_patterns.split(','):
+                stripped_pattern = pattern.replace('{', '').replace('}', '').strip()  # make best effort
+                ret['keyed_groups'].append({
+                    'prefix': '', 'separator': '',
+                    'key': stripped_pattern
+                })
+
+        return ret
+
     def build_script_private_data(self, inventory_update, private_data_dir):
         cp = configparser.RawConfigParser()
         credential = inventory_update.get_cloud_credential()
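The brace-stripping done for `alias_pattern`, `host_filters`, and `groupby_patterns` above follows a single best-effort pattern; a sketch with hypothetical inputs:

```python
# Strip Jinja braces from comma-separated script-style expressions, keeping
# only non-empty entries, as the vmware injector above does.
def strip_jinja_braces(csv_expr):
    out = []
    for part in csv_expr.split(','):
        stripped = part.replace('{', '').replace('}', '').strip()
        if stripped:
            out.append(stripped)
    return out


assert strip_jinja_braces('{{ config.zoo == "DC0_H0_VM0" }}') == ['config.zoo == "DC0_H0_VM0"']
assert strip_jinja_braces('{{ config.foo }},') == ['config.foo']
```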
@@ -2246,6 +2386,8 @@ class openstack(PluginFileInjector):
     plugin_name = 'openstack'
     # minimum version of 2.7.8 may be theoretically possible
     initial_version = '2.8'  # Driven by consistency with other sources
+    namespace = 'openstack'
+    collection = 'cloud'
 
     @property
     def script_name(self):
@@ -2297,7 +2439,10 @@ class openstack(PluginFileInjector):
         return self.build_script_private_data(inventory_update, private_data_dir, mk_cache=False)
 
     def get_plugin_env(self, inventory_update, private_data_dir, private_data_files):
-        return self.get_script_env(inventory_update, private_data_dir, private_data_files)
+        env = super(openstack, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
+        script_env = self.get_script_env(inventory_update, private_data_dir, private_data_files)
+        env.update(script_env)
+        return env
 
     def inventory_as_dict(self, inventory_update, private_data_dir):
         def use_host_name_for_name(a_bool_maybe):
@@ -2309,12 +2454,10 @@ class openstack(PluginFileInjector):
             else:
                 return 'uuid'
 
-        ret = dict(
-            plugin=self.plugin_name,
-            fail_on_errors=True,
-            expand_hostvars=True,
-            inventory_hostname=use_host_name_for_name(False),
-        )
+        ret = super(openstack, self).inventory_as_dict(inventory_update, private_data_dir)
+        ret['fail_on_errors'] = True
+        ret['expand_hostvars'] = True
+        ret['inventory_hostname'] = use_host_name_for_name(False)
         # Note: mucking with defaults will break import integrity
         # For the plugin, we need to use the same defaults as the old script
         # or else imports will conflict. To find script defaults you have
@@ -2339,8 +2482,10 @@ class openstack(PluginFileInjector):
 class rhv(PluginFileInjector):
     """ovirt uses the custom credential templating, and that is all
     """
-    # plugin_name = 'FIXME'  # contribute inventory plugin to Ansible
+    plugin_name = 'ovirt'
     base_injector = 'template'
+    namespace = 'ovirt'
+    collection = 'ovirt_collection'
 
     @property
     def script_name(self):
@@ -2350,8 +2495,10 @@ class rhv(PluginFileInjector):
 class satellite6(PluginFileInjector):
     plugin_name = 'foreman'
     ini_env_reference = 'FOREMAN_INI_PATH'
-    # initial_version = '2.8'  # FIXME: turn on after plugin is validated
+    initial_version = '2.9'
     # No base injector, because this does not work in playbooks. Bug??
+    namespace = 'theforeman'
+    collection = 'foreman'
 
     @property
     def script_name(self):
@@ -2413,18 +2560,51 @@ class satellite6(PluginFileInjector):
         # this assumes that this is merged
         # https://github.com/ansible/ansible/pull/52693
         credential = inventory_update.get_cloud_credential()
-        ret = {}
+        ret = super(satellite6, self).get_plugin_env(inventory_update, private_data_dir, private_data_files)
         if credential:
             ret['FOREMAN_SERVER'] = credential.get_input('host', default='')
             ret['FOREMAN_USER'] = credential.get_input('username', default='')
             ret['FOREMAN_PASSWORD'] = credential.get_input('password', default='')
         return ret
 
+    def inventory_as_dict(self, inventory_update, private_data_dir):
+        ret = super(satellite6, self).inventory_as_dict(inventory_update, private_data_dir)
+
+        # Compatibility content
+        group_by_hostvar = {
+            "environment": {"prefix": "foreman_environment_",
+                            "separator": "",
+                            "key": "foreman['environment_name'] | lower | regex_replace(' ', '') | "
+                                   "regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')"},  # NOQA: W605
+            "location": {"prefix": "foreman_location_",
+                         "separator": "",
+                         "key": "foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
+            "organization": {"prefix": "foreman_organization_",
+                             "separator": "",
+                             "key": "foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
+            "lifecycle_environment": {"prefix": "foreman_lifecycle_environment_",
+                                      "separator": "",
+                                      "key": "foreman['content_facet_attributes']['lifecycle_environment_name'] | "
+                                             "lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"},
+            "content_view": {"prefix": "foreman_content_view_",
+                             "separator": "",
+                             "key": "foreman['content_facet_attributes']['content_view_name'] | "
+                                    "lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')"}
+        }
+        ret['keyed_groups'] = [group_by_hostvar[grouping_name] for grouping_name in group_by_hostvar]
+        ret['legacy_hostvars'] = True
+        ret['want_facts'] = True
+        ret['want_params'] = True
+
+        return ret
+
 
 class cloudforms(PluginFileInjector):
     # plugin_name = 'FIXME'  # contribute inventory plugin to Ansible
     ini_env_reference = 'CLOUDFORMS_INI_PATH'
     # Also no base_injector because this does not work in playbooks
+    # namespace = ''  # does not have a collection
+    # collection = ''
 
     def build_script_private_data(self, inventory_update, private_data_dir):
         cp = configparser.RawConfigParser()
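For illustration, this is what the `foreman_environment_` expression above yields for a sample hostvar, with the Jinja filters emulated in plain Python (the input value is hypothetical):

```python
# Emulate: foreman['environment_name'] | lower | regex_replace(' ', '') |
#          regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')
import re


def environment_group(environment_name):
    key = environment_name.lower()
    key = key.replace(' ', '')
    key = re.sub(r'[^A-Za-z0-9_]', '_', key)
    key = re.sub(r'none', '', key)
    return 'foreman_environment_' + key  # prefix, with separator ''


assert environment_group('Production Env') == 'foreman_environment_productionenv'
```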
@@ -2460,6 +2640,8 @@ class tower(PluginFileInjector):
     plugin_name = 'tower'
     base_injector = 'template'
     initial_version = '2.8'  # Driven by "include_metadata" hostvars
+    namespace = 'awx'
+    collection = 'awx'
 
     def get_script_env(self, inventory_update, private_data_dir, private_data_files):
         env = super(tower, self).get_script_env(inventory_update, private_data_dir, private_data_files)
@@ -2468,6 +2650,7 @@ class tower(PluginFileInjector):
         return env
 
     def inventory_as_dict(self, inventory_update, private_data_dir):
+        ret = super(tower, self).inventory_as_dict(inventory_update, private_data_dir)
         # Credentials injected as env vars, same as script
         try:
             # plugin can take an actual int type
@@ -2475,11 +2658,9 @@ class tower(PluginFileInjector):
         except ValueError:
             # inventory_id could be a named URL
             identifier = iri_to_uri(inventory_update.instance_filters)
-        return {
-            'plugin': self.plugin_name,
-            'inventory_id': identifier,
-            'include_metadata': True  # used for license check
-        }
+        ret['inventory_id'] = identifier
+        ret['include_metadata'] = True  # used for license check
+        return ret
 
 
 for cls in PluginFileInjector.__subclasses__():
@@ -2407,7 +2407,7 @@ class RunInventoryUpdate(BaseTask):
 
     @property
     def proot_show_paths(self):
-        return [self.get_path_to('..', 'plugins', 'inventory')]
+        return [self.get_path_to('..', 'plugins', 'inventory'), settings.INVENTORY_COLLECTIONS_ROOT]
 
     def build_private_data(self, inventory_update, private_data_dir):
         """
@@ -39,5 +39,5 @@ keyed_groups:
   prefix: ''
   separator: ''
 plain_host_names: true
-plugin: azure_rm
+plugin: azure.azcollection.azure_rm
 use_contrib_script_compatible_sanitization: true
@@ -1,6 +1,6 @@
 boto_profile: /tmp/my_boto_stuff
 compose:
-  ansible_host: public_ip_address
+  ansible_host: public_dns_name
   ec2_account_id: owner_id
   ec2_ami_launch_index: ami_launch_index | string
   ec2_architecture: architecture
@@ -50,9 +50,8 @@ filters:
 groups:
   ec2: true
 hostnames:
-- network-interface.addresses.association.public-ip
 - dns-name
-- private-dns-name
+iam_role_arn: arn:aws:iam::123456789012:role/test-role
 keyed_groups:
 - key: placement.availability_zone
   parent_group: zones
@@ -75,7 +74,7 @@ keyed_groups:
   parent_group: '{{ placement.region }}'
   prefix: ''
   separator: ''
-plugin: aws_ec2
+plugin: amazon.aws.aws_ec2
 regions:
 - us-east-2
 - ap-south-1
@@ -40,7 +40,7 @@ keyed_groups:
 - key: image
   prefix: ''
   separator: ''
-plugin: gcp_compute
+plugin: google.cloud.gcp_compute
 projects:
 - fooo
 retrieve_image_info: true
@@ -1,4 +1,4 @@
 expand_hostvars: true
 fail_on_errors: true
 inventory_hostname: uuid
-plugin: openstack
+plugin: openstack.cloud.openstack
awx/main/tests/data/inventory/plugins/rhv/env.json (new file)
@@ -0,0 +1,7 @@
+{
+    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
+    "OVIRT_INI_PATH": "{{ file_reference }}",
+    "OVIRT_PASSWORD": "fooo",
+    "OVIRT_URL": "https://foo.invalid",
+    "OVIRT_USERNAME": "fooo"
+}
@@ -0,0 +1,5 @@
+[ovirt]
+ovirt_url=https://foo.invalid
+ovirt_username=fooo
+ovirt_password=fooo
+ovirt_ca_file=fooo
@@ -0,0 +1 @@
+plugin: ovirt.ovirt_collection.ovirt
@@ -1 +1,20 @@
-plugin: foreman
+keyed_groups:
+- key: foreman['environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_') | regex_replace('none', '')
+  prefix: foreman_environment_
+  separator: ''
+- key: foreman['location_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
+  prefix: foreman_location_
+  separator: ''
+- key: foreman['organization_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
+  prefix: foreman_organization_
+  separator: ''
+- key: foreman['content_facet_attributes']['lifecycle_environment_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
+  prefix: foreman_lifecycle_environment_
+  separator: ''
+- key: foreman['content_facet_attributes']['content_view_name'] | lower | regex_replace(' ', '') | regex_replace('[^A-Za-z0-9\_]', '_')
+  prefix: foreman_content_view_
+  separator: ''
+legacy_hostvars: true
+plugin: theforeman.foreman.foreman
+want_facts: true
+want_params: true
@@ -1,3 +1,3 @@
 include_metadata: true
 inventory_id: 42
-plugin: tower
+plugin: awx.awx.tower
awx/main/tests/data/inventory/plugins/vmware/env.json (new file)
@@ -0,0 +1,7 @@
+{
+    "ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS": "never",
+    "VMWARE_HOST": "https://foo.invalid",
+    "VMWARE_PASSWORD": "fooo",
+    "VMWARE_USER": "fooo",
+    "VMWARE_VALIDATE_CERTS": "False"
+}
@@ -0,0 +1,59 @@
+compose:
+  ansible_host: guest.ipAddress
+  ansible_ssh_host: guest.ipAddress
+  ansible_uuid: 99999999 | random | to_uuid
+  availablefield: availableField
+  configissue: configIssue
+  configstatus: configStatus
+  customvalue: customValue
+  effectiverole: effectiveRole
+  guestheartbeatstatus: guestHeartbeatStatus
+  layoutex: layoutEx
+  overallstatus: overallStatus
+  parentvapp: parentVApp
+  recenttask: recentTask
+  resourcepool: resourcePool
+  rootsnapshot: rootSnapshot
+  triggeredalarmstate: triggeredAlarmState
+filters:
+- config.zoo == "DC0_H0_VM0"
+hostnames:
+- config.foo
+keyed_groups:
+- key: config.asdf
+  prefix: ''
+  separator: ''
+plugin: community.vmware.vmware_vm_inventory
+properties:
+- ansible_ssh_host
+- ansible_host
+- ansible_uuid
+- availableField
+- configIssue
+- configStatus
+- customValue
+- datastore
+- effectiveRole
+- guestHeartbeatStatus
+- layout
+- layoutEx
+- name
+- network
+- overallStatus
+- parentVApp
+- permission
+- recentTask
+- resourcePool
+- rootSnapshot
+- snapshot
+- tag
+- triggeredAlarmState
+- value
+- capability
+- config
+- guest
+- runtime
+- storage
+- summary
+strict: false
+with_nested_properties: true
@@ -1,9 +1,11 @@
 [ec2]
 base_source_var = value_of_var
 boto_profile = /tmp/my_boto_stuff
+iam_role_arn = arn:aws:iam::123456789012:role/test-role
+hostname_variable = public_dns_name
+destination_variable = public_dns_name
 regions = us-east-2,ap-south-1
 regions_exclude = us-gov-west-1,cn-north-1
-destination_variable = public_dns_name
 vpc_destination_variable = ip_address
 route53 = False
 all_instances = True
@@ -5,6 +5,7 @@ username = fooo
 password = fooo
 server = https://foo.invalid
 base_source_var = value_of_var
-host_filters = foobaa
-groupby_patterns = fouo
+alias_pattern = {{ config.foo }}
+host_filters = {{ config.zoo == "DC0_H0_VM0" }}
+groupby_patterns = {{ config.asdf }}
 
 
@@ -16,7 +16,7 @@ DATA = os.path.join(os.path.dirname(data.__file__), 'inventory')
 
 TEST_SOURCE_FIELDS = {
     'vmware': {
-        'instance_filters': 'foobaa',
+        'instance_filters': '{{ config.name == "only_my_server" }},{{ somevar == "bar"}}',
         'group_by': 'fouo'
     },
     'ec2': {
@@ -38,7 +38,10 @@ TEST_SOURCE_FIELDS = {
 
 INI_TEST_VARS = {
     'ec2': {
-        'boto_profile': '/tmp/my_boto_stuff'
+        'boto_profile': '/tmp/my_boto_stuff',
+        'iam_role_arn': 'arn:aws:iam::123456789012:role/test-role',
+        'hostname_variable': 'public_dns_name',
+        'destination_variable': 'public_dns_name'
     },
     'gce': {},
     'openstack': {
@@ -50,6 +53,9 @@ INI_TEST_VARS = {
     'rhv': {},  # there are none
     'tower': {},  # there are none
     'vmware': {
+        'alias_pattern': "{{ config.foo }}",
+        'host_filters': '{{ config.zoo == "DC0_H0_VM0" }}',
+        'groupby_patterns': "{{ config.asdf }}",
         # setting VMWARE_VALIDATE_CERTS is duplicated with env var
     },
     'azure_rm': {
@@ -315,9 +321,10 @@ def test_inventory_update_injected_content(this_kind, script_or_plugin, inventor
     with mock.patch('awx.main.models.inventory.PluginFileInjector.should_use_plugin', return_value=use_plugin):
         # Also do not send websocket status updates
         with mock.patch.object(UnifiedJob, 'websocket_emit_status', mock.Mock()):
-            # The point of this test is that we replace run with assertions
-            with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
-                # mocking the licenser is necessary for the tower source
-                with mock.patch('awx.main.models.inventory.get_licenser', mock_licenser):
-                    # so this sets up everything for a run and then yields control over to substitute_run
-                    task.run(inventory_update.pk)
+            with mock.patch.object(task, 'get_ansible_version', return_value='2.13'):
+                # The point of this test is that we replace run with assertions
+                with mock.patch('awx.main.tasks.ansible_runner.interface.run', substitute_run):
+                    # mocking the licenser is necessary for the tower source
+                    with mock.patch('awx.main.models.inventory.get_licenser', mock_licenser):
+                        # so this sets up everything for a run and then yields control over to substitute_run
+                        task.run(inventory_update.pk)
@@ -1807,8 +1807,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
         inventory_update.get_cloud_credential = mocker.Mock(return_value=None)
         inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
 
-        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
-        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
+        with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
+            private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
+            env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
 
         assert 'AWS_ACCESS_KEY_ID' not in env
         assert 'AWS_SECRET_ACCESS_KEY' not in env
@@ -1915,8 +1916,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
         inventory_update.get_cloud_credential = get_cred
         inventory_update.get_extra_credentials = mocker.Mock(return_value=[])
 
-        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
-        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
+        with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
+            private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
+            env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
 
         safe_env = {}
         credentials = task.build_credentials_list(inventory_update)
@@ -2153,8 +2155,9 @@ class TestInventoryUpdateCredentials(TestJobExecution):
             'satellite6_want_facts': False
         }
 
-        private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
-        env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
+        with mocker.patch('awx.main.tasks._get_ansible_version', mocker.MagicMock(return_value='2.7')):
+            private_data_files = task.build_private_data_files(inventory_update, private_data_dir)
+            env = task.build_env(inventory_update, private_data_dir, False, private_data_files)
 
         config = configparser.ConfigParser()
         config.read(env['FOREMAN_INI_PATH'])
@@ -120,6 +120,10 @@ LOGIN_URL = '/api/login/'
 # This directory should not be web-accessible.
 PROJECTS_ROOT = os.path.join(BASE_DIR, 'projects')
 
+# Absolute filesystem path to the directory to host collections for
+# running inventory imports
+INVENTORY_COLLECTIONS_ROOT = os.path.join(BASE_DIR, 'vendor', 'inventory_collections')
+
 # Absolute filesystem path to the directory for job status stdout (default for
 # development and tests, default for production defined in production.py). This
 # directory should not be web-accessible
@@ -52,6 +52,9 @@ if "pytest" in sys.modules:
 # This directory should NOT be web-accessible.
 PROJECTS_ROOT = '/var/lib/awx/projects/'
 
+# Location for cross-development of inventory plugins
+# INVENTORY_COLLECTIONS_ROOT = '/awx_devel/awx/plugins/collections'
+
 # Absolute filesystem path to the directory for job status stdout
 # This directory should not be web-accessible
 JOBOUTPUT_ROOT = os.path.join(BASE_DIR, 'job_status')
@@ -1,8 +1,12 @@
 # Transition to Ansible Inventory Plugins
 
-Inventory updates have changed from using scripts, which are vendored as executable Python scripts, to using dynamically-generated YAML files which conform to the specifications of the `auto` inventory plugin. These are then parsed by their respective inventory plugin.
+Inventory updates have changed from using deprecated inventory scripts to using dynamically-generated YAML files, which are parsed by their respective inventory plugin.
 
-The major organizational change is that the inventory plugins are part of the Ansible core distribution, whereas the same logic used to be a part of AWX source.
+In Ansible 2.8, the inventory plugins that ship as part of the Ansible core distribution were used.
+This only applied to a few select sources.
+
+In all other circumstances, inventory imports use the inventory plugin from vendored collections.
+Those collections are downloaded from Ansible Galaxy at the time the container image is built.
 
 
 ## Prior Background for Transition
@@ -14,11 +18,10 @@ AWX used to maintain logic that parsed `.ini` inventory file contents, in addit
 
 The CLI entry point `ansible-inventory` was introduced in Ansible 2.4. In Tower 3.2, inventory imports began running this command as an intermediary between the inventory and the import's logic to save content to the database. Using `ansible-inventory` eliminates the need to maintain source-specific logic, relying on Ansible's code instead. This also enables consistent data structure output from `ansible-inventory`. There are many valid structures that a script can provide, but the output from `ansible-inventory` will always be the same, thus the AWX logic to parse the content is simplified. This is why even scripts must be run through the `ansible-inventory` CLI.
 
-Along with this switchover, a backported version of `ansible-inventory` was provided, which supports Ansible versions 2.2 and 2.3.
-
 
 ### Removal of Backport
 
+Along with the `ansible-inventory` switchover, a backported version of `ansible-inventory` was provided, which supported Ansible versions 2.2 and 2.3.
 In AWX 3.0.0 (and Tower 3.5), the backport of `ansible-inventory` was removed, and support for using custom virtual environments was added. This set the minimum version of Ansible necessary to run _any_ inventory update to 2.4.
@@ -30,9 +33,21 @@ In AWX 4.0.0 (and Tower 3.5) inventory source types start to switch over to plug
 
 To see in which version the plugin transition will happen, see `awx/main/models/inventory.py` and look for the source name as a subclass of `PluginFileInjector`; there should be an `initial_version`, which is the first version that was deemed (via testing) to have sufficient parity in the content its inventory plugin returns. For example, `openstack` will begin using the inventory plugin in Ansible version 2.8. If you run an OpenStack inventory update with Ansible 2.7.x or lower, it will use the script.
 
-The eventual goal is for all source types to have moved to plugins. For any given source, after the `initial_version` for plugin use is higher than the lowest supported Ansible version, the script can be removed and the logic for script credential injection will also be removed.
+At some point, scripts will be removed and the script-related logic (for credentials and configuration) will also be removed.
 
 For example, after AWX no longer supports Ansible 2.7, the script `awx/plugins/openstack_inventory.py` will be removed.
 
+### Management of Collections
+
+Collections are used for inventory imports starting in Ansible 2.9, and each collection is versioned independently of Ansible.
+Versions for those collections are set in the requirements file `requirements/collections_requirements.yml`.
+
+The location of vendored collections is set by the file-only setting `INVENTORY_COLLECTIONS_ROOT`.
+For development purposes, this can be changed so that you can test against development versions of those collections.
+Instructions for doing this are in `tools/collections`.
+
+If, for some reason, you need to change the version of a particular collection used in inventory imports,
+you can use the `ansible-galaxy` tool to update the collection inside `INVENTORY_COLLECTIONS_ROOT`.
+Note that the logic for building the inventory file is written and tested only for the version pinned in the requirements file.
+
 
 ## Changes to Expect in Imports
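To make the collection-update note above concrete, a sketch of pinning a different version of one vendored collection with `ansible-galaxy` (the version shown is hypothetical; the path is the production default):

```python
# Overwrite one vendored collection in place; later imports will then use it.
import subprocess

subprocess.run(
    ['ansible-galaxy', 'collection', 'install',
     'theforeman.foreman:0.8.0',  # hypothetical newer pin
     '-p', '/var/lib/awx/vendor/inventory_collections',
     '--force'],  # replace the version installed at build time
    check=True,
)
```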
@@ -54,11 +69,6 @@ More `hostvars` will appear if the inventory plugins are used. To maintain backw
 A small number of `hostvars` will be lost because of general deprecation needs.
 
 
-#### Host Names
-
-In many cases, the host names will change. In all cases, accurate host tracking will still be maintained via the host `instance_id`.
-
-
 ## Writing Your Own Inventory File
 
 If you do not want any of this compatibility-related functionality, then you can add an SCM inventory source that points to your own file. You can also apply a credential of a `managed_by_tower` type to that inventory source that matches the credential you are using, as long as it is not `gce` or `openstack`.
@@ -14,6 +14,8 @@ STATIC_ROOT = '/var/lib/awx/public/static'
 
 PROJECTS_ROOT = '/var/lib/awx/projects'
 
+INVENTORY_COLLECTIONS_ROOT = '/var/lib/awx/vendor/inventory_collections'
+
 JOBOUTPUT_ROOT = '/var/lib/awx/job_status'
 
 SECRET_KEY = get_secret()
@@ -74,8 +74,10 @@ ADD requirements/requirements_ansible.txt \
     requirements/requirements.txt \
     requirements/requirements_tower_uninstall.txt \
     requirements/requirements_git.txt \
+    requirements/collections_requirements.yml \
     /tmp/requirements/
 RUN cd /tmp && VENV_BASE="/var/lib/awx/venv" make requirements_awx requirements_ansible_py3
+RUN cd /tmp && COLLECTION_BASE="/var/lib/awx/vendor/inventory_collections" make requirements_collections
 
 COPY {{ awx_sdist_file }} /tmp/{{ awx_sdist_file }}
 RUN echo "{{ awx_version }}" > /var/lib/awx/.tower_version && \
@@ -153,6 +153,7 @@ data:
 
     STATIC_ROOT = '/var/lib/awx/public/static'
     PROJECTS_ROOT = '/var/lib/awx/projects'
+    INVENTORY_COLLECTIONS_ROOT = '/var/lib/awx/vendor/inventory_collections'
     JOBOUTPUT_ROOT = '/var/lib/awx/job_status'
     SECRET_KEY = open('/etc/tower/SECRET_KEY', 'rb').read().strip()
     ALLOWED_HOSTS = ['*']
requirements/collections_requirements.yml (new file)
@@ -0,0 +1,18 @@
+---
+collections:
+  - name: awx.awx
+    version: 9.3.0
+  - name: azure.azcollection
+    version: 0.1.1  # questionable https://github.com/ansible-collections/azure/issues/55
+  - name: amazon.aws
+    version: 0.1.1  # version 0.1.0 seems to have gone missing
+  - name: theforeman.foreman
+    version: 0.7.0  # contains the inventory plugin, but more patches are needed
+  - name: google.cloud
+    version: 0.0.9  # contains PR 167, should be good to go
+  - name: openstack.cloud
+    version: 0.0.1-dev85  # earlier had checksum mismatch
+  - name: community.vmware
+    version: 0.3.1-dev1
+  - name: ovirt.ovirt_collection
+    version: 1.0.1  # new fix published, should be good to go
tools/collections/README.md (new file)
@@ -0,0 +1,34 @@
+### Inventory Updates Cross-Development with Collections
+
+Inventory updates in production use vendored collections baked into the image,
+which are downloaded from Ansible Galaxy in the build steps.
+
+These instructions short-circuit that process for a faster development loop.
+
+Running this script will do a `git clone` for all the relevant collections
+into the folder `awx/plugins/collections`.
+
+```
+source tools/collections/clone_vendor.sh
+```
+
+After this completes, you must change the path where the server looks
+for the vendored inventory collections.
+Add this line to your local settings:
+
+```
+INVENTORY_COLLECTIONS_ROOT = '/awx_devel/awx/plugins/collections'
+```
+
+Then, when you run an inventory update of a particular type, it should
+use the cloned collection.
+This allows you to cd into a particular collection, add remotes,
+change branches, etc.
+
+#### Extra Build Steps
+
+This will not work correctly in all circumstances.
+Some collections make changes at build time.
+
+In particular, the foreman inventory plugin needs the NAME attribute changed to
+the fully-qualified collection name, and will fail if this is not done.
tools/collections/clone_vendor.sh (new file)
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+base_dir=awx/plugins/collections/ansible_collections
+
+if [ ! -d "$base_dir/azure/azcollection" ]
+then
+  mkdir -p $base_dir/azure
+  git clone https://github.com/ansible-collections/azure.git $base_dir/azure/azcollection
+else
+  echo "Azure collection already exists"
+fi
+
+if [ ! -d "$base_dir/ansible/amazon" ]
+then
+  mkdir -p $base_dir/ansible
+  git clone https://github.com/ansible-collections/ansible.amazon.git $base_dir/ansible/amazon
+else
+  echo "Amazon collection already exists"
+fi
+
+if [ ! -d "$base_dir/theforeman/foreman" ]
+then
+  mkdir -p $base_dir/theforeman
+  git clone https://github.com/theforeman/foreman-ansible-modules.git $base_dir/theforeman/foreman
+else
+  echo "foreman collection already exists"
+fi
+
+if [ ! -d "$base_dir/google/cloud" ]
+then
+  mkdir -p $base_dir/google
+  git clone https://github.com/ansible-collections/ansible_collections_google.git $base_dir/google/cloud
+else
+  echo "google collection already exists"
+fi
+
+if [ ! -d "$base_dir/openstack/cloud" ]
+then
+  mkdir -p $base_dir/openstack
+  git clone https://github.com/openstack/ansible-collections-openstack.git $base_dir/openstack/cloud
+else
+  echo "openstack collection already exists"
+fi
+
+if [ ! -d "$base_dir/community/vmware" ]
+then
+  mkdir -p $base_dir/community
+  git clone https://github.com/ansible-collections/vmware.git $base_dir/community/vmware
+else
+  echo "VMWare collection already exists"
+fi
+
+if [ ! -d "$base_dir/ovirt/ovirt_collection" ]
+then
+  mkdir -p $base_dir/ovirt
+  git clone https://github.com/oVirt/ovirt-ansible-collection.git $base_dir/ovirt/ovirt_collection  # assumed upstream URL; source repo was missing here
+else
+  echo "Ovirt collection already exists"
+fi
+
+if [ ! -d "$base_dir/awx/awx" ]
+then
+  mkdir -p $base_dir/awx
+  ln -s $(pwd)/awx_collection $base_dir/awx/awx
+  # the local checkout is symlinked in place; no clone needed
+else
+  echo "awx collection already exists"
+fi
+
+echo "-- confirmation of what is installed --"
+ANSIBLE_COLLECTIONS_PATHS=awx/plugins/collections ansible-galaxy collection list
@@ -92,9 +92,12 @@ ADD requirements/requirements.txt \
     requirements/requirements_dev.txt \
     requirements/requirements_ansible_uninstall.txt \
     requirements/requirements_tower_uninstall.txt \
+    requirements/collections_requirements.yml \
     /tmp/requirements/
 RUN mkdir -p /venv && chmod g+w /venv
 RUN cd /tmp && VENV_BASE="/venv" make requirements_dev
+RUN mkdir -p /vendor/inventory_collections && chmod g+w /vendor/inventory_collections
+RUN cd /tmp && COLLECTION_BASE="/vendor/inventory_collections" make requirements_collections
 
 # Use the distro provided npm to bootstrap our required version of node
 RUN npm install -g n && n 10.15.0 && dnf remove -y nodejs