Merge pull request #6120 from AlanCoding/ansible-inventory
Inventory updates through `ansible-inventory`
commit 84b8dcece0
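
This merge replaces AWX's hand-rolled INI and script inventory parsers with a single path through the `ansible-inventory` CLI, and adds a per-source `verbosity` option. A minimal sketch of the new data flow, assuming only that `ansible-inventory` is on PATH (the real loader also falls back to a backported script and can wrap the command with proot):

import json
import subprocess

def fetch_inventory_data(source):
    # Counterpart of AnsibleInventoryLoader.get_base_args() + command_to_json():
    # a single --list call returns the whole inventory structure as JSON.
    cmd = ['ansible-inventory', '-i', source, '--list']
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError('%r failed (rc=%d): %s' % (cmd, proc.returncode, stderr))
    return json.loads(stdout)  # one key per group, plus optional "_meta" hostvars

# The loader then hands this dict to dict_to_mem_data() to build MemInventory objects.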
@@ -1462,7 +1462,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
    class Meta:
        fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
                  'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
                  'timeout')
                  'timeout', 'verbosity')

    def get_related(self, obj):
        res = super(InventorySourceOptionsSerializer, self).get_related(obj)

@@ -2363,7 +2363,7 @@ class InventorySourceUpdateView(RetrieveAPIView):

    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.source == 'file' and obj.scm_project_id is not None:
        if obj.source == 'scm':
            raise PermissionDenied(detail=_(
                'Update the project `{}` in order to update this inventory source.'.format(
                    obj.scm_project.name)))

@@ -2,32 +2,34 @@
# All Rights Reserved.

# Python
import glob
import json
import logging
from optparse import make_option
import os
import re
import shlex
import string
import subprocess
import sys
import time
import traceback

# PyYAML
import yaml
import shutil

# Django
from django.conf import settings
from django.core.management.base import NoArgsCommand, CommandError
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, transaction
from django.utils.encoding import smart_text

# AWX
from awx.main.models import * # noqa
from awx.main.task_engine import TaskEnhancer
from awx.main.utils import ignore_inventory_computed_fields, check_proot_installed, wrap_args_with_proot
from awx.main.utils import (
    ignore_inventory_computed_fields,
    check_proot_installed,
    wrap_args_with_proot,
    build_proot_temp_dir
)
from awx.main.utils.mem_inventory import MemInventory, dict_to_mem_data
from awx.main.signals import disable_activity_stream

logger = logging.getLogger('awx.main.commands.inventory_import')
@@ -49,338 +51,111 @@ Demo mode free license count exceeded, would bring available instances to %(new_
See http://www.ansible.com/renew for licensing information.'''


class MemObject(object):
def functioning_dir(path):
    if os.path.isdir(path):
        return path
    return os.path.dirname(path)


class AnsibleInventoryLoader(object):
    '''
    Common code shared between in-memory groups and hosts.
    Given executable `source` (directory, executable, or file) this will
    use the ansible-inventory CLI utility to convert it into in-memory
    representational objects. Example:
        /usr/bin/ansible/ansible-inventory -i hosts --list
    If it fails to find this, it uses the backported script instead.
    '''

    def __init__(self, name, source_dir):
        assert name, 'no name'
        assert source_dir, 'no source dir'
        self.name = name
        self.source_dir = source_dir

    def load_vars(self, base_path):
        all_vars = {}
        files_found = 0
        for suffix in ('', '.yml', '.yaml', '.json'):
            path = ''.join([base_path, suffix]).encode("utf-8")
            if not os.path.exists(path):
                continue
            if not os.path.isfile(path):
                continue
            files_found += 1
            if files_found > 1:
                raise RuntimeError('Multiple variable files found. There should only be one. %s ' % self.name)
            vars_name = os.path.basename(os.path.dirname(path))
            logger.debug('Loading %s from %s', vars_name, path)
            try:
                v = yaml.safe_load(file(path, 'r').read())
                if hasattr(v, 'items'):  # is a dict
                    all_vars.update(v)
            except yaml.YAMLError as e:
                if hasattr(e, 'problem_mark'):
                    logger.error('Invalid YAML in %s:%s col %s', path,
                                 e.problem_mark.line + 1,
                                 e.problem_mark.column + 1)
                else:
                    logger.error('Error loading YAML from %s', path)
                raise
        return all_vars


class MemGroup(MemObject):
    '''
    In-memory representation of an inventory group.
    '''

    def __init__(self, name, source_dir):
        super(MemGroup, self).__init__(name, source_dir)
        self.children = []
        self.hosts = []
        self.variables = {}
        self.parents = []
        # Used on the "all" group in place of previous global variables.
        # maps host and group names to hosts to prevent redundant additions
        self.all_hosts = {}
        self.all_groups = {}
        group_vars = os.path.join(source_dir, 'group_vars', self.name)
        self.variables = self.load_vars(group_vars)
        logger.debug('Loaded group: %s', self.name)

    def child_group_by_name(self, name, loader):
        if name == 'all':
            return
        logger.debug('Looking for %s as child group of %s', name, self.name)
        # slight hack here, passing in 'self' for all_group but child=True won't use it
        group = loader.get_group(name, self, child=True)
        if group:
            # don't add to child groups if already there
            for g in self.children:
                if g.name == name:
                    return g
            logger.debug('Adding child group %s to group %s', group.name, self.name)
            self.children.append(group)
        return group

    def add_child_group(self, group):
        assert group.name is not 'all', 'group name is all'
        assert isinstance(group, MemGroup), 'not MemGroup instance'
        logger.debug('Adding child group %s to parent %s', group.name, self.name)
        if group not in self.children:
            self.children.append(group)
        if self not in group.parents:
            group.parents.append(self)

    def add_host(self, host):
        assert isinstance(host, MemHost), 'not MemHost instance'
        logger.debug('Adding host %s to group %s', host.name, self.name)
        if host not in self.hosts:
            self.hosts.append(host)

    def debug_tree(self, group_names=None):
        group_names = group_names or set()
        if self.name in group_names:
            return
        logger.debug('Dumping tree for group "%s":', self.name)
        logger.debug('- Vars: %r', self.variables)
        for h in self.hosts:
            logger.debug('- Host: %s, %r', h.name, h.variables)
        for g in self.children:
            logger.debug('- Child: %s', g.name)
        logger.debug('----')
        group_names.add(self.name)
        for g in self.children:
            g.debug_tree(group_names)


class MemHost(MemObject):
    '''
    In-memory representation of an inventory host.
    '''

    def __init__(self, name, source_dir, port=None):
        super(MemHost, self).__init__(name, source_dir)
        self.variables = {}
        self.instance_id = None
        self.name = name
        if port:
            self.variables['ansible_ssh_port'] = port
        host_vars = os.path.join(source_dir, 'host_vars', name)
        self.variables.update(self.load_vars(host_vars))
        logger.debug('Loaded host: %s', self.name)


class BaseLoader(object):
    '''
    Common functions for an inventory loader from a given source.
    '''

    def __init__(self, source, all_group=None, group_filter_re=None, host_filter_re=None, is_custom=False):
    def __init__(self, source, group_filter_re=None, host_filter_re=None, is_custom=False):
        self.source = source
        self.source_dir = os.path.dirname(self.source)
        self.all_group = all_group or MemGroup('all', self.source_dir)
        self.is_custom = is_custom
        self.tmp_private_dir = None
        self.method = 'ansible-inventory'
        self.group_filter_re = group_filter_re
        self.host_filter_re = host_filter_re
        self.ipv6_port_re = re.compile(r'^\[([A-Fa-f0-9:]{3,})\]:(\d+?)$')
        self.is_custom = is_custom

    def get_host(self, name):
        '''
        Return a MemHost instance from host name, creating if needed. If name
        contains brackets, they will NOT be interpreted as a host pattern.
        '''
        m = self.ipv6_port_re.match(name)
        if m:
            host_name = m.groups()[0]
            port = int(m.groups()[1])
        elif name.count(':') == 1:
            host_name = name.split(':')[0]
            try:
                port = int(name.split(':')[1])
            except (ValueError, UnicodeDecodeError):
                logger.warning(u'Invalid port "%s" for host "%s"',
                               name.split(':')[1], host_name)
                port = None
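
The `ipv6_port_re` used above splits a bracketed IPv6 address from its port; a standalone illustration with hypothetical values:

import re

ipv6_port_re = re.compile(r'^\[([A-Fa-f0-9:]{3,})\]:(\d+?)$')
print(ipv6_port_re.match('[fd00::1]:2222').groups())   # ('fd00::1', '2222')
print(ipv6_port_re.match('web1.example.com:2222'))     # None; handled by the single-colon branch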
    def build_env(self):
        # Use ansible venv if it's available and setup to use
        env = dict(os.environ.items())
        if settings.ANSIBLE_USE_VENV:
            env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
            env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
            venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
            env.pop('PYTHONPATH', None)  # default to none if no python_ver matches
            for python_ver in ["python2.7", "python2.6"]:
                if os.path.isdir(os.path.join(venv_libdir, python_ver)):
                    env['PYTHONPATH'] = os.path.join(venv_libdir, python_ver, "site-packages") + ":"
                    break
        return env

    def get_base_args(self):
        # get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
        for path in os.environ["PATH"].split(os.pathsep):
            potential_path = os.path.join(path.strip('"'), 'ansible-inventory')
            if os.path.isfile(potential_path) and os.access(potential_path, os.X_OK):
                return [potential_path, '-i', self.source]

        # ansible-inventory was not found, look for backported module
        abs_module_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', '..', '..', 'plugins',
            'ansible_inventory', 'backport.py'))
        self.method = 'ansible-inventory backport'

        if not os.path.exists(abs_module_path):
            raise ImproperlyConfigured('Can not find inventory module')
        return [abs_module_path, '-i', self.source]

    def get_proot_args(self, cmd, env):
        source_dir = functioning_dir(self.source)
        cwd = os.getcwd()
        if not check_proot_installed():
            raise RuntimeError("proot is not installed but is configured for use")

        kwargs = {}
        if self.is_custom:
            # use source's tmp dir for proot, task manager will delete folder
            logger.debug("Using provided directory '{}' for isolation.".format(source_dir))
            kwargs['proot_temp_dir'] = source_dir
            cwd = source_dir
        else:
            host_name = name
            port = None
        if self.host_filter_re and not self.host_filter_re.match(host_name):
            logger.debug('Filtering host %s', host_name)
            return None
        host = None
        if host_name not in self.all_group.all_hosts:
            host = MemHost(host_name, self.source_dir, port)
            self.all_group.all_hosts[host_name] = host
        return self.all_group.all_hosts[host_name]
            # we can not safely store tmp data in source dir or trust script contents
            if env['AWX_PRIVATE_DATA_DIR']:
                # If this is non-blank, file credentials are being used and we need access
                private_data_dir = functioning_dir(env['AWX_PRIVATE_DATA_DIR'])
                logger.debug("Using private credential data in '{}'.".format(private_data_dir))
                kwargs['private_data_dir'] = private_data_dir
            self.tmp_private_dir = build_proot_temp_dir()
            logger.debug("Using fresh temporary directory '{}' for isolation.".format(self.tmp_private_dir))
            kwargs['proot_temp_dir'] = self.tmp_private_dir
            # Run from source's location so that custom script contents are in `show_paths`
            cwd = functioning_dir(self.source)
        logger.debug("Running from `{}` working directory.".format(cwd))

    def get_hosts(self, name):
        '''
        Return iterator over one or more MemHost instances from host name or
        host pattern.
        '''
        def iternest(*args):
            if args:
                for i in args[0]:
                    for j in iternest(*args[1:]):
                        yield ''.join([str(i), j])
            else:
                yield ''
        if self.ipv6_port_re.match(name):
            yield self.get_host(name)
            return
        pattern_re = re.compile(r'(\[(?:(?:\d+\:\d+)|(?:[A-Za-z]\:[A-Za-z]))(?:\:\d+)??\])')
        iters = []
        for s in re.split(pattern_re, name):
            if re.match(pattern_re, s):
                start, end, step = (s[1:-1] + ':1').split(':')[:3]
                mapfunc = str
                if start in string.ascii_letters:
                    istart = string.ascii_letters.index(start)
                    iend = string.ascii_letters.index(end) + 1
                    if istart >= iend:
                        raise ValueError('invalid host range specified')
                    seq = string.ascii_letters[istart:iend:int(step)]
                else:
                    if start[0] == '0' and len(start) > 1:
                        if len(start) != len(end):
                            raise ValueError('invalid host range specified')
                        mapfunc = lambda x: str(x).zfill(len(start))
                    seq = xrange(int(start), int(end) + 1, int(step))
                iters.append(map(mapfunc, seq))
            elif re.search(r'[\[\]]', s):
                raise ValueError('invalid host range specified')
            elif s:
                iters.append([s])
        for iname in iternest(*iters):
            yield self.get_host(iname)
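
The range syntax handled by `get_hosts` above expands patterns such as web[1:3].example.com; a condensed sketch of the same expansion (omitting the zero-padding and step handling of the original):

import itertools
import re
import string

pattern_re = re.compile(r'(\[(?:(?:\d+\:\d+)|(?:[A-Za-z]\:[A-Za-z]))(?:\:\d+)??\])')

def expand(name):
    parts = []
    for s in re.split(pattern_re, name):
        if pattern_re.match(s):
            start, end = s[1:-1].split(':')[:2]
            if start.isdigit():
                parts.append([str(i) for i in range(int(start), int(end) + 1)])
            else:
                i, j = string.ascii_letters.index(start), string.ascii_letters.index(end)
                parts.append(list(string.ascii_letters[i:j + 1]))
        elif s:
            parts.append([s])
    return [''.join(p) for p in itertools.product(*parts)]

print(expand('web[1:3].example.com'))
# ['web1.example.com', 'web2.example.com', 'web3.example.com']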
    def get_group(self, name, all_group=None, child=False):
        '''
        Return a MemGroup instance from group name, creating if needed.
        '''
        all_group = all_group or self.all_group
        if name == 'all':
            return all_group
        if self.group_filter_re and not self.group_filter_re.match(name):
            logger.debug('Filtering group %s', name)
            return None
        if name not in self.all_group.all_groups:
            group = MemGroup(name, self.source_dir)
            if not child:
                all_group.add_child_group(group)
            self.all_group.all_groups[name] = group
        return self.all_group.all_groups[name]

    def load(self):
        raise NotImplementedError


class IniLoader(BaseLoader):
    '''
    Loader to read inventory from an INI-formatted text file.
    '''

    def load(self):
        logger.info('Reading INI source: %s', self.source)
        group = self.all_group
        input_mode = 'host'
        for line in file(self.source, 'r'):
            line = line.split('#')[0].strip()
            if not line:
                continue
            elif line.startswith('[') and line.endswith(']'):
                # Mode change, possible new group name
                line = line[1:-1].strip()
                if line.endswith(':vars'):
                    input_mode = 'vars'
                    line = line[:-5]
                elif line.endswith(':children'):
                    input_mode = 'children'
                    line = line[:-9]
                else:
                    input_mode = 'host'
                group = self.get_group(line)
            elif group:
                # If group is None, we are skipping this group and shouldn't
                # capture any children/variables/hosts under it.
                # Add hosts with inline variables, or variables/children to
                # an existing group.
                tokens = shlex.split(line)
                if input_mode == 'host':
                    for host in self.get_hosts(tokens[0]):
                        if not host:
                            continue
                        if len(tokens) > 1:
                            for t in tokens[1:]:
                                k,v = t.split('=', 1)
                                host.variables[k] = v
                        group.add_host(host)
                elif input_mode == 'children':
                    group.child_group_by_name(line, self)
                elif input_mode == 'vars':
                    for t in tokens:
                        k, v = t.split('=', 1)
                        group.variables[k] = v
        # TODO: expansion patterns are probably not going to be supported. YES THEY ARE!


# from API documentation:
#
# if called with --list, inventory outputs like so:
#
# {
#     "databases" : {
#         "hosts" : [ "host1.example.com", "host2.example.com" ],
#         "vars" : {
#             "a" : true
#         }
#     },
#     "webservers" : [ "host2.example.com", "host3.example.com" ],
#     "atlanta" : {
#         "hosts" : [ "host1.example.com", "host4.example.com", "host5.example.com" ],
#         "vars" : {
#             "b" : false
#         },
#         "children": [ "marietta", "5points" ],
#     },
#     "marietta" : [ "host6.example.com" ],
#     "5points" : [ "host7.example.com" ]
# }
#
# if called with --host <host_record_name> outputs JSON for that host
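
A short sketch of consuming that contract, mirroring how the loader treats a full group dict versus a bare host list (hypothetical data):

import json

raw = json.loads('{"databases": {"hosts": ["host1.example.com"], "vars": {"a": true}},'
                 ' "marietta": ["host6.example.com"]}')
for group_name, value in raw.items():
    if isinstance(value, dict):            # full form: hosts/vars/children keys
        hosts, group_vars = value.get('hosts', []), value.get('vars', {})
    else:                                  # short form: bare list of host names
        hosts, group_vars = value, {}
    print(group_name, hosts, group_vars)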
class ExecutableJsonLoader(BaseLoader):
        return wrap_args_with_proot(cmd, cwd, **kwargs)

    def command_to_json(self, cmd):
        data = {}
        stdout, stderr = '', ''
        try:
            if self.is_custom and getattr(settings, 'AWX_PROOT_ENABLED', False):
                if not check_proot_installed():
                    raise RuntimeError("proot is not installed but is configured for use")
                kwargs = {'proot_temp_dir': self.source_dir}  # TODO: Remove proot dir
                cmd = wrap_args_with_proot(cmd, self.source_dir, **kwargs)
            # Use ansible venv if it's available and setup to use
            env = dict(os.environ.items())
            if settings.ANSIBLE_USE_VENV:
                env['VIRTUAL_ENV'] = settings.ANSIBLE_VENV_PATH
                env['PATH'] = os.path.join(settings.ANSIBLE_VENV_PATH, "bin") + ":" + env['PATH']
                venv_libdir = os.path.join(settings.ANSIBLE_VENV_PATH, "lib")
                env.pop('PYTHONPATH', None)  # default to none if no python_ver matches
                for python_ver in ["python2.7", "python2.6"]:
                    if os.path.isdir(os.path.join(venv_libdir, python_ver)):
                        env['PYTHONPATH'] = os.path.join(venv_libdir, python_ver, "site-packages") + ":"
                        break
        env = self.build_env()

            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
            stdout, stderr = proc.communicate()
            if proc.returncode != 0:
                raise RuntimeError('%r failed (rc=%d) with output: %s' % (cmd, proc.returncode, stderr))
            try:
                data = json.loads(stdout)
            except ValueError:
        if ((self.is_custom or 'AWX_PRIVATE_DATA_DIR' in env) and
                getattr(settings, 'AWX_PROOT_ENABLED', False)):
            cmd = self.get_proot_args(cmd, env)

        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        stdout, stderr = proc.communicate()

        if self.tmp_private_dir:
            shutil.rmtree(self.tmp_private_dir, True)
        if proc.returncode != 0:
            raise RuntimeError('%s failed (rc=%d) with output:\n%s' % (self.method, proc.returncode, stderr))
        elif 'file not found' in stderr:
            # File not visible to inventory module due to proot (exit code 0, Ansible behavior)
            raise IOError('Inventory module failed to find source {} with output:\n{}.'.format(self.source, stderr))

        try:
            data = json.loads(stdout)
            if not isinstance(data, dict):
                raise TypeError('Returned JSON must be a dictionary, got %s instead' % str(type(data)))
        except:
            logger.error('Failed to load JSON from: %s', stdout)
@@ -388,93 +163,21 @@ class ExecutableJsonLoader(BaseLoader):
        return data

    def load(self):
        logger.info('Reading executable JSON source: %s', self.source)
        data = self.command_to_json([self.source, '--list'])
        _meta = data.pop('_meta', {})
        base_args = self.get_base_args()
        logger.info('Reading Ansible inventory source: %s', self.source)
        data = self.command_to_json(base_args + ['--list'])

        for k,v in data.iteritems():
            group = self.get_group(k)
            if not group:
                continue
        logger.info('Processing JSON output...')
        inventory = MemInventory(
            group_filter_re=self.group_filter_re, host_filter_re=self.host_filter_re)
        inventory = dict_to_mem_data(data, inventory=inventory)

            # Load group hosts/vars/children from a dictionary.
            if isinstance(v, dict):
                # Process hosts within a group.
                hosts = v.get('hosts', {})
                if isinstance(hosts, dict):
                    for hk, hv in hosts.iteritems():
                        host = self.get_host(hk)
                        if not host:
                            continue
                        if isinstance(hv, dict):
                            host.variables.update(hv)
                        else:
                            self.logger.warning('Expected dict of vars for '
                                                'host "%s", got %s instead',
                                                hk, str(type(hv)))
                        group.add_host(host)
                elif isinstance(hosts, (list, tuple)):
                    for hk in hosts:
                        host = self.get_host(hk)
                        if not host:
                            continue
                        group.add_host(host)
                else:
                    logger.warning('Expected dict or list of "hosts" for '
                                   'group "%s", got %s instead', k,
                                   str(type(hosts)))
                # Process group variables.
                vars = v.get('vars', {})
                if isinstance(vars, dict):
                    group.variables.update(vars)
                else:
                    self.logger.warning('Expected dict of vars for '
                                        'group "%s", got %s instead',
                                        k, str(type(vars)))
                # Process child groups.
                children = v.get('children', [])
                if isinstance(children, (list, tuple)):
                    for c in children:
                        child = self.get_group(c, self.all_group, child=True)
                        if child:
                            group.add_child_group(child)
                else:
                    self.logger.warning('Expected list of children for '
                                        'group "%s", got %s instead',
                                        k, str(type(children)))

            # Load host names from a list.
            elif isinstance(v, (list, tuple)):
                for h in v:
                    host = self.get_host(h)
                    if not host:
                        continue
                    group.add_host(host)
            else:
                logger.warning('')
                self.logger.warning('Expected dict or list for group "%s", '
                                    'got %s instead', k, str(type(v)))

            if k != 'all':
                self.all_group.add_child_group(group)

        # Invoke the executable once for each host name we've built up
        # to set their variables
        for k,v in self.all_group.all_hosts.iteritems():
            if 'hostvars' not in _meta:
                data = self.command_to_json([self.source, '--host', k.encode("utf-8")])
            else:
                data = _meta['hostvars'].get(k, {})
            if isinstance(data, dict):
                v.variables.update(data)
            else:
                self.logger.warning('Expected dict of vars for '
                                    'host "%s", got %s instead',
                                    k, str(type(data)))
        return inventory


def load_inventory_source(source, all_group=None, group_filter_re=None,
                          host_filter_re=None, exclude_empty_groups=False, is_custom=False):
def load_inventory_source(source, group_filter_re=None,
                          host_filter_re=None, exclude_empty_groups=False,
                          is_custom=False):
    '''
    Load inventory from given source directory or file.
    '''
@@ -483,41 +186,25 @@ def load_inventory_source(source, all_group=None, group_filter_re=None,
    source = source.replace('azure.py', 'windows_azure.py')
    source = source.replace('satellite6.py', 'foreman.py')
    source = source.replace('vmware.py', 'vmware_inventory.py')
    logger.debug('Analyzing type of source: %s', source)
    original_all_group = all_group
    if not os.path.exists(source):
        raise IOError('Source does not exist: %s' % source)
    source = os.path.join(os.getcwd(), os.path.dirname(source),
                          os.path.basename(source))
    source = os.path.normpath(os.path.abspath(source))
    if os.path.isdir(source):
        all_group = all_group or MemGroup('all', source)
        for filename in glob.glob(os.path.join(source, '*')):
            if filename.endswith(".ini") or os.path.isdir(filename):
                continue
            load_inventory_source(filename, all_group, group_filter_re,
                                  host_filter_re, is_custom=is_custom)
    else:
        all_group = all_group or MemGroup('all', os.path.dirname(source))
        if os.access(source, os.X_OK):
            ExecutableJsonLoader(source, all_group, group_filter_re, host_filter_re, is_custom).load()
        else:
            IniLoader(source, all_group, group_filter_re, host_filter_re).load()

    inventory = AnsibleInventoryLoader(
        source=source,
        group_filter_re=group_filter_re,
        host_filter_re=host_filter_re,
        is_custom=is_custom).load()

    logger.debug('Finished loading from source: %s', source)
    # Exclude groups that are completely empty.
    if original_all_group is None and exclude_empty_groups:
        for name, group in all_group.all_groups.items():
            if not group.children and not group.hosts and not group.variables:
                logger.debug('Removing empty group %s', name)
                for parent in group.parents:
                    if group in parent.children:
                        parent.children.remove(group)
                del all_group.all_groups[name]
    if original_all_group is None:
        logger.info('Loaded %d groups, %d hosts', len(all_group.all_groups),
                    len(all_group.all_hosts))
    return all_group
    if exclude_empty_groups:
        inventory.delete_empty_groups()
    logger.info('Loaded %d groups, %d hosts', len(inventory.all_group.all_groups),
                len(inventory.all_group.all_hosts))
    return inventory.all_group

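Callers no longer pass an `all_group`; a hedged usage sketch of the new signature (the inventory path is a placeholder):

all_group = load_inventory_source('/etc/ansible/hosts',
                                  group_filter_re=None,
                                  host_filter_re=None,
                                  exclude_empty_groups=True)
print(len(all_group.all_groups), len(all_group.all_hosts))
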
class Command(NoArgsCommand):
@@ -573,22 +260,10 @@ class Command(NoArgsCommand):
                'specified as "foo.bar" to traverse nested dicts.'),
    )

    def init_logging(self):
    def set_logging_level(self):
        log_levels = dict(enumerate([logging.WARNING, logging.INFO,
                                     logging.DEBUG, 0]))
        self.logger = logging.getLogger('awx.main.commands.inventory_import')
        self.logger.setLevel(log_levels.get(self.verbosity, 0))
        handler = logging.StreamHandler()

        class Formatter(logging.Formatter):
            def format(self, record):
                record.relativeSeconds = record.relativeCreated / 1000.0
                return logging.Formatter.format(self, record)

        formatter = Formatter('%(relativeSeconds)9.3f %(levelname)-8s %(message)s')
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.logger.propagate = False
        logger.setLevel(log_levels.get(self.verbosity, 0))

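`set_logging_level` maps the command's --verbosity option onto stdlib logging levels; the same mapping in isolation:

import logging

log_levels = dict(enumerate([logging.WARNING, logging.INFO, logging.DEBUG, 0]))
# verbosity 0 -> WARNING, 1 -> INFO, 2 -> DEBUG, 3 -> 0 (NOTSET, i.e. everything)
for verbosity in range(4):
    print(verbosity, log_levels.get(verbosity, 0))
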
    def _get_instance_id(self, from_dict, default=''):
        '''
@@ -650,8 +325,8 @@ class Command(NoArgsCommand):
            raise CommandError('Inventory with %s = %s cannot be found' % q.items()[0])
        except Inventory.MultipleObjectsReturned:
            raise CommandError('Inventory with %s = %s returned multiple results' % q.items()[0])
        self.logger.info('Updating inventory %d: %s' % (self.inventory.pk,
                                                        self.inventory.name))
        logger.info('Updating inventory %d: %s' % (self.inventory.pk,
                                                   self.inventory.name))

        # Load inventory source if specified via environment variable (when
        # inventory_import is called from an InventoryUpdate task).
@@ -727,8 +402,8 @@ class Command(NoArgsCommand):
        for mem_host in self.all_group.all_hosts.values():
            instance_id = self._get_instance_id(mem_host.variables)
            if not instance_id:
                self.logger.warning('Host "%s" has no "%s" variable',
                                    mem_host.name, self.instance_id_var)
                logger.warning('Host "%s" has no "%s" variable',
                               mem_host.name, self.instance_id_var)
                continue
            mem_host.instance_id = instance_id
            self.mem_instance_id_map[instance_id] = mem_host.name
@@ -768,11 +443,11 @@ class Command(NoArgsCommand):
            for host in hosts_qs.filter(pk__in=del_pks):
                host_name = host.name
                host.delete()
                self.logger.info('Deleted host "%s"', host_name)
                logger.info('Deleted host "%s"', host_name)
        if settings.SQL_DEBUG:
            self.logger.warning('host deletions took %d queries for %d hosts',
                                len(connection.queries) - queries_before,
                                len(all_del_pks))
            logger.warning('host deletions took %d queries for %d hosts',
                           len(connection.queries) - queries_before,
                           len(all_del_pks))

    def _delete_groups(self):
        '''
@@ -799,11 +474,11 @@ class Command(NoArgsCommand):
                group_name = group.name
                with ignore_inventory_computed_fields():
                    group.delete()
                self.logger.info('Group "%s" deleted', group_name)
                logger.info('Group "%s" deleted', group_name)
        if settings.SQL_DEBUG:
            self.logger.warning('group deletions took %d queries for %d groups',
                                len(connection.queries) - queries_before,
                                len(all_del_pks))
            logger.warning('group deletions took %d queries for %d groups',
                           len(connection.queries) - queries_before,
                           len(all_del_pks))

    def _delete_group_children_and_hosts(self):
        '''
@@ -831,8 +506,8 @@ class Command(NoArgsCommand):
                for db_child in db_children.filter(pk__in=child_group_pks):
                    group_group_count += 1
                    db_group.children.remove(db_child)
                    self.logger.info('Group "%s" removed from group "%s"',
                                     db_child.name, db_group.name)
                    logger.info('Group "%s" removed from group "%s"',
                                db_child.name, db_group.name)
            # FIXME: Inventory source group relationships
            # Delete group/host relationships not present in imported data.
            db_hosts = db_group.hosts
@@ -859,12 +534,12 @@ class Command(NoArgsCommand):
                    if db_host not in db_group.hosts.all():
                        continue
                    db_group.hosts.remove(db_host)
                    self.logger.info('Host "%s" removed from group "%s"',
                                     db_host.name, db_group.name)
                    logger.info('Host "%s" removed from group "%s"',
                                db_host.name, db_group.name)
        if settings.SQL_DEBUG:
            self.logger.warning('group-group and group-host deletions took %d queries for %d relationships',
                                len(connection.queries) - queries_before,
                                group_group_count + group_host_count)
            logger.warning('group-group and group-host deletions took %d queries for %d relationships',
                           len(connection.queries) - queries_before,
                           group_group_count + group_host_count)

    def _update_inventory(self):
        '''
@@ -884,11 +559,11 @@ class Command(NoArgsCommand):
            all_obj.variables = json.dumps(db_variables)
            all_obj.save(update_fields=['variables'])
            if self.overwrite_vars:
                self.logger.info('%s variables replaced from "all" group', all_name.capitalize())
                logger.info('%s variables replaced from "all" group', all_name.capitalize())
            else:
                self.logger.info('%s variables updated from "all" group', all_name.capitalize())
                logger.info('%s variables updated from "all" group', all_name.capitalize())
        else:
            self.logger.info('%s variables unmodified', all_name.capitalize())
            logger.info('%s variables unmodified', all_name.capitalize())

    def _create_update_groups(self):
        '''
@@ -920,11 +595,11 @@ class Command(NoArgsCommand):
                group.variables = json.dumps(db_variables)
                group.save(update_fields=['variables'])
                if self.overwrite_vars:
                    self.logger.info('Group "%s" variables replaced', group.name)
                    logger.info('Group "%s" variables replaced', group.name)
                else:
                    self.logger.info('Group "%s" variables updated', group.name)
                    logger.info('Group "%s" variables updated', group.name)
            else:
                self.logger.info('Group "%s" variables unmodified', group.name)
                logger.info('Group "%s" variables unmodified', group.name)
            existing_group_names.add(group.name)
            self._batch_add_m2m(self.inventory_source.groups, group)
        for group_name in all_group_names:
@@ -932,13 +607,13 @@ class Command(NoArgsCommand):
                continue
            mem_group = self.all_group.all_groups[group_name]
            group = self.inventory.groups.create(name=group_name, variables=json.dumps(mem_group.variables), description='imported')
            self.logger.info('Group "%s" added', group.name)
            logger.info('Group "%s" added', group.name)
            self._batch_add_m2m(self.inventory_source.groups, group)
        self._batch_add_m2m(self.inventory_source.groups, flush=True)
        if settings.SQL_DEBUG:
            self.logger.warning('group updates took %d queries for %d groups',
                                len(connection.queries) - queries_before,
                                len(self.all_group.all_groups))
            logger.warning('group updates took %d queries for %d groups',
                           len(connection.queries) - queries_before,
                           len(self.all_group.all_groups))

    def _update_db_host_from_mem_host(self, db_host, mem_host):
        # Update host variables.
@@ -971,24 +646,24 @@ class Command(NoArgsCommand):
        if update_fields:
            db_host.save(update_fields=update_fields)
        if 'name' in update_fields:
            self.logger.info('Host renamed from "%s" to "%s"', old_name, mem_host.name)
            logger.info('Host renamed from "%s" to "%s"', old_name, mem_host.name)
        if 'instance_id' in update_fields:
            if old_instance_id:
                self.logger.info('Host "%s" instance_id updated', mem_host.name)
                logger.info('Host "%s" instance_id updated', mem_host.name)
            else:
                self.logger.info('Host "%s" instance_id added', mem_host.name)
                logger.info('Host "%s" instance_id added', mem_host.name)
        if 'variables' in update_fields:
            if self.overwrite_vars:
                self.logger.info('Host "%s" variables replaced', mem_host.name)
                logger.info('Host "%s" variables replaced', mem_host.name)
            else:
                self.logger.info('Host "%s" variables updated', mem_host.name)
                logger.info('Host "%s" variables updated', mem_host.name)
        else:
            self.logger.info('Host "%s" variables unmodified', mem_host.name)
            logger.info('Host "%s" variables unmodified', mem_host.name)
        if 'enabled' in update_fields:
            if enabled:
                self.logger.info('Host "%s" is now enabled', mem_host.name)
                logger.info('Host "%s" is now enabled', mem_host.name)
            else:
                self.logger.info('Host "%s" is now disabled', mem_host.name)
                logger.info('Host "%s" is now disabled', mem_host.name)
        self._batch_add_m2m(self.inventory_source.hosts, db_host)

    def _create_update_hosts(self):
@@ -1062,17 +737,17 @@ class Command(NoArgsCommand):
                host_attrs['instance_id'] = instance_id
            db_host = self.inventory.hosts.create(**host_attrs)
            if enabled is False:
                self.logger.info('Host "%s" added (disabled)', mem_host_name)
                logger.info('Host "%s" added (disabled)', mem_host_name)
            else:
                self.logger.info('Host "%s" added', mem_host_name)
                logger.info('Host "%s" added', mem_host_name)
            self._batch_add_m2m(self.inventory_source.hosts, db_host)

        self._batch_add_m2m(self.inventory_source.hosts, flush=True)

        if settings.SQL_DEBUG:
            self.logger.warning('host updates took %d queries for %d hosts',
                                len(connection.queries) - queries_before,
                                len(self.all_group.all_hosts))
            logger.warning('host updates took %d queries for %d hosts',
                           len(connection.queries) - queries_before,
                           len(self.all_group.all_hosts))

    def _create_update_group_children(self):
        '''
@@ -1092,14 +767,14 @@ class Command(NoArgsCommand):
                child_names = all_child_names[offset2:(offset2 + self._batch_size)]
                db_children_qs = self.inventory.groups.filter(name__in=child_names)
                for db_child in db_children_qs.filter(children__id=db_group.id):
                    self.logger.info('Group "%s" already child of group "%s"', db_child.name, db_group.name)
                    logger.info('Group "%s" already child of group "%s"', db_child.name, db_group.name)
                for db_child in db_children_qs.exclude(children__id=db_group.id):
                    self._batch_add_m2m(db_group.children, db_child)
                    self.logger.info('Group "%s" added as child of "%s"', db_child.name, db_group.name)
                    logger.info('Group "%s" added as child of "%s"', db_child.name, db_group.name)
            self._batch_add_m2m(db_group.children, flush=True)
        if settings.SQL_DEBUG:
            self.logger.warning('Group-group updates took %d queries for %d group-group relationships',
                                len(connection.queries) - queries_before, group_group_count)
            logger.warning('Group-group updates took %d queries for %d group-group relationships',
                           len(connection.queries) - queries_before, group_group_count)

    def _create_update_group_hosts(self):
        # For each host in a mem group, add it to the parent(s) to which it
@@ -1118,23 +793,23 @@ class Command(NoArgsCommand):
                host_names = all_host_names[offset2:(offset2 + self._batch_size)]
                db_hosts_qs = self.inventory.hosts.filter(name__in=host_names)
                for db_host in db_hosts_qs.filter(groups__id=db_group.id):
                    self.logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
                    logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
                for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
                    self._batch_add_m2m(db_group.hosts, db_host)
                    self.logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
                    logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
            all_instance_ids = sorted([h.instance_id for h in mem_group.hosts if h.instance_id])
            for offset2 in xrange(0, len(all_instance_ids), self._batch_size):
                instance_ids = all_instance_ids[offset2:(offset2 + self._batch_size)]
                db_hosts_qs = self.inventory.hosts.filter(instance_id__in=instance_ids)
                for db_host in db_hosts_qs.filter(groups__id=db_group.id):
                    self.logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
                    logger.info('Host "%s" already in group "%s"', db_host.name, db_group.name)
                for db_host in db_hosts_qs.exclude(groups__id=db_group.id):
                    self._batch_add_m2m(db_group.hosts, db_host)
                    self.logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
                    logger.info('Host "%s" added to group "%s"', db_host.name, db_group.name)
            self._batch_add_m2m(db_group.hosts, flush=True)
        if settings.SQL_DEBUG:
            self.logger.warning('Group-host updates took %d queries for %d group-host relationships',
                                len(connection.queries) - queries_before, group_host_count)
            logger.warning('Group-host updates took %d queries for %d group-host relationships',
                           len(connection.queries) - queries_before, group_host_count)

    def load_into_database(self):
        '''
@@ -1159,14 +834,14 @@ class Command(NoArgsCommand):
    def check_license(self):
        license_info = TaskEnhancer().validate_enhancements()
        if license_info.get('license_key', 'UNLICENSED') == 'UNLICENSED':
            self.logger.error(LICENSE_NON_EXISTANT_MESSAGE)
            logger.error(LICENSE_NON_EXISTANT_MESSAGE)
            raise CommandError('No Tower license found!')
        available_instances = license_info.get('available_instances', 0)
        free_instances = license_info.get('free_instances', 0)
        time_remaining = license_info.get('time_remaining', 0)
        new_count = Host.objects.active_count()
        if time_remaining <= 0 and not license_info.get('demo', False):
            self.logger.error(LICENSE_EXPIRED_MESSAGE)
            logger.error(LICENSE_EXPIRED_MESSAGE)
            raise CommandError("License has expired!")
        if free_instances < 0:
            d = {
@@ -1174,9 +849,9 @@ class Command(NoArgsCommand):
                'available_instances': available_instances,
            }
            if license_info.get('demo', False):
                self.logger.error(DEMO_LICENSE_MESSAGE % d)
                logger.error(DEMO_LICENSE_MESSAGE % d)
            else:
                self.logger.error(LICENSE_MESSAGE % d)
                logger.error(LICENSE_MESSAGE % d)
            raise CommandError('License count exceeded!')

    def mark_license_failure(self, save=True):
@@ -1185,7 +860,7 @@ class Command(NoArgsCommand):

    def handle_noargs(self, **options):
        self.verbosity = int(options.get('verbosity', 1))
        self.init_logging()
        self.set_logging_level()
        self.inventory_name = options.get('inventory_name', None)
        self.inventory_id = options.get('inventory_id', None)
        self.overwrite = bool(options.get('overwrite', False))
@@ -1224,7 +899,7 @@ class Command(NoArgsCommand):
        TODO: Remove this deprecation when we remove support for rax.py
        '''
        if self.source == "rax.py":
            self.logger.info("Rackspace inventory sync is Deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.")
            logger.info("Rackspace inventory sync is Deprecated in Tower 3.1.0 and support for Rackspace will be removed in a future release.")

        begin = time.time()
        self.load_inventory_from_database()
@@ -1249,7 +924,7 @@ class Command(NoArgsCommand):
                self.inventory_update.save()

            # Load inventory from source.
            self.all_group = load_inventory_source(self.source, None,
            self.all_group = load_inventory_source(self.source,
                                                   self.group_filter_re,
                                                   self.host_filter_re,
                                                   self.exclude_empty_groups,
@@ -1262,7 +937,7 @@ class Command(NoArgsCommand):
            with transaction.atomic():
                # Merge/overwrite inventory into database.
                if settings.SQL_DEBUG:
                    self.logger.warning('loading into database...')
                    logger.warning('loading into database...')
                with ignore_inventory_computed_fields():
                    if getattr(settings, 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', True):
                        self.load_into_database()
@@ -1273,8 +948,8 @@ class Command(NoArgsCommand):
                queries_before2 = len(connection.queries)
                self.inventory.update_computed_fields()
                if settings.SQL_DEBUG:
                    self.logger.warning('update computed fields took %d queries',
                                        len(connection.queries) - queries_before2)
                    logger.warning('update computed fields took %d queries',
                                   len(connection.queries) - queries_before2)
                try:
                    self.check_license()
                except CommandError as e:
@@ -1282,11 +957,11 @@ class Command(NoArgsCommand):
                    raise e

            if settings.SQL_DEBUG:
                self.logger.warning('Inventory import completed for %s in %0.1fs',
                                    self.inventory_source.name, time.time() - begin)
                logger.warning('Inventory import completed for %s in %0.1fs',
                               self.inventory_source.name, time.time() - begin)
            else:
                self.logger.info('Inventory import completed for %s in %0.1fs',
                                 self.inventory_source.name, time.time() - begin)
                logger.info('Inventory import completed for %s in %0.1fs',
                            self.inventory_source.name, time.time() - begin)
            status = 'successful'

            # If we're in debug mode, then log the queries and time
@@ -1294,9 +969,9 @@ class Command(NoArgsCommand):
            if settings.SQL_DEBUG:
                queries_this_import = connection.queries[queries_before:]
                sqltime = sum(float(x['time']) for x in queries_this_import)
                self.logger.warning('Inventory import required %d queries '
                                    'taking %0.3fs', len(queries_this_import),
                                    sqltime)
                logger.warning('Inventory import required %d queries '
                               'taking %0.3fs', len(queries_this_import),
                               sqltime)
        except Exception as e:
            if isinstance(e, KeyboardInterrupt):
                status = 'canceled'

@@ -97,12 +97,12 @@ class Migration(migrations.Migration):
        migrations.AlterField(
            model_name='inventorysource',
            name='source',
            field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script Locally or in Project'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
            field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a project in Tower'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
        ),
        migrations.AlterField(
            model_name='inventoryupdate',
            name='source',
            field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script Locally or in Project'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
            field=models.CharField(default=b'', max_length=32, blank=True, choices=[(b'', 'Manual'), (b'file', 'File, Directory or Script'), (b'scm', 'Sourced from a project in Tower'), (b'rax', 'Rackspace Cloud Servers'), (b'ec2', 'Amazon EC2'), (b'gce', 'Google Compute Engine'), (b'azure', 'Microsoft Azure Classic (deprecated)'), (b'azure_rm', 'Microsoft Azure Resource Manager'), (b'vmware', 'VMware vCenter'), (b'satellite6', 'Red Hat Satellite 6'), (b'cloudforms', 'Red Hat CloudForms'), (b'openstack', 'OpenStack'), (b'custom', 'Custom Script')]),
        ),
        migrations.AlterField(
            model_name='inventorysource',
@@ -135,4 +135,16 @@ class Migration(migrations.Migration):
            name='notificationtemplate',
            unique_together=set([('organization', 'name')]),
        ),

        # Add verbosity option to inventory updates
        migrations.AddField(
            model_name='inventorysource',
            name='verbosity',
            field=models.PositiveIntegerField(default=1, blank=True, choices=[(0, b'0 (WARNING)'), (1, b'1 (INFO)'), (2, b'2 (DEBUG)')]),
        ),
        migrations.AddField(
            model_name='inventoryupdate',
            name='verbosity',
            field=models.PositiveIntegerField(default=1, blank=True, choices=[(0, b'0 (WARNING)'), (1, b'1 (INFO)'), (2, b'2 (DEBUG)')]),
        ),
    ]

@@ -743,7 +743,8 @@ class InventorySourceOptions(BaseModel):

    SOURCE_CHOICES = [
        ('', _('Manual')),
        ('file', _('File, Directory or Script Locally or in Project')),
        ('file', _('File, Directory or Script')),
        ('scm', _('Sourced from a project in Tower')),
        ('rax', _('Rackspace Cloud Servers')),
        ('ec2', _('Amazon EC2')),
        ('gce', _('Google Compute Engine')),
@@ -756,6 +757,13 @@ class InventorySourceOptions(BaseModel):
        ('custom', _('Custom Script')),
    ]

    # From the options of the Django management base command
    INVENTORY_UPDATE_VERBOSITY_CHOICES = [
        (0, '0 (WARNING)'),
        (1, '1 (INFO)'),
        (2, '2 (DEBUG)'),
    ]

    # Use tools/scripts/get_ec2_filter_names.py to build this list.
    INSTANCE_FILTER_NAMES = [
        "architecture",
@@ -902,6 +910,11 @@ class InventorySourceOptions(BaseModel):
        blank=True,
        default=0,
    )
    verbosity = models.PositiveIntegerField(
        choices=INVENTORY_UPDATE_VERBOSITY_CHOICES,
        blank=True,
        default=1,
    )

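The new `verbosity` field defaults to 1 (INFO) and, via `_get_unified_job_field_names` below, is copied onto each InventoryUpdate so the import command can honor it. A hedged illustration (assumes a Django shell with an existing source):

src = InventorySource.objects.first()      # hypothetical lookup
src.verbosity = 2                          # one of INVENTORY_UPDATE_VERBOSITY_CHOICES
src.save(update_fields=['verbosity'])
# The next update inherits verbosity=2 and runs inventory_import with -v2.
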
    @classmethod
    def get_ec2_region_choices(cls):
@@ -1002,7 +1015,7 @@ class InventorySourceOptions(BaseModel):
        if not self.source:
            return None
        cred = self.credential
        if cred and self.source != 'custom':
        if cred and self.source not in ('custom', 'scm'):
            # If a credential was provided, it's important that it matches
            # the actual inventory source being used (Amazon requires Amazon
            # credentials; Rackspace requires Rackspace credentials; etc...)
@@ -1139,14 +1152,14 @@ class InventorySource(UnifiedJobTemplate, InventorySourceOptions):
    def _get_unified_job_field_names(cls):
        return ['name', 'description', 'source', 'source_path', 'source_script', 'source_vars', 'schedule',
                'credential', 'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
                'timeout', 'launch_type', 'scm_project_update',]
                'timeout', 'verbosity', 'launch_type', 'scm_project_update',]

    def save(self, *args, **kwargs):
        # If update_fields has been specified, add our field names to it,
        # if it hasn't been specified, then we're just doing a normal save.
        update_fields = kwargs.get('update_fields', [])
        is_new_instance = not bool(self.pk)
        is_scm_type = self.scm_project_id is not None
        is_scm_type = self.scm_project_id is not None and self.source == 'scm'

        # Set name automatically. Include PK (or placeholder) to make sure the names are always unique.
        replace_text = '__replace_%s__' % now()
@@ -1347,6 +1360,8 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin):
        if (self.source not in ('custom', 'ec2') and
                not (self.credential)):
            return False
        elif self.source in ('file', 'scm'):
            return False
        return True

        '''

@@ -1689,13 +1689,17 @@ class RunInventoryUpdate(BaseTask):
            env['FOREMAN_INI_PATH'] = cloud_credential
        elif inventory_update.source == 'cloudforms':
            env['CLOUDFORMS_INI_PATH'] = cloud_credential
        elif inventory_update.source == 'file':
        elif inventory_update.source == 'scm':
            # Parse source_vars to dict, update env.
            env.update(parse_yaml_or_json(inventory_update.source_vars))
        elif inventory_update.source == 'custom':
            for env_k in inventory_update.source_vars_dict:
                if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLACKLIST:
                    env[str(env_k)] = unicode(inventory_update.source_vars_dict[env_k])
        elif inventory_update.source == 'file':
            raise NotImplementedError('Can not update file sources through the task system.')
        # add private_data_files
        env['AWX_PRIVATE_DATA_DIR'] = kwargs.get('private_data_dir', '')
        return env

    def build_args(self, inventory_update, **kwargs):
@@ -1759,7 +1763,7 @@ class RunInventoryUpdate(BaseTask):
                getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper()),
            ])

        elif inventory_update.source == 'file':
        elif inventory_update.source == 'scm':
            args.append(inventory_update.get_actual_source_path())
        elif inventory_update.source == 'custom':
            runpath = tempfile.mkdtemp(prefix='ansible_tower_launch_')
@@ -1770,11 +1774,10 @@ class RunInventoryUpdate(BaseTask):
            f.write(inventory_update.source_script.script.encode('utf-8'))
            f.close()
            os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
            args.append(runpath)
            args.append(path)
            args.append("--custom")
            self.custom_dir_path.append(runpath)
        verbosity = getattr(settings, 'INVENTORY_UPDATE_VERBOSITY', 1)
        args.append('-v%d' % verbosity)
        args.append('-v%d' % inventory_update.verbosity)
        if settings.DEBUG:
            args.append('--traceback')
        return args

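The net effect on the generated command line, with illustrative values only:

# Before: verbosity came from the global INVENTORY_UPDATE_VERBOSITY setting.
# After: it comes from the individual inventory_update record.
args = ['awx-manage', 'inventory_import', '--inventory-id', '42']
args.append('-v%d' % 2)    # inventory_update.verbosity == 2 (hypothetical)
print(args)
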
@@ -1,890 +0,0 @@
#!/usr/bin/env python

# Python
import json
import optparse

inv_list = {
    "ansible1.axialmarket.com": [
        "ec2-54-226-227-106.compute-1.amazonaws.com"
    ],
    "ansible2.axialmarket.com": [
        "ec2-54-227-113-75.compute-1.amazonaws.com"
    ],
    "app1new.axialmarket.com": [
        "ec2-54-235-143-131.compute-1.amazonaws.com"
    ],
    "app2new.axialmarket.com": [
        "ec2-54-235-143-132.compute-1.amazonaws.com"
    ],
    "app2t.axialmarket.com": [
        "ec2-23-23-168-208.compute-1.amazonaws.com"
    ],
    "app2t.dev.axialmarket.com": [
        "ec2-23-23-168-208.compute-1.amazonaws.com"
    ],
    "awx.axialmarket.com": [
        "ec2-54-211-252-32.compute-1.amazonaws.com"
    ],
    "axtdev2.axialmarket.com": [
        "ec2-54-234-3-7.compute-1.amazonaws.com"
    ],
    "backup1.axialmarket.com": [
        "ec2-23-23-170-30.compute-1.amazonaws.com"
    ],
    "bah.axialmarket.com": [
        "ec2-107-20-176-139.compute-1.amazonaws.com"
    ],
    "bennew.axialmarket.com": [
        "ec2-54-243-146-75.compute-1.amazonaws.com"
    ],
    "build0.axialmarket.com": [
        "ec2-54-226-244-191.compute-1.amazonaws.com"
    ],
    "cburke0.axialmarket.com": [
        "ec2-54-226-100-117.compute-1.amazonaws.com"
    ],
    "dabnew.axialmarket.com": [
        "ec2-107-22-248-113.compute-1.amazonaws.com"
    ],
    "dannew.axialmarket.com": [
        "ec2-107-22-247-88.compute-1.amazonaws.com"
    ],
    "de1-intenv.axialmarket.com": [
        "ec2-54-224-92-80.compute-1.amazonaws.com"
    ],
    "dev11-20120311": [
        "dev11-20120311.co735munpzcw.us-east-1.rds.amazonaws.com"
    ],
    "dev11-20130828": [
        "dev11-20130828.co735munpzcw.us-east-1.rds.amazonaws.com"
    ],
    "dev11-20130903-dab": [
        "dev11-20130903-dab.co735munpzcw.us-east-1.rds.amazonaws.com"
    ],
    "firecrow.axialmarket.com": [
        "ec2-54-227-30-105.compute-1.amazonaws.com"
    ],
    "herby0.axialmarket.com": [
        "ec2-174-129-140-30.compute-1.amazonaws.com"
    ],
    "i-02966c7a": [
        "ec2-23-21-57-109.compute-1.amazonaws.com"
    ],
    "i-0485b47c": [
        "ec2-23-23-168-208.compute-1.amazonaws.com"
    ],
    "i-0805a578": [
        "ec2-107-22-234-22.compute-1.amazonaws.com"
    ],
    "i-0a1e4777": [
        "ec2-75-101-129-169.compute-1.amazonaws.com"
    ],
    "i-0e05a57e": [
        "ec2-107-22-234-180.compute-1.amazonaws.com"
    ],
    "i-116f5861": [
        "ec2-54-235-143-162.compute-1.amazonaws.com"
    ],
    "i-197edf79": [
        "ec2-54-226-244-191.compute-1.amazonaws.com"
    ],
    "i-26008355": [
        "ec2-75-101-157-248.compute-1.amazonaws.com"
    ],
    "i-2ff6135e": [
        "ec2-54-242-36-133.compute-1.amazonaws.com"
    ],
    "i-3cbc6d50": [
        "ec2-54-234-233-19.compute-1.amazonaws.com"
    ],
    "i-3e9a7f5b": [
        "ec2-54-224-92-80.compute-1.amazonaws.com"
    ],
    "i-43f6a533": [
        "ec2-54-235-143-131.compute-1.amazonaws.com"
    ],
    "i-45906822": [
        "ec2-23-21-100-222.compute-1.amazonaws.com"
    ],
    "i-508c1923": [
        "ec2-23-23-130-201.compute-1.amazonaws.com"
    ],
    "i-52970021": [
        "ec2-23-23-169-133.compute-1.amazonaws.com"
    ],
    "i-57cc2c25": [
        "ec2-54-225-229-159.compute-1.amazonaws.com"
    ],
    "i-59f23536": [
        "ec2-75-101-128-47.compute-1.amazonaws.com"
    ],
    "i-7012b200": [
        "ec2-107-22-249-212.compute-1.amazonaws.com"
    ],
    "i-73fead03": [
        "ec2-54-235-143-132.compute-1.amazonaws.com"
    ],
    "i-75faa905": [
        "ec2-54-235-143-133.compute-1.amazonaws.com"
    ],
    "i-76e49b0e": [
        "ec2-75-101-128-224.compute-1.amazonaws.com"
    ],
    "i-78c9450b": [
        "ec2-54-225-88-116.compute-1.amazonaws.com"
    ],
    "i-7aa18911": [
        "ec2-54-211-252-32.compute-1.amazonaws.com"
    ],
    "i-7dfdae0d": [
        "ec2-54-235-143-134.compute-1.amazonaws.com"
    ],
    "i-8559d6fa": [
        "ec2-23-21-224-105.compute-1.amazonaws.com"
    ],
    "i-899768e4": [
        "ec2-54-234-3-7.compute-1.amazonaws.com"
    ],
    "i-918130fb": [
        "ec2-174-129-171-101.compute-1.amazonaws.com"
    ],
    "i-99ce0ceb": [
        "ec2-107-22-234-92.compute-1.amazonaws.com"
    ],
    "i-9a450df8": [
        "ec2-50-19-184-148.compute-1.amazonaws.com"
    ],
    "i-9fce0ced": [
        "ec2-107-20-176-139.compute-1.amazonaws.com"
    ],
    "i-a80682c4": [
        "ec2-54-235-65-26.compute-1.amazonaws.com"
    ],
    "i-b43ab5df": [
        "ec2-174-129-140-30.compute-1.amazonaws.com"
    ],
    "i-baa893c2": [
        "ec2-23-23-170-30.compute-1.amazonaws.com"
    ],
    "i-bc23a0cf": [
        "ec2-75-101-159-82.compute-1.amazonaws.com"
    ],
    "i-bed948cd": [
        "ec2-54-235-112-3.compute-1.amazonaws.com"
    ],
    "i-c200c4a8": [
        "ec2-54-227-30-105.compute-1.amazonaws.com"
    ],
    "i-c69ae2be": [
        "ec2-23-21-133-17.compute-1.amazonaws.com"
    ],
    "i-c6d33fa3": [
        "ec2-54-226-100-117.compute-1.amazonaws.com"
    ],
    "i-cc4d2abf": [
        "ec2-107-20-160-49.compute-1.amazonaws.com"
    ],
    "i-cc9c3fbc": [
        "ec2-54-243-146-75.compute-1.amazonaws.com"
    ],
    "i-d01dacb3": [
        "ec2-54-234-218-33.compute-1.amazonaws.com"
    ],
    "i-da6631b3": [
        "ec2-54-226-227-106.compute-1.amazonaws.com"
    ],
    "i-dc6631b5": [
        "ec2-54-227-113-75.compute-1.amazonaws.com"
    ],
    "i-f005a580": [
        "ec2-107-22-241-13.compute-1.amazonaws.com"
    ],
    "i-f605a586": [
        "ec2-107-22-247-88.compute-1.amazonaws.com"
    ],
    "i-f805a588": [
        "ec2-107-22-248-113.compute-1.amazonaws.com"
    ],
    "i-f9829894": [
        "ec2-54-225-172-84.compute-1.amazonaws.com"
    ],
    "inf.axialmarket.com": [
        "ec2-54-225-229-159.compute-1.amazonaws.com"
    ],
    "jeffnew.axialmarket.com": [
        "ec2-107-22-234-180.compute-1.amazonaws.com"
    ],
    "jenkins.axialmarket.com": [
        "ec2-23-21-224-105.compute-1.amazonaws.com"
    ],
    "jump.axialmarket.com": [
        "ec2-23-23-169-133.compute-1.amazonaws.com"
    ],
    "key_Dana_Spiegel": [
        "ec2-50-19-184-148.compute-1.amazonaws.com"
    ],
    "key_bah-20130614": [
        "ec2-54-234-218-33.compute-1.amazonaws.com",
        "ec2-54-226-244-191.compute-1.amazonaws.com"
    ],
    "key_herby-axial-20130903": [
        "ec2-54-224-92-80.compute-1.amazonaws.com"
    ],
    "key_herbyg-axial-201308": [
        "ec2-54-211-252-32.compute-1.amazonaws.com",
        "ec2-54-234-3-7.compute-1.amazonaws.com"
    ],
    "key_ike-20120322": [
        "ec2-23-21-100-222.compute-1.amazonaws.com",
        "ec2-23-21-57-109.compute-1.amazonaws.com",
        "ec2-75-101-128-224.compute-1.amazonaws.com",
        "ec2-23-21-133-17.compute-1.amazonaws.com",
        "ec2-23-23-168-208.compute-1.amazonaws.com",
        "ec2-23-23-170-30.compute-1.amazonaws.com",
        "ec2-75-101-129-169.compute-1.amazonaws.com",
        "ec2-23-21-224-105.compute-1.amazonaws.com",
        "ec2-54-242-36-133.compute-1.amazonaws.com",
        "ec2-107-22-234-22.compute-1.amazonaws.com",
        "ec2-107-22-234-180.compute-1.amazonaws.com",
        "ec2-107-22-241-13.compute-1.amazonaws.com",
        "ec2-107-22-247-88.compute-1.amazonaws.com",
        "ec2-107-22-248-113.compute-1.amazonaws.com",
        "ec2-107-22-249-212.compute-1.amazonaws.com",
        "ec2-54-243-146-75.compute-1.amazonaws.com",
        "ec2-54-235-143-131.compute-1.amazonaws.com",
        "ec2-54-235-143-133.compute-1.amazonaws.com",
        "ec2-54-235-143-132.compute-1.amazonaws.com",
        "ec2-54-235-143-134.compute-1.amazonaws.com",
        "ec2-54-235-143-162.compute-1.amazonaws.com",
        "ec2-75-101-157-248.compute-1.amazonaws.com",
        "ec2-75-101-159-82.compute-1.amazonaws.com",
        "ec2-54-225-88-116.compute-1.amazonaws.com",
        "ec2-23-23-169-133.compute-1.amazonaws.com",
        "ec2-54-235-112-3.compute-1.amazonaws.com",
        "ec2-54-225-229-159.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-92.compute-1.amazonaws.com",
|
||||
"ec2-107-20-176-139.compute-1.amazonaws.com",
|
||||
"ec2-54-225-172-84.compute-1.amazonaws.com"
|
||||
],
|
||||
"key_matt-20120423": [
|
||||
"ec2-54-226-227-106.compute-1.amazonaws.com",
|
||||
"ec2-54-227-113-75.compute-1.amazonaws.com",
|
||||
"ec2-54-235-65-26.compute-1.amazonaws.com",
|
||||
"ec2-174-129-171-101.compute-1.amazonaws.com",
|
||||
"ec2-54-234-233-19.compute-1.amazonaws.com",
|
||||
"ec2-174-129-140-30.compute-1.amazonaws.com",
|
||||
"ec2-54-227-30-105.compute-1.amazonaws.com",
|
||||
"ec2-54-226-100-117.compute-1.amazonaws.com"
|
||||
],
|
||||
"key_mike-20121126": [
|
||||
"ec2-75-101-128-47.compute-1.amazonaws.com",
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com",
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com"
|
||||
],
|
||||
"logstore1.axialmarket.com": [
|
||||
"ec2-75-101-129-169.compute-1.amazonaws.com"
|
||||
],
|
||||
"logstore2.axialmarket.com": [
|
||||
"ec2-54-235-112-3.compute-1.amazonaws.com"
|
||||
],
|
||||
"mattnew.axialmarket.com": [
|
||||
"ec2-107-22-241-13.compute-1.amazonaws.com"
|
||||
],
|
||||
"monitor0.axialmarket.com": [
|
||||
"ec2-54-235-65-26.compute-1.amazonaws.com"
|
||||
],
|
||||
"mx0.axialmarket.com": [
|
||||
"ec2-23-21-57-109.compute-1.amazonaws.com"
|
||||
],
|
||||
"mx0a.axialmarket.com": [
|
||||
"ec2-23-21-224-105.compute-1.amazonaws.com"
|
||||
],
|
||||
"mx1.axialmarket.com": [
|
||||
"ec2-75-101-128-47.compute-1.amazonaws.com"
|
||||
],
|
||||
"mx2.axialmarket.com": [
|
||||
"ec2-75-101-128-224.compute-1.amazonaws.com"
|
||||
],
|
||||
"mx5.axialmarket.com": [
|
||||
"ec2-75-101-129-169.compute-1.amazonaws.com"
|
||||
],
|
||||
"pak.axialmarket.com": [
|
||||
"ec2-54-242-36-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"pak0.axialmarket.com": [
|
||||
"ec2-54-242-36-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"poundtest1.axialmarket.com": [
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com"
|
||||
],
|
||||
"production-db7": [
|
||||
"production-db7.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj": [
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"production-readonly-db7": [
|
||||
"production-readonly-db7.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"rabbit.axialmarket.com": [
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com"
|
||||
],
|
||||
"rds_mysql": [
|
||||
"dev11-20120311.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130828.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130903-dab.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-readonly-db7.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"web-mktg-1.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"rds_parameter_group_axialmarket-5-5": [
|
||||
"dev11-20120311.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-readonly-db7.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"rds_parameter_group_default_mysql5_1": [
|
||||
"web-mktg-1.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"rds_parameter_group_default_mysql5_5": [
|
||||
"dev11-20130828.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"rds_parameter_group_mysqldump": [
|
||||
"dev11-20130903-dab.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"releng0.axialmarket.com": [
|
||||
"ec2-23-21-100-222.compute-1.amazonaws.com"
|
||||
],
|
||||
"releng1.axialmarket.com": [
|
||||
"ec2-23-21-133-17.compute-1.amazonaws.com"
|
||||
],
|
||||
"rexnew.axialmarket.com": [
|
||||
"ec2-54-235-143-162.compute-1.amazonaws.com"
|
||||
],
|
||||
"rollupy0.axialmarket.com": [
|
||||
"ec2-54-225-172-84.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_MTA": [
|
||||
"ec2-75-101-128-47.compute-1.amazonaws.com",
|
||||
"ec2-23-21-57-109.compute-1.amazonaws.com",
|
||||
"ec2-75-101-128-224.compute-1.amazonaws.com",
|
||||
"ec2-23-21-224-105.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_WWW-PROD-2013": [
|
||||
"ec2-75-101-157-248.compute-1.amazonaws.com",
|
||||
"ec2-75-101-159-82.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_backup2012": [
|
||||
"ec2-23-23-170-30.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_dataeng-test": [
|
||||
"ec2-54-224-92-80.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_development-2013-Jan": [
|
||||
"ec2-54-226-227-106.compute-1.amazonaws.com",
|
||||
"ec2-54-227-113-75.compute-1.amazonaws.com",
|
||||
"ec2-174-129-171-101.compute-1.amazonaws.com",
|
||||
"ec2-54-234-233-19.compute-1.amazonaws.com",
|
||||
"ec2-54-234-218-33.compute-1.amazonaws.com",
|
||||
"ec2-54-226-244-191.compute-1.amazonaws.com",
|
||||
"ec2-174-129-140-30.compute-1.amazonaws.com",
|
||||
"ec2-54-227-30-105.compute-1.amazonaws.com",
|
||||
"ec2-54-226-100-117.compute-1.amazonaws.com",
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-22.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-180.compute-1.amazonaws.com",
|
||||
"ec2-107-22-241-13.compute-1.amazonaws.com",
|
||||
"ec2-107-22-247-88.compute-1.amazonaws.com",
|
||||
"ec2-107-22-248-113.compute-1.amazonaws.com",
|
||||
"ec2-107-22-249-212.compute-1.amazonaws.com",
|
||||
"ec2-54-243-146-75.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-162.compute-1.amazonaws.com",
|
||||
"ec2-54-225-88-116.compute-1.amazonaws.com",
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com",
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-92.compute-1.amazonaws.com",
|
||||
"ec2-107-20-176-139.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_development-summer2012": [
|
||||
"dev11-20120311.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130828.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130903-dab.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-readonly-db7.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"security_group_development2012July": [
|
||||
"ec2-23-23-168-208.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_inf-mgmt-2013": [
|
||||
"ec2-54-225-229-159.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_jump": [
|
||||
"ec2-23-23-169-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_monitor-GOD-2013": [
|
||||
"ec2-54-235-65-26.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_pak-internal": [
|
||||
"ec2-54-242-36-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_production": [
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com",
|
||||
"production-db7.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"security_group_production-NEWWORLD-201202": [
|
||||
"ec2-54-235-143-131.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-133.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-132.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-134.compute-1.amazonaws.com",
|
||||
"ec2-54-225-172-84.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_production-awx": [
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_releng20120404": [
|
||||
"ec2-23-21-100-222.compute-1.amazonaws.com",
|
||||
"ec2-23-21-133-17.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_util-20121011": [
|
||||
"ec2-75-101-129-169.compute-1.amazonaws.com",
|
||||
"ec2-54-235-112-3.compute-1.amazonaws.com"
|
||||
],
|
||||
"security_group_www-mktg": [
|
||||
"web-mktg-1.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"stevenew.axialmarket.com": [
|
||||
"ec2-107-22-234-92.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Environment_Production": [
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_INF-umgmt1": [
|
||||
"ec2-54-225-229-159.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-PROD-app1": [
|
||||
"ec2-54-235-143-131.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-PROD-app2": [
|
||||
"ec2-54-235-143-132.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-PROD-worker1": [
|
||||
"ec2-54-235-143-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-PROD-worker2": [
|
||||
"ec2-54-235-143-134.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-bah": [
|
||||
"ec2-107-20-176-139.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-bennew": [
|
||||
"ec2-54-243-146-75.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-dabnew": [
|
||||
"ec2-107-22-248-113.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-dannew": [
|
||||
"ec2-107-22-247-88.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-jeffnew": [
|
||||
"ec2-107-22-234-180.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-jumphost-2": [
|
||||
"ec2-23-23-169-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-mattnew": [
|
||||
"ec2-107-22-241-13.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-poundtest1": [
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-poundtest1_": [
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-rexnew": [
|
||||
"ec2-54-235-143-162.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-stevenew-replace": [
|
||||
"ec2-107-22-234-92.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-tannernew": [
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-thomasnew-2": [
|
||||
"ec2-54-225-88-116.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-willnew": [
|
||||
"ec2-107-22-234-22.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_NEWWORLD-worker1devnew": [
|
||||
"ec2-107-22-249-212.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_WWW-TEST": [
|
||||
"ec2-54-234-233-19.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_WWW1-MKTG": [
|
||||
"ec2-75-101-157-248.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_WWW2-MKTG": [
|
||||
"ec2-75-101-159-82.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_ansible": [
|
||||
"ec2-54-226-227-106.compute-1.amazonaws.com",
|
||||
"ec2-54-227-113-75.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_app2t_development_axialmarket_com": [
|
||||
"ec2-23-23-168-208.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_awx": [
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_axtdev2": [
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_backup1": [
|
||||
"ec2-23-23-170-30.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_build_server": [
|
||||
"ec2-54-226-244-191.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_cburke0": [
|
||||
"ec2-54-226-100-117.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_dataeng_test1": [
|
||||
"ec2-54-224-92-80.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_firecrow-dev": [
|
||||
"ec2-54-227-30-105.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_herby0": [
|
||||
"ec2-174-129-140-30.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_logstore1": [
|
||||
"ec2-75-101-129-169.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_logstore2": [
|
||||
"ec2-54-235-112-3.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_mx0": [
|
||||
"ec2-23-21-57-109.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_mx0a": [
|
||||
"ec2-23-21-224-105.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_mx1_new": [
|
||||
"ec2-75-101-128-47.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_mx2": [
|
||||
"ec2-75-101-128-224.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_new-testapp1": [
|
||||
"ec2-174-129-171-101.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_pak0_axialmarket_com": [
|
||||
"ec2-54-242-36-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_rabbit_axialmarket_com": [
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_releng0": [
|
||||
"ec2-23-21-100-222.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_releng1": [
|
||||
"ec2-23-21-133-17.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_rollupy0-PROD": [
|
||||
"ec2-54-225-172-84.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_tannernew_": [
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_testapp1": [
|
||||
"ec2-54-234-218-33.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Name_zabbix-upgrade": [
|
||||
"ec2-54-235-65-26.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_Use_RabbitMQ__celerycam__celerybeat__celeryd__postfix": [
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_environment_dev": [
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_environment_production": [
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_id_awx": [
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_id_axtdev2": [
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_os_ubuntu": [
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com",
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_primary_role_awx": [
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_primary_role_dev": [
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_purpose_syscleanup": [
|
||||
"ec2-23-21-100-222.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_role_awx_": [
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com"
|
||||
],
|
||||
"tag_role_dev_": [
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com"
|
||||
],
|
||||
"tannernew.axialmarket.com": [
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com"
|
||||
],
|
||||
"testapp1.axialmarket.com": [
|
||||
"ec2-174-129-171-101.compute-1.amazonaws.com"
|
||||
],
|
||||
"testapp2.axialmarket.com": [
|
||||
"ec2-54-234-218-33.compute-1.amazonaws.com"
|
||||
],
|
||||
"testnoelb.axialmarket.com": [
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com"
|
||||
],
|
||||
"testworker1.axialmarket.com": [
|
||||
"ec2-107-22-249-212.compute-1.amazonaws.com"
|
||||
],
|
||||
"thomasnew.axialmarket.com": [
|
||||
"ec2-54-225-88-116.compute-1.amazonaws.com"
|
||||
],
|
||||
"type_db_m1_medium": [
|
||||
"web-mktg-1.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"type_db_m1_xlarge": [
|
||||
"dev11-20120311.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130828.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130903-dab.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-readonly-db7.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"type_m1_large": [
|
||||
"ec2-54-235-65-26.compute-1.amazonaws.com",
|
||||
"ec2-174-129-171-101.compute-1.amazonaws.com",
|
||||
"ec2-54-234-218-33.compute-1.amazonaws.com",
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com",
|
||||
"ec2-174-129-140-30.compute-1.amazonaws.com",
|
||||
"ec2-54-227-30-105.compute-1.amazonaws.com",
|
||||
"ec2-54-226-100-117.compute-1.amazonaws.com",
|
||||
"ec2-54-224-92-80.compute-1.amazonaws.com",
|
||||
"ec2-23-23-168-208.compute-1.amazonaws.com",
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-22.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-180.compute-1.amazonaws.com",
|
||||
"ec2-107-22-241-13.compute-1.amazonaws.com",
|
||||
"ec2-107-22-247-88.compute-1.amazonaws.com",
|
||||
"ec2-107-22-248-113.compute-1.amazonaws.com",
|
||||
"ec2-107-22-249-212.compute-1.amazonaws.com",
|
||||
"ec2-54-243-146-75.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-131.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-132.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-162.compute-1.amazonaws.com",
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-92.compute-1.amazonaws.com",
|
||||
"ec2-107-20-176-139.compute-1.amazonaws.com"
|
||||
],
|
||||
"type_m1_medium": [
|
||||
"ec2-54-226-227-106.compute-1.amazonaws.com",
|
||||
"ec2-54-227-113-75.compute-1.amazonaws.com",
|
||||
"ec2-54-234-233-19.compute-1.amazonaws.com",
|
||||
"ec2-54-226-244-191.compute-1.amazonaws.com",
|
||||
"ec2-23-21-100-222.compute-1.amazonaws.com",
|
||||
"ec2-23-21-133-17.compute-1.amazonaws.com",
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com",
|
||||
"ec2-54-242-36-133.compute-1.amazonaws.com",
|
||||
"ec2-75-101-157-248.compute-1.amazonaws.com",
|
||||
"ec2-75-101-159-82.compute-1.amazonaws.com",
|
||||
"ec2-54-225-88-116.compute-1.amazonaws.com",
|
||||
"ec2-23-23-169-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"type_m1_small": [
|
||||
"ec2-75-101-129-169.compute-1.amazonaws.com",
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com"
|
||||
],
|
||||
"type_m1_xlarge": [
|
||||
"ec2-54-235-143-133.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-134.compute-1.amazonaws.com",
|
||||
"ec2-54-235-112-3.compute-1.amazonaws.com",
|
||||
"ec2-54-225-172-84.compute-1.amazonaws.com"
|
||||
],
|
||||
"type_m2_2xlarge": [
|
||||
"ec2-23-23-170-30.compute-1.amazonaws.com"
|
||||
],
|
||||
"type_t1_micro": [
|
||||
"ec2-75-101-128-47.compute-1.amazonaws.com",
|
||||
"ec2-23-21-57-109.compute-1.amazonaws.com",
|
||||
"ec2-75-101-128-224.compute-1.amazonaws.com",
|
||||
"ec2-23-21-224-105.compute-1.amazonaws.com",
|
||||
"ec2-54-225-229-159.compute-1.amazonaws.com"
|
||||
],
|
||||
"us-east-1": [
|
||||
"ec2-54-226-227-106.compute-1.amazonaws.com",
|
||||
"ec2-54-227-113-75.compute-1.amazonaws.com",
|
||||
"ec2-54-235-65-26.compute-1.amazonaws.com",
|
||||
"ec2-174-129-171-101.compute-1.amazonaws.com",
|
||||
"ec2-54-234-233-19.compute-1.amazonaws.com",
|
||||
"ec2-75-101-128-47.compute-1.amazonaws.com",
|
||||
"ec2-54-234-218-33.compute-1.amazonaws.com",
|
||||
"ec2-54-226-244-191.compute-1.amazonaws.com",
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com",
|
||||
"ec2-174-129-140-30.compute-1.amazonaws.com",
|
||||
"ec2-54-227-30-105.compute-1.amazonaws.com",
|
||||
"ec2-23-21-100-222.compute-1.amazonaws.com",
|
||||
"ec2-54-226-100-117.compute-1.amazonaws.com",
|
||||
"ec2-54-224-92-80.compute-1.amazonaws.com",
|
||||
"ec2-23-21-57-109.compute-1.amazonaws.com",
|
||||
"ec2-75-101-128-224.compute-1.amazonaws.com",
|
||||
"ec2-23-21-133-17.compute-1.amazonaws.com",
|
||||
"ec2-23-23-168-208.compute-1.amazonaws.com",
|
||||
"ec2-23-23-170-30.compute-1.amazonaws.com",
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com",
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com",
|
||||
"ec2-75-101-129-169.compute-1.amazonaws.com",
|
||||
"ec2-23-21-224-105.compute-1.amazonaws.com",
|
||||
"ec2-54-242-36-133.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-22.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-180.compute-1.amazonaws.com",
|
||||
"ec2-107-22-241-13.compute-1.amazonaws.com",
|
||||
"ec2-107-22-247-88.compute-1.amazonaws.com",
|
||||
"ec2-107-22-248-113.compute-1.amazonaws.com",
|
||||
"ec2-107-22-249-212.compute-1.amazonaws.com",
|
||||
"ec2-54-243-146-75.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-131.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-133.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-132.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-134.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-162.compute-1.amazonaws.com",
|
||||
"ec2-75-101-157-248.compute-1.amazonaws.com",
|
||||
"ec2-75-101-159-82.compute-1.amazonaws.com",
|
||||
"ec2-54-225-88-116.compute-1.amazonaws.com",
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com",
|
||||
"ec2-23-23-169-133.compute-1.amazonaws.com",
|
||||
"ec2-54-235-112-3.compute-1.amazonaws.com",
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com",
|
||||
"ec2-54-225-229-159.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-92.compute-1.amazonaws.com",
|
||||
"ec2-107-20-176-139.compute-1.amazonaws.com",
|
||||
"ec2-54-225-172-84.compute-1.amazonaws.com",
|
||||
"dev11-20120311.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130828.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130903-dab.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-readonly-db7.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"web-mktg-1.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"us-east-1c": [
|
||||
"ec2-23-21-100-222.compute-1.amazonaws.com",
|
||||
"ec2-23-23-168-208.compute-1.amazonaws.com",
|
||||
"ec2-75-101-129-169.compute-1.amazonaws.com",
|
||||
"ec2-107-22-249-212.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-132.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-134.compute-1.amazonaws.com",
|
||||
"ec2-75-101-157-248.compute-1.amazonaws.com",
|
||||
"ec2-54-235-112-3.compute-1.amazonaws.com",
|
||||
"ec2-107-20-160-49.compute-1.amazonaws.com",
|
||||
"ec2-54-225-172-84.compute-1.amazonaws.com",
|
||||
"dev11-20130828.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"dev11-20130903-dab.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-db7-rdssnap-p4hsx77hy8l5zqj.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"us-east-1d": [
|
||||
"ec2-54-226-227-106.compute-1.amazonaws.com",
|
||||
"ec2-54-227-113-75.compute-1.amazonaws.com",
|
||||
"ec2-54-235-65-26.compute-1.amazonaws.com",
|
||||
"ec2-174-129-171-101.compute-1.amazonaws.com",
|
||||
"ec2-54-234-233-19.compute-1.amazonaws.com",
|
||||
"ec2-75-101-128-47.compute-1.amazonaws.com",
|
||||
"ec2-54-234-218-33.compute-1.amazonaws.com",
|
||||
"ec2-54-226-244-191.compute-1.amazonaws.com",
|
||||
"ec2-50-19-184-148.compute-1.amazonaws.com",
|
||||
"ec2-174-129-140-30.compute-1.amazonaws.com",
|
||||
"ec2-54-227-30-105.compute-1.amazonaws.com",
|
||||
"ec2-54-226-100-117.compute-1.amazonaws.com",
|
||||
"ec2-54-224-92-80.compute-1.amazonaws.com",
|
||||
"ec2-23-21-57-109.compute-1.amazonaws.com",
|
||||
"ec2-75-101-128-224.compute-1.amazonaws.com",
|
||||
"ec2-23-21-133-17.compute-1.amazonaws.com",
|
||||
"ec2-54-211-252-32.compute-1.amazonaws.com",
|
||||
"ec2-54-234-3-7.compute-1.amazonaws.com",
|
||||
"ec2-23-21-224-105.compute-1.amazonaws.com",
|
||||
"ec2-54-242-36-133.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-22.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-180.compute-1.amazonaws.com",
|
||||
"ec2-107-22-241-13.compute-1.amazonaws.com",
|
||||
"ec2-107-22-247-88.compute-1.amazonaws.com",
|
||||
"ec2-107-22-248-113.compute-1.amazonaws.com",
|
||||
"ec2-54-243-146-75.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-131.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-133.compute-1.amazonaws.com",
|
||||
"ec2-54-235-143-162.compute-1.amazonaws.com",
|
||||
"ec2-75-101-159-82.compute-1.amazonaws.com",
|
||||
"ec2-54-225-88-116.compute-1.amazonaws.com",
|
||||
"ec2-23-23-130-201.compute-1.amazonaws.com",
|
||||
"ec2-23-23-169-133.compute-1.amazonaws.com",
|
||||
"ec2-54-225-229-159.compute-1.amazonaws.com",
|
||||
"ec2-107-22-234-92.compute-1.amazonaws.com",
|
||||
"ec2-107-20-176-139.compute-1.amazonaws.com",
|
||||
"dev11-20120311.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"web-mktg-1.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"us-east-1e": [
|
||||
"ec2-23-23-170-30.compute-1.amazonaws.com",
|
||||
"production-db7.co735munpzcw.us-east-1.rds.amazonaws.com",
|
||||
"production-readonly-db7.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"web-mktg-1": [
|
||||
"web-mktg-1.co735munpzcw.us-east-1.rds.amazonaws.com"
|
||||
],
|
||||
"web1.axialmarket.com": [
|
||||
"ec2-75-101-157-248.compute-1.amazonaws.com"
|
||||
],
|
||||
"web2.axialmarket.com": [
|
||||
"ec2-75-101-159-82.compute-1.amazonaws.com"
|
||||
],
|
||||
"willnew.axialmarket.com": [
|
||||
"ec2-107-22-234-22.compute-1.amazonaws.com"
|
||||
],
|
||||
"worker1new.axialmarket.com": [
|
||||
"ec2-54-235-143-133.compute-1.amazonaws.com"
|
||||
],
|
||||
"worker1newdev.axialmarket.com": [
|
||||
"ec2-107-22-249-212.compute-1.amazonaws.com"
|
||||
],
|
||||
"worker2new.axialmarket.com": [
|
||||
"ec2-54-235-143-134.compute-1.amazonaws.com"
|
||||
],
|
||||
"www-test.axialmarket.com": [
|
||||
"ec2-54-234-233-19.compute-1.amazonaws.com"
|
||||
],
|
||||
'_meta': {
|
||||
'hostvars': {}
|
||||
}
|
||||
}

host_vars = {

}

if __name__ == '__main__':
    parser = optparse.OptionParser()
    parser.add_option('--list', action='store_true', dest='list')
    parser.add_option('--host', dest='hostname', default='')
    options, args = parser.parse_args()
    if options.list:
        print json.dumps(inv_list, indent=4)
    elif options.hostname:
        print json.dumps(host_vars, indent=4)
    else:
        print json.dumps({}, indent=4)
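
# The block above follows the standard Ansible dynamic inventory contract:
# `--list` prints the whole inventory (groups, hosts, and `_meta.hostvars`)
# as JSON, and `--host <name>` prints one host's variables. Populating
# `_meta.hostvars` in the --list output lets consumers such as
# `ansible-inventory` avoid one --host call per host.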
@ -1,63 +0,0 @@
#!/usr/bin/env python

# Python
import json
import optparse
import os

nhosts = int(os.environ.get('NHOSTS', 100))

inv_list = {
    '_meta': {
        'hostvars': {},
    },
}

for n in xrange(nhosts):
    hostname = 'host-%08d.example.com' % n
    group_evens_odds = 'evens.example.com' if n % 2 == 0 else 'odds.example.com'
    group_threes = 'threes.example.com' if n % 3 == 0 else ''
    group_fours = 'fours.example.com' if n % 4 == 0 else ''
    group_fives = 'fives.example.com' if n % 5 == 0 else ''
    group_sixes = 'sixes.example.com' if n % 6 == 0 else ''
    group_sevens = 'sevens.example.com' if n % 7 == 0 else ''
    group_eights = 'eights.example.com' if n % 8 == 0 else ''
    group_nines = 'nines.example.com' if n % 9 == 0 else ''
    group_tens = 'tens.example.com' if n % 10 == 0 else ''
    group_by_10s = 'group-%07dX.example.com' % (n / 10)
    group_by_100s = 'group-%06dXX.example.com' % (n / 100)
    group_by_1000s = 'group-%05dXXX.example.com' % (n / 1000)
    for group in [group_evens_odds, group_threes, group_fours, group_fives, group_sixes, group_sevens, group_eights, group_nines, group_tens, group_by_10s]:
        if not group:
            continue
        if group in inv_list:
            inv_list[group]['hosts'].append(hostname)
        else:
            inv_list[group] = {'hosts': [hostname], 'children': [], 'vars': {'group_prefix': group.split('.')[0]}}
    if group_by_1000s not in inv_list:
        inv_list[group_by_1000s] = {'hosts': [], 'children': [], 'vars': {'group_prefix': group_by_1000s.split('.')[0]}}
    if group_by_100s not in inv_list:
        inv_list[group_by_100s] = {'hosts': [], 'children': [], 'vars': {'group_prefix': group_by_100s.split('.')[0]}}
    if group_by_100s not in inv_list[group_by_1000s]['children']:
        inv_list[group_by_1000s]['children'].append(group_by_100s)
    if group_by_10s not in inv_list[group_by_100s]['children']:
        inv_list[group_by_100s]['children'].append(group_by_10s)
    inv_list['_meta']['hostvars'][hostname] = {
        'ansible_ssh_user': 'example',
        'ansible_connection': 'local',
        'host_prefix': hostname.split('.')[0],
        'host_id': n,
    }

if __name__ == '__main__':
    parser = optparse.OptionParser()
    parser.add_option('--list', action='store_true', dest='list')
    parser.add_option('--host', dest='hostname', default='')
    options, args = parser.parse_args()
    if options.list:
        print json.dumps(inv_list, indent=4)
    elif options.hostname:
        print json.dumps(inv_list['_meta']['hostvars'][options.hostname], indent=4)
    else:
        print json.dumps({}, indent=4)
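
# The generator above places each synthetic host in modulus-based groups
# (evens/odds, threes, ... tens) plus a three-level prefix hierarchy:
# group-%05dXXX contains group-%06dXX, which contains group-%07dX, which
# holds the hosts themselves.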

@ -9,68 +9,86 @@ import mock
from django.core.management.base import CommandError

# AWX
from awx.main.management.commands.inventory_import import (
    Command
)
from awx.main.management.commands import inventory_import
from awx.main.models import Inventory, Host, Group
from awx.main.utils.mem_inventory import dict_to_mem_data


TEST_INVENTORY_INI = '''\
# Some comment about blah blah blah...

[webservers]
web1.example.com ansible_ssh_host=w1.example.net
web2.example.com
web3.example.com:1022

[webservers:vars] # Comment on a section
webvar=blah # Comment on an option

[dbservers]
db1.example.com
db2.example.com

[dbservers:vars]
dbvar=ugh

[servers:children]
webservers
dbservers

[servers:vars]
varb=B

[all:vars]
vara=A

[others]
10.11.12.13
10.12.14.16:8022
fe80::1610:9fff:fedd:654b
[fe80::1610:9fff:fedd:b654]:1022
::1
'''
TEST_INVENTORY_CONTENT = {
    "_meta": {
        "hostvars": {}
    },
    "all": {
        "children": [
            "others",
            "servers",
            "ungrouped"
        ],
        "vars": {
            "vara": "A"
        }
    },
    "dbservers": {
        "hosts": [
            "db1.example.com",
            "db2.example.com"
        ],
        "vars": {
            "dbvar": "ugh"
        }
    },
    "others": {
        "hosts": {
            "10.11.12.13": {},
            "10.12.14.16": {"ansible_port": 8022},
            "::1": {},
            "fe80::1610:9fff:fedd:654b": {},
            "fe80::1610:9fff:fedd:b654": {"ansible_port": 1022}
        }
    },
    "servers": {
        "children": [
            "dbservers",
            "webservers"
        ],
        "vars": {
            "varb": "B"
        }
    },
    "ungrouped": {},
    "webservers": {
        "hosts": {
            "web1.example.com": {
                "ansible_ssh_host": "w1.example.net"
            },
            "web2.example.com": {},
            "web3.example.com": {
                "ansible_port": 1022
            }
        },
        "vars": {
            "webvar": "blah"
        }
    }
}
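
# The dict above mirrors the JSON structure that `ansible-inventory --list`
# emits for TEST_INVENTORY_INI; dict_to_mem_data() converts it into the
# in-memory group/host objects that the import command consumes.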


@pytest.fixture(scope='session')
def test_dir(tmpdir_factory):
    return tmpdir_factory.mktemp('inv_files', numbered=False)
TEST_MEM_OBJECTS = dict_to_mem_data(TEST_INVENTORY_CONTENT)


@pytest.fixture(scope='session')
def ini_file(test_dir):
    fn = test_dir.join('test_hosts')
    fn.write(TEST_INVENTORY_INI)
    return fn
def mock_logging(self):
    pass


@pytest.mark.django_db
@mock.patch.object(Command, 'check_license', mock.MagicMock())
@pytest.mark.inventory_import
@mock.patch.object(inventory_import.Command, 'check_license', mock.MagicMock())
@mock.patch.object(inventory_import.Command, 'set_logging_level', mock_logging)
class TestInvalidOptionsFunctional:

    def test_invalid_options_invalid_source(self, inventory):
        # Give invalid file to the command
        cmd = Command()
        cmd = inventory_import.Command()
        with mock.patch('django.db.transaction.rollback'):
            with pytest.raises(IOError) as err:
                cmd.handle_noargs(
@ -78,28 +96,33 @@ class TestInvalidOptionsFunctional:
                    source='/tmp/pytest-of-root/pytest-7/inv_files0-invalid')
        assert 'Source does not exist' in err.value.message

    def test_invalid_inventory_id(self, ini_file):
        cmd = Command()
    def test_invalid_inventory_id(self):
        cmd = inventory_import.Command()
        with pytest.raises(CommandError) as err:
            cmd.handle_noargs(inventory_id=42, source=ini_file.dirname)
            cmd.handle_noargs(inventory_id=42, source='/notapath/shouldnotmatter')
        assert 'id = 42' in err.value.message
        assert 'cannot be found' in err.value.message

    def test_invalid_inventory_name(self, ini_file):
        cmd = Command()
    def test_invalid_inventory_name(self):
        cmd = inventory_import.Command()
        with pytest.raises(CommandError) as err:
            cmd.handle_noargs(inventory_name='fooservers', source=ini_file.dirname)
            cmd.handle_noargs(inventory_name='fooservers', source='/notapath/shouldnotmatter')
        assert 'name = fooservers' in err.value.message
        assert 'cannot be found' in err.value.message


@pytest.mark.django_db
@mock.patch.object(Command, 'check_license', mock.MagicMock())
@pytest.mark.inventory_import
@mock.patch.object(inventory_import.Command, 'check_license', mock.MagicMock())
@mock.patch.object(inventory_import.Command, 'set_logging_level', mock_logging)
class TestINIImports:

    def test_inventory_single_ini_import(self, inventory, ini_file, capsys):
        cmd = Command()
        r = cmd.handle_noargs(inventory_id=inventory.pk, source=ini_file.dirname)
    @mock.patch.object(inventory_import.AnsibleInventoryLoader, 'load', mock.MagicMock(return_value=TEST_MEM_OBJECTS))
    def test_inventory_single_ini_import(self, inventory, capsys):
        cmd = inventory_import.Command()
        r = cmd.handle_noargs(
            inventory_id=inventory.pk, source=__file__,
            method='backport')
        out, err = capsys.readouterr()
        assert r is None
        assert out == ''
@ -117,10 +140,12 @@ class TestINIImports:
        reloaded_inv = Inventory.objects.get(pk=inventory.pk)
        assert reloaded_inv.variables_dict == {'vara': 'A'}

        # Group vars are applied to hosts in the newer versions
        assert Host.objects.get(name='web1.example.com').variables_dict == {'ansible_ssh_host': 'w1.example.net'}
        assert Host.objects.get(name='web3.example.com').variables_dict == {'ansible_ssh_port': 1022}
        assert Host.objects.get(name='fe80::1610:9fff:fedd:b654').variables_dict == {'ansible_ssh_port': 1022}
        assert Host.objects.get(name='10.12.14.16').variables_dict == {'ansible_ssh_port': 8022}
        # Old version uses `ansible_ssh_port` but new version uses `ansible_port`
        assert Host.objects.get(name='web3.example.com').variables_dict == {'ansible_port': 1022}
        assert Host.objects.get(name='fe80::1610:9fff:fedd:b654').variables_dict == {'ansible_port': 1022}
        assert Host.objects.get(name='10.12.14.16').variables_dict == {'ansible_port': 8022}

        servers = Group.objects.get(name='servers')
        assert servers.variables_dict == {'varb': 'B'}
@ -143,24 +168,53 @@ class TestINIImports:
        assert invsrc.inventory_updates.count() == 1
        assert invsrc.inventory_updates.first().status == 'successful'

    def test_inventory_import_group_vars_file(self, inventory, ini_file, tmpdir_factory):
        # Create an extra group_vars file for group webservers
        gvarf = tmpdir_factory.mktemp('inv_files/group_vars', numbered=False).join('webservers')
        gvarf.write('''webservers_only_variable: foobar\n''')
        # Check creation of ad-hoc inventory source - this was not called with one specified
        assert reloaded_inv.inventory_sources.count() == 1
        assert reloaded_inv.inventory_sources.all()[0].source == 'file'

        cmd = Command()
        cmd.handle_noargs(inventory_id=inventory.pk, source=ini_file.dirname)
    @mock.patch.object(
        inventory_import, 'load_inventory_source', mock.MagicMock(
            return_value=dict_to_mem_data(
                {
                    "_meta": {
                        "hostvars": {"foo": {"some_hostvar": "foobar"}}
                    },
                    "all": {
                        "children": ["ungrouped"]
                    },
                    "ungrouped": {
                        "hosts": ["foo"]
                    }
                }).all_group
        )
    )
    def test_hostvars_are_saved(self, inventory):
        cmd = inventory_import.Command()
        cmd.handle_noargs(inventory_id=inventory.pk, source='doesnt matter')
        assert inventory.hosts.count() == 1
        h = inventory.hosts.all()[0]
        assert h.name == 'foo'
        assert h.variables_dict == {"some_hostvar": "foobar"}
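
    # Patching `load_inventory_source` with a dict_to_mem_data() structure,
    # the parsed form of ansible-inventory JSON, lets these tests exercise
    # the database-sync logic without shelling out to a real inventory.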

        servers = Group.objects.get(name='webservers')
        assert servers.variables_dict == {'webvar': 'blah', 'webservers_only_variable': 'foobar'}

    def test_inventory_import_host_vars_file(self, inventory, ini_file, tmpdir_factory):
        # Create an extra host_vars file for one specific host
        gvarf = tmpdir_factory.mktemp('inv_files/host_vars', numbered=False).join('web1.example.com')
        gvarf.write('''host_only_variable: foobar\n''')

        cmd = Command()
        cmd.handle_noargs(inventory_id=inventory.pk, source=ini_file.dirname)

        Host.objects.get(name='web1.example.com').variables_dict == {
            'ansible_ssh_host': 'w1.example.net', 'host_only_variable': 'foobar'}
    @mock.patch.object(
        inventory_import, 'load_inventory_source', mock.MagicMock(
            return_value=dict_to_mem_data(
                {
                    "_meta": {
                        "hostvars": {}
                    },
                    "all": {
                        "children": ["fooland", "barland"]
                    },
                    "fooland": {
                        "children": ["barland"]
                    },
                    "barland": {
                        "children": ["fooland"]
                    }
                }).all_group
        )
    )
    def test_recursive_group_error(self, inventory):
        cmd = inventory_import.Command()
        cmd.handle_noargs(inventory_id=inventory.pk, source='doesnt matter')

@ -16,7 +16,8 @@ class TestSCMUpdateFeatures:
        inv_src = InventorySource(
            scm_project=project,
            source_path='inventory_file',
            inventory=inventory)
            inventory=inventory,
            source='scm')
        with mock.patch.object(inv_src.scm_project, 'update') as mck_update:
            inv_src.save()
            mck_update.assert_called_once_with()

@ -5,16 +5,12 @@
import json
import os
import shutil
import string
import StringIO
import sys
import tempfile
import time
import urlparse
import unittest2 as unittest

# Django
from django.conf import settings
from django.core.management import call_command
from django.utils.timezone import now
from django.test.utils import override_settings
@ -36,77 +32,6 @@ TEST_PLAYBOOK = '''- hosts: test-group
    command: test 2 = 2
'''

TEST_INVENTORY_INI = '''\
# Some comment about blah blah blah...

[webservers]
web1.example.com ansible_ssh_host=w1.example.net
web2.example.com
web3.example.com:1022

[webservers:vars] # Comment on a section
webvar=blah # Comment on an option

[dbservers]
db1.example.com
db2.example.com

[dbservers:vars]
dbvar=ugh

[servers:children]
webservers
dbservers

[servers:vars]
varb=B

[all:vars]
vara=A

[others]
10.11.12.13
10.12.14.16:8022
fe80::1610:9fff:fedd:654b
[fe80::1610:9fff:fedd:b654]:1022
::1
'''

TEST_INVENTORY_INI_WITH_HOST_PATTERNS = '''\
[dotcom]
web[00:63].example.com ansible_ssh_user=example
dns.example.com

[dotnet]
db-[a:z].example.net
ns.example.net

[dotorg]
[A:F][0:9].example.org:1022 ansible_ssh_user=example
mx.example.org

[dotus]
lb[00:08:2].example.us even_odd=even
lb[01:09:2].example.us even_odd=odd

[dotcc]
media[0:9][0:9].example.cc
'''
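
# The pattern ranges above expand before import: web[00:63] yields 64
# hosts, db-[a:z] yields 26, [A:F][0:9] yields 60, and the stride of 2 in
# lb[00:08:2]/lb[01:09:2] splits even and odd load balancers.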

TEST_INVENTORY_INI_WITH_RECURSIVE_GROUPS = '''\
[family:children]
parent

[parent:children]
child

[child:children]
grandchild

[grandchild:children]
parent
'''
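
# The parent -> child -> grandchild -> parent cycle above exercises the
# importer's handling of recursive group definitions.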
|
||||
|
||||
|
||||
class BaseCommandMixin(object):
|
||||
'''
|
||||
@ -449,412 +374,3 @@ class CleanupActivityStreamTest(BaseCommandMixin, BaseTest):
|
||||
self.assertFalse(count_after)
|
||||
self.assertTrue(cleanup_elapsed < (create_elapsed / 4),
|
||||
'create took %0.3fs, cleanup took %0.3fs, expected < %0.3fs' % (create_elapsed, cleanup_elapsed, create_elapsed / 4))
|
||||
|
||||
|
||||
@unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test')
|
||||
class InventoryImportTest(BaseCommandMixin, BaseLiveServerTest):
|
||||
'''
|
||||
Test cases for inventory_import management command.
|
||||
'''
|
||||
|
||||
def setUp(self):
|
||||
super(InventoryImportTest, self).setUp()
|
||||
self.start_rabbit()
|
||||
self.setup_instances()
|
||||
self.create_test_inventories()
|
||||
self.create_test_ini()
|
||||
self.create_test_license_file()
|
||||
|
||||
def tearDown(self):
|
||||
super(InventoryImportTest, self).tearDown()
|
||||
self.stop_rabbit()
|
||||
|
||||
def create_test_ini(self, inv_dir=None, ini_content=None):
|
||||
ini_content = ini_content or TEST_INVENTORY_INI
|
||||
handle, self.ini_path = tempfile.mkstemp(suffix='.txt', dir=inv_dir)
|
||||
ini_file = os.fdopen(handle, 'w')
|
||||
ini_file.write(ini_content)
|
||||
ini_file.close()
|
||||
self._temp_paths.append(self.ini_path)
|
||||
|
||||
def create_test_dir(self, host_names=None, group_names=None, suffix=''):
|
||||
host_names = host_names or []
|
||||
group_names = group_names or []
|
||||
if 'all' not in group_names:
|
||||
group_names.insert(0, 'all')
|
||||
self.inv_dir = tempfile.mkdtemp()
|
||||
self._temp_paths.append(self.inv_dir)
|
||||
self.create_test_ini(self.inv_dir)
|
||||
group_vars_dir = os.path.join(self.inv_dir, 'group_vars')
|
||||
os.makedirs(group_vars_dir)
|
||||
for group_name in group_names:
|
||||
if suffix == '.json':
|
||||
group_vars_content = '''{"test_group_name": "%s"}\n''' % group_name
|
||||
else:
|
||||
group_vars_content = '''test_group_name: %s\n''' % group_name
|
||||
group_vars_file = os.path.join(group_vars_dir, '%s%s' % (group_name, suffix))
|
||||
file(group_vars_file, 'wb').write(group_vars_content)
|
||||
if host_names:
|
||||
host_vars_dir = os.path.join(self.inv_dir, 'host_vars')
|
||||
os.makedirs(host_vars_dir)
|
||||
for host_name in host_names:
|
||||
if suffix == '.json':
|
||||
host_vars_content = '''{"test_host_name": "%s"}''' % host_name
|
||||
else:
|
||||
host_vars_content = '''test_host_name: %s''' % host_name
|
||||
host_vars_file = os.path.join(host_vars_dir, '%s%s' % (host_name, suffix))
|
||||
file(host_vars_file, 'wb').write(host_vars_content)
|
||||
|
||||
def check_adhoc_inventory_source(self, inventory, except_host_pks=None,
|
||||
except_group_pks=None):
|
||||
# Check that management command created a new inventory source and
|
||||
# related inventory update.
|
||||
inventory_sources = inventory.inventory_sources.filter(group=None)
|
||||
self.assertEqual(inventory_sources.count(), 1)
|
||||
inventory_source = inventory_sources[0]
|
||||
self.assertEqual(inventory_source.source, 'file')
|
||||
self.assertEqual(inventory_source.inventory_updates.count(), 1)
|
||||
inventory_update = inventory_source.inventory_updates.all()[0]
|
||||
self.assertEqual(inventory_update.status, 'successful')
|
||||
for host in inventory.hosts.all():
|
||||
if host.pk in (except_host_pks or []):
|
||||
continue
|
||||
source_pks = host.inventory_sources.values_list('pk', flat=True)
|
||||
self.assertTrue(inventory_source.pk in source_pks)
|
||||
for group in inventory.groups.all():
|
||||
if group.pk in (except_group_pks or []):
|
||||
continue
|
||||
source_pks = group.inventory_sources.values_list('pk', flat=True)
|
||||
self.assertTrue(inventory_source.pk in source_pks)
|
||||
|
||||
def test_dir_with_ini_file(self):
|
||||
self.create_test_dir(host_names=['db1.example.com', 'db2.example.com'],
|
||||
group_names=['dbservers'], suffix='')
|
||||
self.test_ini_file(self.inv_dir)
|
||||
self.create_test_dir(host_names=['db1.example.com', 'db2.example.com'],
|
||||
group_names=['dbservers'], suffix='.yml')
|
||||
self.test_ini_file(self.inv_dir)
|
||||
self.create_test_dir(host_names=['db1.example.com', 'db2.example.com'],
|
||||
group_names=['dbservers'], suffix='.yaml')
|
||||
self.test_ini_file(self.inv_dir)
|
||||
self.create_test_dir(host_names=['db1.example.com', 'db2.example.com'],
|
||||
group_names=['dbservers'], suffix='.json')
|
||||
self.test_ini_file(self.inv_dir)
|
||||
|
||||
def test_merge_from_ini_file(self, overwrite=False, overwrite_vars=False):
|
||||
new_inv_vars = json.dumps({'varc': 'C'})
|
||||
new_inv = self.organizations[0].inventories.create(name='inv123',
|
||||
variables=new_inv_vars)
|
||||
lb_host_vars = json.dumps({'lbvar': 'ni!'})
|
||||
lb_host = new_inv.hosts.create(name='lb.example.com',
|
||||
variables=lb_host_vars)
|
||||
lb_group = new_inv.groups.create(name='lbservers')
|
||||
servers_group_vars = json.dumps({'vard': 'D'})
|
||||
servers_group = new_inv.groups.create(name='servers',
|
||||
variables=servers_group_vars)
|
||||
servers_group.children.add(lb_group)
|
||||
lb_group.hosts.add(lb_host)
|
||||
result, stdout, stderr = self.run_command('inventory_import',
|
||||
inventory_id=new_inv.pk,
|
||||
source=self.ini_path,
|
||||
overwrite=overwrite,
|
||||
overwrite_vars=overwrite_vars)
|
||||
self.assertEqual(result, None, stdout + stderr)
|
||||
# Check that inventory is populated as expected.
|
||||
new_inv = Inventory.objects.get(pk=new_inv.pk)
|
||||
expected_group_names = set(['servers', 'dbservers', 'webservers',
|
||||
'lbservers', 'others'])
|
||||
if overwrite:
|
||||
expected_group_names.remove('lbservers')
|
||||
group_names = set(new_inv.groups.values_list('name', flat=True))
|
||||
self.assertEqual(expected_group_names, group_names)
|
||||
expected_host_names = set(['web1.example.com', 'web2.example.com',
|
||||
'web3.example.com', 'db1.example.com',
|
||||
'db2.example.com', 'lb.example.com',
|
||||
'10.11.12.13', '10.12.14.16',
|
||||
'fe80::1610:9fff:fedd:654b',
|
||||
'fe80::1610:9fff:fedd:b654', '::1'])
|
||||
if overwrite:
|
||||
expected_host_names.remove('lb.example.com')
|
||||
host_names = set(new_inv.hosts.values_list('name', flat=True))
|
||||
self.assertEqual(expected_host_names, host_names)
|
||||
expected_inv_vars = {'vara': 'A', 'varc': 'C'}
|
||||
if overwrite_vars:
|
||||
expected_inv_vars.pop('varc')
|
||||
self.assertEqual(new_inv.variables_dict, expected_inv_vars)
|
||||
for host in new_inv.hosts.all():
|
||||
if host.name == 'web1.example.com':
|
||||
self.assertEqual(host.variables_dict,
|
||||
{'ansible_ssh_host': 'w1.example.net'})
|
||||
elif host.name in ('web3.example.com', 'fe80::1610:9fff:fedd:b654'):
|
||||
self.assertEqual(host.variables_dict, {'ansible_ssh_port': 1022})
|
||||
elif host.name == '10.12.14.16':
|
||||
self.assertEqual(host.variables_dict, {'ansible_ssh_port': 8022})
|
||||
elif host.name == 'lb.example.com':
|
||||
self.assertEqual(host.variables_dict, {'lbvar': 'ni!'})
|
||||
else:
|
||||
self.assertEqual(host.variables_dict, {})
|
||||
for group in new_inv.groups.all():
|
||||
if group.name == 'servers':
|
||||
expected_vars = {'varb': 'B', 'vard': 'D'}
|
||||
if overwrite_vars:
|
||||
expected_vars.pop('vard')
|
||||
self.assertEqual(group.variables_dict, expected_vars)
|
||||
children = set(group.children.values_list('name', flat=True))
|
||||
expected_children = set(['dbservers', 'webservers', 'lbservers'])
|
||||
if overwrite:
|
||||
expected_children.remove('lbservers')
|
||||
self.assertEqual(children, expected_children)
|
||||
self.assertEqual(group.hosts.count(), 0)
|
||||
elif group.name == 'dbservers':
|
||||
self.assertEqual(group.variables_dict, {'dbvar': 'ugh'})
|
||||
self.assertEqual(group.children.count(), 0)
|
||||
hosts = set(group.hosts.values_list('name', flat=True))
|
||||
host_names = set(['db1.example.com','db2.example.com'])
|
||||
self.assertEqual(hosts, host_names)
|
||||
elif group.name == 'webservers':
|
||||
self.assertEqual(group.variables_dict, {'webvar': 'blah'})
|
||||
self.assertEqual(group.children.count(), 0)
|
||||
hosts = set(group.hosts.values_list('name', flat=True))
|
||||
host_names = set(['web1.example.com','web2.example.com',
|
||||
'web3.example.com'])
|
||||
self.assertEqual(hosts, host_names)
|
||||
elif group.name == 'lbservers':
|
||||
self.assertEqual(group.variables_dict, {})
|
||||
self.assertEqual(group.children.count(), 0)
|
||||
hosts = set(group.hosts.values_list('name', flat=True))
|
||||
host_names = set(['lb.example.com'])
|
||||
self.assertEqual(hosts, host_names)
|
||||
if overwrite:
|
||||
except_host_pks = set()
|
||||
except_group_pks = set()
|
||||
else:
|
||||
except_host_pks = set([lb_host.pk])
|
||||
except_group_pks = set([lb_group.pk])
|
||||
self.check_adhoc_inventory_source(new_inv, except_host_pks,
|
||||
except_group_pks)
|
||||
|
||||
def test_overwrite_vars_from_ini_file(self):
|
||||
self.test_merge_from_ini_file(overwrite_vars=True)
|
||||
|
||||
def test_overwrite_from_ini_file(self):
|
||||
self.test_merge_from_ini_file(overwrite=True)
|
||||
|
||||
def test_ini_file_with_host_patterns(self):
|
||||
self.create_test_ini(ini_content=TEST_INVENTORY_INI_WITH_HOST_PATTERNS)
|
||||
# New empty inventory.
|
||||
new_inv = self.organizations[0].inventories.create(name='newb')
|
||||
self.assertEqual(new_inv.hosts.count(), 0)
|
||||
self.assertEqual(new_inv.groups.count(), 0)
|
||||
result, stdout, stderr = self.run_command('inventory_import',
|
||||
inventory_id=new_inv.pk,
|
||||
source=self.ini_path)
|
||||
self.assertEqual(result, None, stdout + stderr)
|
||||
# Check that inventory is populated as expected.
|
||||
new_inv = Inventory.objects.get(pk=new_inv.pk)
|
||||
expected_group_names = set(['dotcom', 'dotnet', 'dotorg', 'dotus', 'dotcc'])
|
||||
group_names = set(new_inv.groups.values_list('name', flat=True))
|
||||
self.assertEqual(expected_group_names, group_names)
|
||||
# Check that all host ranges are expanded into host names.
|
||||
expected_host_names = set()
|
||||
expected_host_names.update(['web%02d.example.com' % x for x in xrange(64)])
|
||||
expected_host_names.add('dns.example.com')
|
||||
expected_host_names.update(['db-%s.example.net' % x for x in string.ascii_lowercase])
|
||||
expected_host_names.add('ns.example.net')
|
||||
for x in 'ABCDEF':
|
||||
for y in xrange(10):
|
||||
expected_host_names.add('%s%d.example.org' % (x, y))
|
||||
expected_host_names.add('mx.example.org')
|
||||
expected_host_names.update(['lb%02d.example.us' % x for x in xrange(10)])
|
||||
expected_host_names.update(['media%02d.example.cc' % x for x in xrange(100)])
|
||||
host_names = set(new_inv.hosts.values_list('name', flat=True))
|
||||
self.assertEqual(expected_host_names, host_names)
|
||||
# Check hosts in dotcom group.
|
||||
group = new_inv.groups.get(name='dotcom')
|
||||
self.assertEqual(group.hosts.count(), 65)
|
||||
for host in group.hosts.filter( name__startswith='web'):
|
||||
self.assertEqual(host.variables_dict.get('ansible_ssh_user', ''), 'example')
|
||||
# Check hosts in dotnet group.
|
||||
group = new_inv.groups.get(name='dotnet')
|
||||
self.assertEqual(group.hosts.count(), 27)
|
||||
# Check hosts in dotorg group.
|
||||
group = new_inv.groups.get(name='dotorg')
|
||||
self.assertEqual(group.hosts.count(), 61)
|
||||
for host in group.hosts.all():
|
||||
if host.name.startswith('mx.'):
|
||||
continue
|
||||
self.assertEqual(host.variables_dict.get('ansible_ssh_user', ''), 'example')
|
||||
self.assertEqual(host.variables_dict.get('ansible_ssh_port', 22), 1022)
|
||||
# Check hosts in dotus group.
|
||||
group = new_inv.groups.get(name='dotus')
|
||||
self.assertEqual(group.hosts.count(), 10)
|
||||
for host in group.hosts.all():
|
||||
if int(host.name[2:4]) % 2 == 0:
|
||||
self.assertEqual(host.variables_dict.get('even_odd', ''), 'even')
|
||||
else:
|
||||
self.assertEqual(host.variables_dict.get('even_odd', ''), 'odd')
|
||||
# Check hosts in dotcc group.
|
||||
group = new_inv.groups.get(name='dotcc')
|
||||
self.assertEqual(group.hosts.count(), 100)
|
||||
# Check inventory source/update after running command.
|
||||
self.check_adhoc_inventory_source(new_inv)
|
||||
        # Test with invalid host pattern -- alpha begin > end.
        self.create_test_ini(ini_content='[invalid]\nhost[X:P]')
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=self.ini_path)
        self.assertTrue(isinstance(result, ValueError), result)
        # Test with invalid host pattern -- different numeric pattern lengths.
        self.create_test_ini(ini_content='[invalid]\nhost[001:08]')
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=self.ini_path)
        self.assertTrue(isinstance(result, ValueError), result)
        # Test with invalid host pattern -- invalid range/slice spec.
        self.create_test_ini(ini_content='[invalid]\nhost[1:2:3:4]')
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=self.ini_path)
        self.assertTrue(isinstance(result, ValueError), result)
        # Test with invalid host pattern -- no begin.
        self.create_test_ini(ini_content='[invalid]\nhost[:9]')
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=self.ini_path)
        self.assertTrue(isinstance(result, ValueError), result)
        # Test with invalid host pattern -- no end.
        self.create_test_ini(ini_content='[invalid]\nhost[0:]')
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=self.ini_path)
        self.assertTrue(isinstance(result, ValueError), result)
        # Test with invalid host pattern -- invalid slice.
        self.create_test_ini(ini_content='[invalid]\nhost[0:9:Q]')
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=self.ini_path)
        self.assertTrue(isinstance(result, ValueError), result)

    def test_ini_file_with_recursive_groups(self):
        self.create_test_ini(ini_content=TEST_INVENTORY_INI_WITH_RECURSIVE_GROUPS)
        new_inv = self.organizations[0].inventories.create(name='new')
        self.assertEqual(new_inv.hosts.count(), 0)
        self.assertEqual(new_inv.groups.count(), 0)
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=self.ini_path)
        self.assertEqual(result, None, stdout + stderr)

    def test_executable_file(self):
        # Use existing inventory as source.
        old_inv = self.inventories[1]
        # Modify host name to contain brackets (AC-1295).
        old_host = old_inv.hosts.all()[0]
        old_host.name = '[hey look some brackets]'
        old_host.save()
        # New empty inventory.
        new_inv = self.organizations[0].inventories.create(name='newb')
        self.assertEqual(new_inv.hosts.count(), 0)
        self.assertEqual(new_inv.groups.count(), 0)
        # Use our own inventory script as executable file.
        rest_api_url = self.live_server_url
        parts = urlparse.urlsplit(rest_api_url)
        username, password = self.get_super_credentials()
        netloc = '%s:%s@%s' % (username, password, parts.netloc)
        rest_api_url = urlparse.urlunsplit([parts.scheme, netloc, parts.path,
                                            parts.query, parts.fragment])
        os.environ.setdefault('REST_API_URL', rest_api_url)
        os.environ['INVENTORY_ID'] = str(old_inv.pk)
        source = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'plugins',
                              'inventory', 'awxrest.py')
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=source)
        self.assertEqual(result, None, stdout + stderr)
        # Check that inventory is populated as expected.
        new_inv = Inventory.objects.get(pk=new_inv.pk)
        self.assertEqual(old_inv.variables_dict, new_inv.variables_dict)
        old_groups = set(old_inv.groups.values_list('name', flat=True))
        new_groups = set(new_inv.groups.values_list('name', flat=True))
        self.assertEqual(old_groups, new_groups)
        old_hosts = set(old_inv.hosts.values_list('name', flat=True))
        new_hosts = set(new_inv.hosts.values_list('name', flat=True))
        self.assertEqual(old_hosts, new_hosts)
        for new_host in new_inv.hosts.all():
            old_host = old_inv.hosts.get(name=new_host.name)
            self.assertEqual(old_host.variables_dict, new_host.variables_dict)
        for new_group in new_inv.groups.all():
            old_group = old_inv.groups.get(name=new_group.name)
            self.assertEqual(old_group.variables_dict, new_group.variables_dict)
            old_children = set(old_group.children.values_list('name', flat=True))
            new_children = set(new_group.children.values_list('name', flat=True))
            self.assertEqual(old_children, new_children)
            old_hosts = set(old_group.hosts.values_list('name', flat=True))
            new_hosts = set(new_group.hosts.values_list('name', flat=True))
            self.assertEqual(old_hosts, new_hosts)
        self.check_adhoc_inventory_source(new_inv)

    def test_executable_file_with_meta_hostvars(self):
        os.environ['INVENTORY_HOSTVARS'] = '1'
        self.test_executable_file()

    def test_large_executable_file(self):
        new_inv = self.organizations[0].inventories.create(name='newec2')
        self.assertEqual(new_inv.hosts.count(), 0)
        self.assertEqual(new_inv.groups.count(), 0)
        os.chdir(os.path.join(os.path.dirname(__file__), '..', '..', 'data'))
        inv_file = 'large_ec2_inventory.py'
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=inv_file)
        self.assertEqual(result, None, stdout + stderr)
        # Check that inventory is populated as expected within a reasonable
        # amount of time. Computed fields should also be updated.
        new_inv = Inventory.objects.get(pk=new_inv.pk)
        self.assertNotEqual(new_inv.hosts.count(), 0)
        self.assertNotEqual(new_inv.groups.count(), 0)
        self.assertNotEqual(new_inv.total_hosts, 0)
        self.assertNotEqual(new_inv.total_groups, 0)
        self.assertElapsedLessThan(60)

    def _get_ngroups_for_nhosts(self, n):
        if n > 0:
            return min(n, 10) + ((n - 1) / 10 + 1) + ((n - 1) / 100 + 1) + ((n - 1) / 1000 + 1)
        else:
            return 0
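    # A worked example of the formula above (illustration only): for
    # n = 2000 hosts the generator script yields
    #   min(2000, 10) + 200 + 20 + 2 = 232 groups,
    # i.e. 10 leaf groups plus one group per 10, per 100, and per 1000
    # hosts, using Python 2 integer division.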
    def _check_largeinv_import(self, new_inv, nhosts):
        self._start_time = time.time()
        inv_file = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'largeinv.py')
        ngroups = self._get_ngroups_for_nhosts(nhosts)
        os.environ['NHOSTS'] = str(nhosts)
        result, stdout, stderr = self.run_command('inventory_import',
                                                  inventory_id=new_inv.pk,
                                                  source=inv_file,
                                                  overwrite=True, verbosity=0)
        self.assertEqual(result, None, stdout + stderr)
        # Check that inventory is populated as expected within a reasonable
        # amount of time. Computed fields should also be updated.
        new_inv = Inventory.objects.get(pk=new_inv.pk)
        self.assertEqual(new_inv.hosts.count(), nhosts)
        self.assertEqual(new_inv.groups.count(), ngroups)
        self.assertEqual(new_inv.total_hosts, nhosts)
        self.assertEqual(new_inv.total_groups, ngroups)
        self.assertElapsedLessThan(120)

    @unittest.skipIf(getattr(settings, 'LOCAL_DEVELOPMENT', False),
                     'Skip this test in local development environments, '
                     'which may vary widely on memory.')
    def test_large_inventory_file(self):
        new_inv = self.organizations[0].inventories.create(name='largeinv')
        self.assertEqual(new_inv.hosts.count(), 0)
        self.assertEqual(new_inv.groups.count(), 0)
        nhosts = 2000
        # Test initial import into empty inventory.
        self._check_largeinv_import(new_inv, nhosts)
        # Test re-importing and overwriting.
        self._check_largeinv_import(new_inv, nhosts)
        # Test re-importing with only half as many hosts.
        self._check_largeinv_import(new_inv, nhosts / 2)
        # Test re-importing that clears all hosts.
        self._check_largeinv_import(new_inv, 0)
@ -13,6 +13,7 @@ from awx.main.management.commands.inventory_import import (
)


@pytest.mark.inventory_import
class TestInvalidOptions:

    def test_invalid_options_no_options_specified(self):

@ -79,6 +79,7 @@ def test_job_args_unredacted_passwords(job):
    assert extra_vars['secret_key'] == 'my_password'


@pytest.mark.survey
def test_update_kwargs_survey_invalid_default(survey_spec_factory):
    spec = survey_spec_factory('var2')
    spec['spec'][0]['required'] = False
@ -91,6 +92,7 @@ def test_update_kwargs_survey_invalid_default(survey_spec_factory):
    assert json.loads(defaulted_extra_vars['extra_vars'])['var2'] == 2


@pytest.mark.survey
@pytest.mark.parametrize("question_type,default,expect_use,expect_value", [
    ("multiplechoice", "", False, 'N/A'),  # historical bug
    ("multiplechoice", "zeb", False, 'N/A'),  # zeb not in choices
@ -125,6 +127,7 @@ def test_optional_survey_question_defaults(
    assert 'c' not in defaulted_extra_vars['extra_vars']


@pytest.mark.survey
class TestWorkflowSurveys:
    def test_update_kwargs_survey_defaults(self, survey_spec_factory):
        "Assure that the survey default over-rides a JT variable"
128  awx/main/tests/unit/utils/test_mem_inventory.py  Normal file
@ -0,0 +1,128 @@
# AWX utils
from awx.main.utils.mem_inventory import (
    MemInventory,
    mem_data_to_dict, dict_to_mem_data
)

import pytest
import json


@pytest.fixture
def memory_inventory():
    inventory = MemInventory()
    h = inventory.get_host('my_host')
    h.variables = {'foo': 'bar'}
    g = inventory.get_group('my_group')
    g.variables = {'foobar': 'barfoo'}
    h2 = inventory.get_host('group_host')
    g.add_host(h2)
    return inventory


@pytest.fixture
def JSON_of_inv():
    # Implemented as a fixture because it may be changed inside of tests
    return {
        "_meta": {
            "hostvars": {
                "group_host": {},
                "my_host": {"foo": "bar"}
            }
        },
        "all": {"children": ["my_group", "ungrouped"]},
        "my_group": {
            "hosts": ["group_host"],
            "vars": {"foobar": "barfoo"}
        },
        "ungrouped": {"hosts": ["my_host"]}
    }


# Structure mentioned in official docs
# https://docs.ansible.com/ansible/dev_guide/developing_inventory.html
@pytest.fixture
def JSON_with_lists():
    docs_example = '''{
        "databases" : {
            "hosts" : [ "host1.example.com", "host2.example.com" ],
            "vars" : {
                "a" : true
            }
        },
        "webservers" : [ "host2.example.com", "host3.example.com" ],
        "atlanta" : {
            "hosts" : [ "host1.example.com", "host4.example.com", "host5.example.com" ],
            "vars" : {
                "b" : false
            },
            "children": [ "marietta", "5points" ]
        },
        "marietta" : [ "host6.example.com" ],
        "5points" : [ "host7.example.com" ]
    }'''
    return json.loads(docs_example)


# MemObject basic operations tests

@pytest.mark.inventory_import
def test_inventory_create_all_group():
    inventory = MemInventory()
    assert inventory.all_group.name == 'all'


@pytest.mark.inventory_import
def test_create_child_group():
    inventory = MemInventory()
    g1 = inventory.get_group('g1')
    # Create new group by name as child of g1
    g2 = inventory.get_group('g2', g1)
    # Check that child is in the children of the parent group
    assert g1.children == [g2]
    # Check that _only_ the parent group is listed as a root group
    assert inventory.all_group.children == [g1]
    # Check that _both_ are tracked by the global `all_groups` dict
    assert set(inventory.all_group.all_groups.values()) == set([g1, g2])


@pytest.mark.inventory_import
def test_ungrouped_mechanics():
    # ansible-inventory returns a group called `ungrouped`;
    # we can safely treat this the same as the `all_group`
    inventory = MemInventory()
    ug = inventory.get_group('ungrouped')
    assert ug is inventory.all_group


# MemObject --> JSON tests

@pytest.mark.inventory_import
def test_convert_memory_to_JSON_with_vars(memory_inventory):
    data = mem_data_to_dict(memory_inventory)
    # Assertions about the variables on the objects
    assert data['_meta']['hostvars']['my_host'] == {'foo': 'bar'}
    assert data['my_group']['vars'] == {'foobar': 'barfoo'}
    # Orphan host should be found in the synthetic `ungrouped` group
    assert data['ungrouped']['hosts'] == ['my_host']


# JSON --> MemObject tests

@pytest.mark.inventory_import
def test_convert_JSON_to_memory_with_vars(JSON_of_inv):
    inventory = dict_to_mem_data(JSON_of_inv)
    # Assertions about the variables on the objects
    assert inventory.get_host('my_host').variables == {'foo': 'bar'}
    assert inventory.get_group('my_group').variables == {'foobar': 'barfoo'}
    # Host should be child of group
    assert inventory.get_host('group_host') in inventory.get_group('my_group').hosts


@pytest.mark.inventory_import
def test_host_lists_accepted(JSON_with_lists):
    inventory = dict_to_mem_data(JSON_with_lists)
    assert inventory.get_group('marietta').name == 'marietta'
    # Check that marietta's hosts were saved
    h = inventory.get_host('host6.example.com')
    assert h.name == 'host6.example.com'
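

# A hedged addition, not in the original test file: the two converters
# above are expected to round-trip, so serializing an inventory and loading
# the result back should preserve hosts, groups, and variables.
@pytest.mark.inventory_import
def test_round_trip_conversion(memory_inventory):
    data = mem_data_to_dict(memory_inventory)
    result = dict_to_mem_data(data)
    assert result.get_host('my_host').variables == {'foo': 'bar'}
    assert result.get_group('my_group').variables == {'foobar': 'barfoo'}
    assert [h.name for h in result.get_group('my_group').hosts] == ['group_host']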
@ -5,6 +5,16 @@ from logstash.formatter import LogstashFormatterVersion1
from copy import copy
import json
import time
import logging


class TimeFormatter(logging.Formatter):
    '''
    Custom log formatter used for inventory imports
    '''
    def format(self, record):
        record.relativeSeconds = record.relativeCreated / 1000.0
        return logging.Formatter.format(self, record)


class LogstashFormatter(LogstashFormatterVersion1):
315  awx/main/utils/mem_inventory.py  Normal file
@ -0,0 +1,315 @@
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.

# Python
import re
import logging
from collections import OrderedDict


# Logger is used for any data-related messages so that the log level
# can be adjusted on command invocation
logger = logging.getLogger('awx.main.commands.inventory_import')


__all__ = ['MemHost', 'MemGroup', 'MemInventory',
           'mem_data_to_dict', 'dict_to_mem_data']


ipv6_port_re = re.compile(r'^\[([A-Fa-f0-9:]{3,})\]:(\d+?)$')


# Models for in-memory objects that represent an inventory


class MemObject(object):
    '''
    Common code shared between in-memory groups and hosts.
    '''

    def __init__(self, name):
        assert name, 'no name'
        self.name = name


class MemGroup(MemObject):
    '''
    In-memory representation of an inventory group.
    '''

    def __init__(self, name):
        super(MemGroup, self).__init__(name)
        self.children = []
        self.hosts = []
        self.variables = {}
        self.parents = []
        # Used on the "all" group in place of previous global variables.
        # Maps host and group names to objects to prevent redundant additions.
        self.all_hosts = {}
        self.all_groups = {}
        logger.debug('Loaded group: %s', self.name)

    def __repr__(self):
        return '<_in-memory-group_ `{}`>'.format(self.name)

    def add_child_group(self, group):
        assert group.name != 'all', 'group name is all'
        assert isinstance(group, MemGroup), 'not MemGroup instance'
        logger.debug('Adding child group %s to parent %s', group.name, self.name)
        if group not in self.children:
            self.children.append(group)
        if self not in group.parents:
            group.parents.append(self)

    def add_host(self, host):
        assert isinstance(host, MemHost), 'not MemHost instance'
        logger.debug('Adding host %s to group %s', host.name, self.name)
        if host not in self.hosts:
            self.hosts.append(host)

    def debug_tree(self, group_names=None):
        group_names = group_names or set()
        if self.name in group_names:
            return
        logger.debug('Dumping tree for group "%s":', self.name)
        logger.debug('- Vars: %r', self.variables)
        for h in self.hosts:
            logger.debug('- Host: %s, %r', h.name, h.variables)
        for g in self.children:
            logger.debug('- Child: %s', g.name)
        logger.debug('----')
        group_names.add(self.name)
        for g in self.children:
            g.debug_tree(group_names)


class MemHost(MemObject):
    '''
    In-memory representation of an inventory host.
    '''

    def __init__(self, name, port=None):
        super(MemHost, self).__init__(name)
        self.variables = {}
        self.instance_id = None
        self.name = name
        if port:
            # was `ansible_ssh_port` in older Ansible/Tower versions
            self.variables['ansible_port'] = port
        logger.debug('Loaded host: %s', self.name)

    def __repr__(self):
        return '<_in-memory-host_ `{}`>'.format(self.name)


class MemInventory(object):
    '''
    Common functions for an inventory loader from a given source.
    '''
    def __init__(self, all_group=None, group_filter_re=None, host_filter_re=None):
        if all_group:
            assert isinstance(all_group, MemGroup), '{} is not MemGroup instance'.format(all_group)
            self.all_group = all_group
        else:
            self.all_group = self.create_group('all')
        self.group_filter_re = group_filter_re
        self.host_filter_re = host_filter_re

    def create_host(self, host_name, port):
        host = MemHost(host_name, port)
        self.all_group.all_hosts[host_name] = host
        return host

    def get_host(self, name):
        '''
        Return a MemHost instance from host name, creating if needed. If name
        contains brackets, they will NOT be interpreted as a host pattern.
        '''
        m = ipv6_port_re.match(name)
        if m:
            host_name = m.groups()[0]
            port = int(m.groups()[1])
        elif name.count(':') == 1:
            host_name = name.split(':')[0]
            try:
                port = int(name.split(':')[1])
            except (ValueError, UnicodeDecodeError):
                logger.warning(u'Invalid port "%s" for host "%s"',
                               name.split(':')[1], host_name)
                port = None
        else:
            host_name = name
            port = None
        if self.host_filter_re and not self.host_filter_re.match(host_name):
            logger.debug('Filtering host %s', host_name)
            return None
        if host_name not in self.all_group.all_hosts:
            self.create_host(host_name, port)
        return self.all_group.all_hosts[host_name]
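    # Worked examples of the name/port parsing above (illustration only):
    #   get_host('[2001:db8::1]:2222')      -> name '2001:db8::1',
    #                                          variables {'ansible_port': 2222}
    #   get_host('web1.example.com:1022')   -> name 'web1.example.com',
    #                                          variables {'ansible_port': 1022}
    #   get_host('web1.example.com')        -> name unchanged, no port variable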
    def create_group(self, group_name):
        group = MemGroup(group_name)
        if group_name not in ['all', 'ungrouped']:
            self.all_group.all_groups[group_name] = group
        return group

    def get_group(self, name, all_group=None, child=False):
        '''
        Return a MemGroup instance from group name, creating if needed.
        '''
        all_group = all_group or self.all_group
        if name in ['all', 'ungrouped']:
            return all_group
        if self.group_filter_re and not self.group_filter_re.match(name):
            logger.debug('Filtering group %s', name)
            return None
        if name not in self.all_group.all_groups:
            group = self.create_group(name)
            if not child:
                all_group.add_child_group(group)
        return self.all_group.all_groups[name]

    def delete_empty_groups(self):
        for name, group in self.all_group.all_groups.items():
            if not group.children and not group.hosts and not group.variables:
                logger.debug('Removing empty group %s', name)
                for parent in group.parents:
                    if group in parent.children:
                        parent.children.remove(group)
                del self.all_group.all_groups[name]


# Conversion utilities

def mem_data_to_dict(inventory):
    '''
    Given an in-memory construct of an inventory, returns a dictionary that
    follows Ansible guidelines on the structure of dynamic inventory sources.

    May be replaced by removing in-memory constructs within this file later.
    '''
    all_group = inventory.all_group
    inventory_data = OrderedDict([])
    # Save hostvars to _meta
    inventory_data['_meta'] = OrderedDict([])
    hostvars = OrderedDict([])
    for name, host_obj in all_group.all_hosts.items():
        hostvars[name] = host_obj.variables
    inventory_data['_meta']['hostvars'] = hostvars
    # Save children of `all` group
    inventory_data['all'] = OrderedDict([])
    if all_group.variables:
        inventory_data['all']['vars'] = all_group.variables
    inventory_data['all']['children'] = [c.name for c in all_group.children]
    inventory_data['all']['children'].append('ungrouped')
    # Save details of declared groups individually
    ungrouped_hosts = set(all_group.all_hosts.keys())
    for name, group_obj in all_group.all_groups.items():
        group_host_names = [h.name for h in group_obj.hosts]
        group_children_names = [c.name for c in group_obj.children]
        group_data = OrderedDict([])
        if group_host_names:
            group_data['hosts'] = group_host_names
            ungrouped_hosts.difference_update(group_host_names)
        if group_children_names:
            group_data['children'] = group_children_names
        if group_obj.variables:
            group_data['vars'] = group_obj.variables
        inventory_data[name] = group_data
    # Save ungrouped hosts
    inventory_data['ungrouped'] = OrderedDict([])
    if ungrouped_hosts:
        inventory_data['ungrouped']['hosts'] = list(ungrouped_hosts)
    return inventory_data
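# For illustration (matching the unit tests for this module), a two-host
# inventory with one group serializes to roughly:
#
#   {"_meta": {"hostvars": {"my_host": {"foo": "bar"}, "group_host": {}}},
#    "all": {"children": ["my_group", "ungrouped"]},
#    "my_group": {"hosts": ["group_host"], "vars": {"foobar": "barfoo"}},
#    "ungrouped": {"hosts": ["my_host"]}}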
def dict_to_mem_data(data, inventory=None):
    '''
    In-place operation on `inventory`; adds contents from `data` to the
    in-memory representation of the inventory.
    May be destructive on `data`.
    '''
    assert isinstance(data, dict), 'Expected dict, received {}'.format(type(data))
    if inventory is None:
        inventory = MemInventory()

    _meta = data.pop('_meta', {})

    for k, v in data.iteritems():
        group = inventory.get_group(k)
        if not group:
            continue

        # Load group hosts/vars/children from a dictionary.
        if isinstance(v, dict):
            # Process hosts within a group.
            hosts = v.get('hosts', {})
            if isinstance(hosts, dict):
                for hk, hv in hosts.iteritems():
                    host = inventory.get_host(hk)
                    if not host:
                        continue
                    if isinstance(hv, dict):
                        host.variables.update(hv)
                    else:
                        logger.warning('Expected dict of vars for '
                                       'host "%s", got %s instead',
                                       hk, str(type(hv)))
                    group.add_host(host)
            elif isinstance(hosts, (list, tuple)):
                for hk in hosts:
                    host = inventory.get_host(hk)
                    if not host:
                        continue
                    group.add_host(host)
            else:
                logger.warning('Expected dict or list of "hosts" for '
                               'group "%s", got %s instead', k,
                               str(type(hosts)))
            # Process group variables.
            vars = v.get('vars', {})
            if isinstance(vars, dict):
                group.variables.update(vars)
            else:
                logger.warning('Expected dict of vars for '
                               'group "%s", got %s instead',
                               k, str(type(vars)))
            # Process child groups.
            children = v.get('children', [])
            if isinstance(children, (list, tuple)):
                for c in children:
                    child = inventory.get_group(c, inventory.all_group, child=True)
                    if child and c != 'ungrouped':
                        group.add_child_group(child)
            else:
                logger.warning('Expected list of children for '
                               'group "%s", got %s instead',
                               k, str(type(children)))

        # Load host names from a list.
        elif isinstance(v, (list, tuple)):
            for h in v:
                host = inventory.get_host(h)
                if not host:
                    continue
                group.add_host(host)
        else:
            logger.warning('Expected dict or list for group "%s", '
                           'got %s instead', k, str(type(v)))

        if k not in ['all', 'ungrouped']:
            inventory.all_group.add_child_group(group)

    if _meta:
        for k, v in inventory.all_group.all_hosts.iteritems():
            meta_hostvars = _meta['hostvars'].get(k, {})
            if isinstance(meta_hostvars, dict):
                v.variables.update(meta_hostvars)
            else:
                logger.warning('Expected dict of vars for '
                               'host "%s", got %s instead',
                               k, str(type(meta_hostvars)))

    return inventory
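
# A hedged usage sketch (assumed wiring, not code in this module): the
# inventory_import command is expected to parse `ansible-inventory --list`
# output and hand it to dict_to_mem_data() roughly like this:
#
#   import json
#   import subprocess
#   stdout = subprocess.check_output(
#       ['ansible-inventory', '-i', 'hosts', '--list'])
#   inventory = dict_to_mem_data(json.loads(stdout))
#   for name, host in inventory.all_group.all_hosts.items():
#       logger.debug('imported %s with vars %r', name, host.variables)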
326  awx/plugins/ansible_inventory/backport.py  Executable file
@ -0,0 +1,326 @@
#!/usr/bin/env python

# (c) 2017, Brian Coca <bcoca@ansible.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import optparse
from operator import attrgetter

from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleOptionsError
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager


try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

INTERNAL_VARS = frozenset(['ansible_facts',
                           'ansible_version',
                           'ansible_playbook_python',
                           'inventory_dir',
                           'inventory_file',
                           'inventory_hostname',
                           'inventory_hostname_short',
                           'groups',
                           'group_names',
                           'omit',
                           'playbook_dir',
                           ])


class InventoryCLI(CLI):
    ''' used to display or dump the configured inventory as Ansible sees it '''

    ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
                 'group': 'The name of a group in the inventory, relevant when using --graph',
                 }

    def __init__(self, args):

        super(InventoryCLI, self).__init__(args)
        self.vm = None
        self.loader = None

    def parse(self):

        self.parser = CLI.base_parser(
            usage='usage: %prog [options] [host|group]',
            epilog='Show Ansible inventory information, by default it uses the inventory script JSON format',
            inventory_opts=True,
            vault_opts=True
        )

        # Actions
        action_group = optparse.OptionGroup(self.parser, "Actions", "One of following must be used on invocation, ONLY ONE!")
        action_group.add_option("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
        action_group.add_option("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
        action_group.add_option("--graph", action="store_true", default=False, dest='graph',
                                help='create inventory graph, if supplying pattern it must be a valid group name')
        self.parser.add_option_group(action_group)

        # Options
        self.parser.add_option("-y", "--yaml", action="store_true", default=False, dest='yaml',
                               help='Use YAML format instead of default JSON, ignored for --graph')
        self.parser.add_option("--vars", action="store_true", default=False, dest='show_vars',
                               help='Add vars to graph display, ignored unless used with --graph')

        try:
            super(InventoryCLI, self).parse()
        except Exception as e:
            if 'Need to implement!' not in e.args[0]:
                raise
            # --- Start of 2.3+ super(InventoryCLI, self).parse() ---
            self.options, self.args = self.parser.parse_args(self.args[1:])
            if hasattr(self.options, 'tags') and not self.options.tags:
                # optparse defaults does not do what's expected
                self.options.tags = ['all']
            if hasattr(self.options, 'tags') and self.options.tags:
                if not C.MERGE_MULTIPLE_CLI_TAGS:
                    if len(self.options.tags) > 1:
                        display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False)
                        self.options.tags = [self.options.tags[-1]]

                tags = set()
                for tag_set in self.options.tags:
                    for tag in tag_set.split(u','):
                        tags.add(tag.strip())
                self.options.tags = list(tags)

            if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
                if not C.MERGE_MULTIPLE_CLI_TAGS:
                    if len(self.options.skip_tags) > 1:
                        display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False)
                        self.options.skip_tags = [self.options.skip_tags[-1]]

                skip_tags = set()
                for tag_set in self.options.skip_tags:
                    for tag in tag_set.split(u','):
                        skip_tags.add(tag.strip())
                self.options.skip_tags = list(skip_tags)
            # --- End of 2.3+ super(InventoryCLI, self).parse() ---

        display.verbosity = self.options.verbosity

        self.validate_conflicts(vault_opts=True)

        # there can be only one! and, at least, one!
        used = 0
        for opt in (self.options.list, self.options.host, self.options.graph):
            if opt:
                used += 1
        if used == 0:
            raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
        elif used > 1:
            raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")

        # set host pattern to default if not supplied
        if len(self.args) > 0:
            self.options.pattern = self.args[0]
        else:
            self.options.pattern = 'all'

    def run(self):

        results = None

        super(InventoryCLI, self).run()

        # Initialize needed objects
        self.loader = DataLoader()
        self.vm = VariableManager()

        # use vault if needed
        if self.options.vault_password_file:
            vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader)
        elif self.options.ask_vault_pass:
            vault_pass = self.ask_vault_passwords()
        else:
            vault_pass = None

        if vault_pass:
            self.loader.set_vault_password(vault_pass)

        # actually get inventory and vars
        self.inventory = Inventory(loader=self.loader, variable_manager=self.vm, host_list=self.options.inventory)
        self.vm.set_inventory(self.inventory)

        if self.options.host:
            hosts = self.inventory.get_hosts(self.options.host)
            if len(hosts) != 1:
                raise AnsibleOptionsError("You must pass a single valid host to --hosts parameter")

            myvars = self.vm.get_vars(self.loader, host=hosts[0])
            self._remove_internal(myvars)

            # FIXME: should we template first?
            results = self.dump(myvars)

        elif self.options.graph:
            results = self.inventory_graph()
        elif self.options.list:
            top = self.inventory.get_group('all')
            if self.options.yaml:
                results = self.yaml_inventory(top)
            else:
                results = self.json_inventory(top)
            results = self.dump(results)

        if results:
            display.display(results)
            exit(0)

        exit(1)

    def dump(self, stuff):

        if self.options.yaml:
            import yaml
            from ansible.parsing.yaml.dumper import AnsibleDumper
            results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
        else:
            import json
            results = json.dumps(stuff, sort_keys=True, indent=4)

        return results

    def _remove_internal(self, dump):

        for internal in INTERNAL_VARS:
            if internal in dump:
                del dump[internal]

    def _remove_empty(self, dump):
        # remove empty keys
        for x in ('hosts', 'vars', 'children'):
            if x in dump and not dump[x]:
                del dump[x]

    def _show_vars(self, dump, depth):
        result = []
        self._remove_internal(dump)
        if self.options.show_vars:
            for (name, val) in sorted(dump.items()):
                result.append(self._graph_name('{%s = %s}' % (name, val), depth + 1))
        return result

    def _graph_name(self, name, depth=0):
        if depth:
            name = " |" * (depth) + "--%s" % name
        return name

    def _graph_group(self, group, depth=0):

        result = [self._graph_name('@%s:' % group.name, depth)]
        depth = depth + 1
        for kid in sorted(group.child_groups, key=attrgetter('name')):
            result.extend(self._graph_group(kid, depth))

        if group.name != 'all':
            for host in sorted(group.hosts, key=attrgetter('name')):
                result.append(self._graph_name(host.name, depth))
                result.extend(self._show_vars(host.get_vars(), depth))

        result.extend(self._show_vars(group.get_vars(), depth))

        return result

    def inventory_graph(self):

        start_at = self.inventory.get_group(self.options.pattern)
        if start_at:
            return '\n'.join(self._graph_group(start_at))
        else:
            raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
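    # Illustrative `--graph` output (a sketch) for an inventory where group
    # `atlanta` has a child group `5points`, given the prefix rule in
    # _graph_name() above:
    #
    #   @all:
    #    |--@atlanta:
    #    | |--@5points:
    #    | | |--host7.example.com
    #    | |--host1.example.com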
    def json_inventory(self, top):

        def format_group(group):
            results = {}
            results[group.name] = {}
            if group.name != 'all':
                results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
            results[group.name]['vars'] = group.get_vars()
            results[group.name]['children'] = []
            for subgroup in sorted(group.child_groups, key=attrgetter('name')):
                results[group.name]['children'].append(subgroup.name)
                results.update(format_group(subgroup))

            self._remove_empty(results[group.name])
            return results

        results = format_group(top)

        # populate meta
        results['_meta'] = {'hostvars': {}}
        hosts = self.inventory.get_hosts()
        for host in hosts:
            results['_meta']['hostvars'][host.name] = self.vm.get_vars(self.loader, host=host)
            self._remove_internal(results['_meta']['hostvars'][host.name])

        return results

    def yaml_inventory(self, top):

        seen = []

        def format_group(group):
            results = {}

            # initialize group + vars
            results[group.name] = {}
            results[group.name]['vars'] = group.get_vars()

            # subgroups
            results[group.name]['children'] = {}
            for subgroup in sorted(group.child_groups, key=attrgetter('name')):
                if subgroup.name != 'all':
                    results[group.name]['children'].update(format_group(subgroup))

            # hosts for group
            results[group.name]['hosts'] = {}
            if group.name != 'all':
                for h in sorted(group.hosts, key=attrgetter('name')):
                    myvars = {}
                    if h.name not in seen:  # avoid defining host vars more than once
                        seen.append(h.name)
                        myvars = self.vm.get_vars(self.loader, host=h)
                        self._remove_internal(myvars)
                    results[group.name]['hosts'][h.name] = myvars

            self._remove_empty(results[group.name])
            return results

        return format_group(top)


if __name__ == '__main__':
    import imp
    import subprocess
    import sys
    with open(__file__) as f:
        imp.load_source('ansible.cli.inventory', __file__ + '.py', f)
    ansible_path = subprocess.check_output(['which', 'ansible']).strip()
    sys.argv[0] = 'ansible-inventory'
    execfile(ansible_path)
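
# A hedged invocation note (an assumption, not part of this file): on hosts
# where `ansible-inventory` is not installed, this backport is expected to
# accept the same flags when run directly, e.g.:
#
#   python awx/plugins/ansible_inventory/backport.py -i hosts --list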
@ -907,6 +907,10 @@ LOGGING = {
        },
        'json': {
            '()': 'awx.main.utils.formatters.LogstashFormatter'
        },
        'timed_import': {
            '()': 'awx.main.utils.formatters.TimeFormatter',
            'format': '%(relativeSeconds)9.3f %(levelname)-8s %(message)s'
        }
    },
    'handlers': {
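    # Illustrative line produced by the 'timed_import' formatter above
    # (a sketch, given the format string):
    #      12.345 DEBUG    Loaded group: dotcom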
@ -958,6 +962,11 @@ LOGGING = {
            'backupCount': 5,
            'formatter': 'simple',
        },
        'inventory_import': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'timed_import',
        },
        'task_system': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
@ -1029,6 +1038,10 @@ LOGGING = {
        'awx.main.commands.run_callback_receiver': {
            'handlers': ['callback_receiver'],
        },
        'awx.main.commands.inventory_import': {
            'handlers': ['inventory_import'],
            'propagate': False
        },
        'awx.main.tasks': {
            'handlers': ['task_system'],
        },
@ -11,6 +11,12 @@ Fields that should be specified on creation of SCM inventory source:
- `source_path` - relative path inside of the project indicating a
  directory or a file; if left blank, "" is still a relative path
  indicating the root directory of the project
- the `source` field should be set to "scm"

Additionally:

- `source_vars` - if these are set on a "file" type inventory source,
  then they will be passed to the environment vars when running
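For illustration, a creation request might carry a body like the following
sketch; any field name not given in this document, such as `source_project`,
is an assumption rather than a documented name:

    {
        "name": "SCM inventory from project",
        "source": "scm",
        "source_project": 42,
        "source_path": "inventories/dev"
    }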
A user should not be able to update this inventory source via the
endpoint `/inventory_sources/N/update/`. Instead, they should update
@ -40,18 +46,26 @@ update the project.

> Any Inventory Ansible supports should be supported by this feature

This statement is the overall goal, and it should hold true absolutely for
Ansible version 2.4 and beyond due to the use of `ansible-inventory`.
Versions of Ansible before that may not support all valid inventory syntax,
because the internal mechanism is different.
This is accomplished by making use of the `ansible-inventory` command.
The inventory import tower-manage command will check for the existence
of `ansible-inventory`, and if it is not present, it will call a backported
version of it. The backport is maintained as its own GPL3 licensed
repository:

https://github.com/ansible/ansible-inventory-backport

Documentation should reflect the limitations of inventory file syntax
support in old Ansible versions.
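A sketch of that fallback check in Python (assumed logic, not the literal
tower-manage implementation):

    # the backport path refers to awx/plugins/ansible_inventory/ in this repo
    from distutils.spawn import find_executable

    def ansible_inventory_cmdline(inventory_path):
        exe = find_executable('ansible-inventory')
        if exe:
            return [exe, '-i', inventory_path, '--list']
        # fall back to the GPLv3 backport shipped with AWX
        return ['python', 'awx/plugins/ansible_inventory/backport.py',
                '-i', inventory_path, '--list']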
# Acceptance Criteria Notes

Because the internal mechanism is different, we need some coverage
testing with Ansible versions pre-2.4 and after.

# Acceptance Criteria Use Cases

Some test scenarios to look at:
- Obviously use a git repo with examples of host patterns, etc.
- Test projects that use scripts
- Test projects that have multiple inventory files in a directory,
  group_vars, host_vars, etc.
- Test scripts in the project repo
- Test scripts that use environment variables provided by a credential
  in Tower
- Test multiple inventories that use the same project, pointing to different
  files / directories inside of the project
- Feature works correctly even if project doesn't have any playbook files
@ -61,3 +75,43 @@ Some test scenarios to look at:
- If the project SCM update encounters errors, it should not run the
  inventory updates

# Notes for Official Documentation

The API guide should summarize what is in the use details.
Once the UI implementation is done, the product docs should cover its
standard use.

## Update-on-launch

This type of inventory source will not allow the `update_on_launch` field
to be set to True. This is because of concerns related to the task
manager job dependency tree.

We should document the alternatives for a user to accomplish the same
thing in a different way.

### Alternative 1: Use same project for playbook

You can make a job template that uses a project as well as an inventory
that updates from that same project. In this case, you can set the project
to `update_on_launch`, in which case it will trigger an inventory update
if needed.

### Alternative 2: Use the project in a workflow

If you must use a different project for the playbook than for the inventory
source, then you can still place the project in a workflow and then have
a job template run on success of the project update.

This is guaranteed to have the inventory update "on time" (by this we mean
that the inventory changes are complete before the job template is launched),
because the project does not transition to the completed state
until the inventory update is finished.

Note that a failed inventory update does not mark the project as failed.

## Lazy inventory updates

It should also be noted that not every project update will trigger a
corresponding inventory update. If the project revision has not changed
and the inventory has not been edited, the inventory update will not fire.

@ -8,3 +8,5 @@ markers =
    ac: access control test
    license_feature: ensure license features are accessible or not depending on license
    mongo_db: drop mongodb test database before test runs
    survey: tests related to survey feature
    inventory_import: tests of code used by inventory import command