diff --git a/Makefile b/Makefile
index febf1e6a1d..5cbc9ac400 100644
--- a/Makefile
+++ b/Makefile
@@ -170,7 +170,7 @@ endif
 .DEFAULT_GOAL := build
 
-.PHONY: clean clean-tmp rebase push requirements requirements_dev \
+.PHONY: clean clean-tmp clean-venv rebase push requirements requirements_dev \
 	requirements_jenkins \
 	develop refresh adduser migrate dbchange dbshell runserver celeryd \
 	receiver test test_unit test_coverage coverage_html test_jenkins dev_build \
@@ -216,6 +216,9 @@ clean-ui:
 clean-tmp:
 	rm -rf tmp/
 
+clean-venv:
+	rm -rf venv/
+
 # Remove temporary build files, compiled Python files.
 clean: clean-rpm clean-deb clean-ui clean-tar clean-packer clean-bundle
 	rm -rf awx/lib/site-packages
diff --git a/awx/api/permissions.py b/awx/api/permissions.py
index 6e1320e2d8..285441421d 100644
--- a/awx/api/permissions.py
+++ b/awx/api/permissions.py
@@ -19,7 +19,7 @@ from awx.main.utils import get_object_or_400
 logger = logging.getLogger('awx.api.permissions')
 
 __all__ = ['ModelAccessPermission', 'JobTemplateCallbackPermission',
-           'TaskPermission', 'ProjectUpdatePermission']
+           'TaskPermission', 'ProjectUpdatePermission', 'UserPermission']
 
 class ModelAccessPermission(permissions.BasePermission):
     '''
@@ -202,3 +202,10 @@ class ProjectUpdatePermission(ModelAccessPermission):
     def check_post_permissions(self, request, view, obj=None):
         project = get_object_or_400(view.model, pk=view.kwargs['pk'])
         return check_user_access(request.user, view.model, 'start', project)
+
+
+class UserPermission(ModelAccessPermission):
+    def check_post_permissions(self, request, view, obj=None):
+        if request.user.is_superuser:
+            return True
+        raise PermissionDenied()
diff --git a/awx/api/views.py b/awx/api/views.py
index 9686387f0c..9ccc739336 100644
--- a/awx/api/views.py
+++ b/awx/api/views.py
@@ -1,3 +1,4 @@
+
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.
 
@@ -1156,6 +1157,7 @@ class UserList(ListCreateAPIView):
 
     model = User
     serializer_class = UserSerializer
+    permission_classes = (UserPermission,)
 
     def post(self, request, *args, **kwargs):
         ret = super(UserList, self).post( request, *args, **kwargs)
@@ -1321,7 +1323,7 @@ class UserDetail(RetrieveUpdateDestroyAPIView):
         can_admin = request.user.can_access(User, 'admin', obj, request.data)
 
         su_only_edit_fields = ('is_superuser', 'is_system_auditor')
-        admin_only_edit_fields = ('last_name', 'first_name', 'username', 'is_active')
+        admin_only_edit_fields = ('username', 'is_active')
 
         fields_to_check = ()
         if not request.user.is_superuser:
@@ -3002,7 +3004,7 @@ class JobJobTasksList(BaseJobEventsList):
         # need stats on grandchildren, sorted by child.
         queryset = (JobEvent.objects.filter(parent__parent=parent_task,
                                             parent__event__in=STARTING_EVENTS)
-                    .values('parent__id', 'event', 'changed', 'failed')
+                    .values('parent__id', 'event', 'changed')
                     .annotate(num=Count('event'))
                     .order_by('parent__id'))
 
@@ -3063,13 +3065,10 @@ class JobJobTasksList(BaseJobEventsList):
             # make appropriate changes to the task data.
             for child_data in data.get(task_start_event.id, []):
                 if child_data['event'] == 'runner_on_failed':
+                    task_data['failed'] = True
                     task_data['host_count'] += child_data['num']
                     task_data['reported_hosts'] += child_data['num']
-                    if child_data['failed']:
-                        task_data['failed'] = True
-                        task_data['failed_count'] += child_data['num']
-                    else:
-                        task_data['skipped_count'] += child_data['num']
+                    task_data['failed_count'] += child_data['num']
                 elif child_data['event'] == 'runner_on_ok':
                     task_data['host_count'] += child_data['num']
                     task_data['reported_hosts'] += child_data['num']
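Note on the API changes above: UserList now sets permission_classes so that POST (creating a user at the top-level users endpoint) is allowed only for superusers, while reads keep going through the normal access checks. A minimal standalone sketch of that pattern using stock Django REST Framework primitives; the class and view names below are illustrative, not the actual AWX classes, and the queryset/serializer wiring is omitted:

# Hypothetical DRF view/permission pair mirroring the intent of
# UserPermission.check_post_permissions in the patch above.
from rest_framework import generics, permissions
from rest_framework.exceptions import PermissionDenied


class SuperuserCanCreate(permissions.BasePermission):
    def has_permission(self, request, view):
        # Reads fall through to whatever other checks the view performs.
        if request.method != 'POST':
            return True
        # Only superusers may create new objects through this view.
        if request.user and request.user.is_superuser:
            return True
        raise PermissionDenied()


class ExampleUserList(generics.ListCreateAPIView):
    # queryset and serializer_class omitted for brevity in this sketch.
    permission_classes = (SuperuserCanCreate,)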
diff --git a/awx/main/fields.py b/awx/main/fields.py
index 92ed69672f..e95dbc1ee7 100644
--- a/awx/main/fields.py
+++ b/awx/main/fields.py
@@ -54,10 +54,6 @@ class AutoOneToOneField(models.OneToOneField):
             AutoSingleRelatedObjectDescriptor(related))
 
 
-
-
-
-
 def resolve_role_field(obj, field):
     ret = []
 
@@ -71,8 +67,8 @@ def resolve_role_field(obj, field):
         return []
 
     if len(field_components) == 1:
-        Role_ = get_current_apps().get_model('main', 'Role')
-        if type(obj) is not Role_:
+        role_cls = str(get_current_apps().get_model('main', 'Role'))
+        if not str(type(obj)) == role_cls:
             raise Exception(smart_text('{} refers to a {}, not a Role'.format(field, type(obj))))
         ret.append(obj.id)
     else:
diff --git a/awx/main/management/commands/inventory_import.py b/awx/main/management/commands/inventory_import.py
index 21031f23cc..4ae521cd5c 100644
--- a/awx/main/management/commands/inventory_import.py
+++ b/awx/main/management/commands/inventory_import.py
@@ -22,6 +22,7 @@ import yaml
 from django.conf import settings
 from django.core.management.base import NoArgsCommand, CommandError
 from django.db import connection, transaction
+from django.utils.encoding import smart_text
 
 # AWX
 from awx.main.models import * # noqa
@@ -606,7 +607,7 @@ class Command(NoArgsCommand):
                 break
             instance_id = from_dict.get(key, default)
             from_dict = instance_id
-        return instance_id
+        return smart_text(instance_id)
 
     def _get_enabled(self, from_dict, default=None):
         '''
diff --git a/awx/main/migrations/0032_v302_credential_permissions_update.py b/awx/main/migrations/0032_v302_credential_permissions_update.py
index a961be6dcf..2587588e6d 100644
--- a/awx/main/migrations/0032_v302_credential_permissions_update.py
+++ b/awx/main/migrations/0032_v302_credential_permissions_update.py
@@ -25,5 +25,6 @@ class Migration(migrations.Migration):
             name='use_role',
             field=awx.main.fields.ImplicitRoleField(related_name='+', parent_role=[b'admin_role'], to='main.Role', null=b'True'),
         ),
+        migrations.RunPython(rbac.infer_credential_org_from_team),
         migrations.RunPython(rbac.rebuild_role_hierarchy),
     ]
diff --git a/awx/main/migrations/_rbac.py b/awx/main/migrations/_rbac.py
index b60ac65691..80ecc69ebc 100644
--- a/awx/main/migrations/_rbac.py
+++ b/awx/main/migrations/_rbac.py
@@ -2,7 +2,9 @@ import logging
 from time import time
 
 from django.utils.encoding import smart_text
+from django.db import transaction
 from django.db.models import Q
+from django.db.utils import IntegrityError
 
 from collections import defaultdict
 from awx.main.utils import getattrd
@@ -490,3 +492,11 @@ def rebuild_role_hierarchy(apps, schema_editor):
     logger.info('Done.')
 
 
+def infer_credential_org_from_team(apps, schema_editor):
+    Credential = apps.get_model('main', "Credential")
+    for cred in Credential.objects.exclude(deprecated_team__isnull=True):
+        try:
+            with transaction.atomic():
+                _update_credential_parents(cred.deprecated_team.organization, cred)
+        except IntegrityError:
+            logger.info("Organization<{}> credential for old Team<{}> credential already created".format(cred.deprecated_team.organization.pk, cred.pk))
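Note on infer_credential_org_from_team above: each credential is processed inside its own transaction.atomic() block, and an IntegrityError is logged and skipped, so re-running the migration (or hitting an organization credential that already exists) does not abort the whole RunPython step. A condensed sketch of that per-row idempotency pattern; the model and field names here are placeholders rather than the exact AWX schema:

# Sketch: per-row atomic blocks make a data migration safe to re-run.
from django.db import transaction
from django.db.utils import IntegrityError


def copy_team_org_to_credential(apps, schema_editor):
    Credential = apps.get_model('main', 'Credential')
    for cred in Credential.objects.exclude(deprecated_team__isnull=True):
        try:
            with transaction.atomic():
                # Hypothetical field copy; only this row rolls back on error.
                cred.organization = cred.deprecated_team.organization
                cred.save()
        except IntegrityError:
            # An equivalent organization credential already exists; skip it.
            pass

# In a migration module this would be wired up as:
#     migrations.RunPython(copy_team_org_to_credential)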
diff --git a/awx/main/tasks.py b/awx/main/tasks.py
index b77275c0fd..6ae8ccb63c 100644
--- a/awx/main/tasks.py
+++ b/awx/main/tasks.py
@@ -1253,9 +1253,14 @@ class RunInventoryUpdate(BaseTask):
 
             credential = inventory_update.credential
             if credential:
-                cp.set(section, 'hostname', credential.host)
+                cp.set(section, 'url', credential.host)
                 cp.set(section, 'username', credential.username)
                 cp.set(section, 'password', decrypt_field(credential, 'password'))
+                cp.set(section, 'ssl_verify', "false")
+
+            section = 'cache'
+            cp.add_section(section)
+            cp.set(section, 'max_age', "0")
 
         elif inventory_update.source == 'azure_rm':
             section = 'azure'
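Note on the RunInventoryUpdate change above: the generated plugin configuration now writes the credential host under a url option, turns ssl_verify off, and adds a [cache] section with max_age = 0 so the CloudForms plugin never serves stale cached results. Roughly, the resulting .ini looks like the sketch below; the section name and option keys follow the patch, while the host and credential values are invented examples (Python 2 ConfigParser, matching the surrounding code):

# Sketch of the cloudforms.ini that the task writes for the inventory plugin.
import ConfigParser

cp = ConfigParser.ConfigParser()

section = 'cloudforms'
cp.add_section(section)
cp.set(section, 'url', 'https://cfme.example.com')  # was 'hostname' before this patch
cp.set(section, 'username', 'admin')
cp.set(section, 'password', 'secret')
cp.set(section, 'ssl_verify', 'false')

section = 'cache'
cp.add_section(section)
cp.set(section, 'max_age', '0')  # effectively disables the plugin's cache

with open('cloudforms.ini', 'w') as f:
    cp.write(f)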
diff --git a/awx/main/tests/functional/api/test_credential.py b/awx/main/tests/functional/api/test_credential.py
index 3c79e62e33..f1e7a2b1dd 100644
--- a/awx/main/tests/functional/api/test_credential.py
+++ b/awx/main/tests/functional/api/test_credential.py
@@ -71,7 +71,6 @@ def test_create_user_credential_via_user_credentials_list_xfail(post, alice, bob
 def test_create_team_credential(post, get, team, organization, org_admin, team_member):
     response = post(reverse('api:credential_list'), {
         'team': team.id,
-        'organization': organization.id,
         'name': 'Some name',
         'username': 'someusername'
     }, org_admin)
@@ -81,6 +80,9 @@ def test_create_team_credential(post, get, team, organization, org_admin, team_m
     assert response.status_code == 200
     assert response.data['count'] == 1
 
+    # Assure that credential's organization is implictly set to team's org
+    assert response.data['results'][0]['summary_fields']['organization']['id'] == team.organization.id
+
 @pytest.mark.django_db
 def test_create_team_credential_via_team_credentials_list(post, get, team, org_admin, team_member):
     response = post(reverse('api:team_credentials_list', args=(team.pk,)), {
diff --git a/awx/main/tests/functional/test_rbac_credential.py b/awx/main/tests/functional/test_rbac_credential.py
index 8cac236fee..ae68f036d8 100644
--- a/awx/main/tests/functional/test_rbac_credential.py
+++ b/awx/main/tests/functional/test_rbac_credential.py
@@ -54,21 +54,40 @@ def test_credential_migration_team_member(credential, team, user, permissions):
 
     rbac.migrate_credential(apps, None)
 
-    # Admin permissions post migration
+    # User permissions post migration
     assert u in credential.use_role
+    assert u not in credential.admin_role
 
 @pytest.mark.django_db
 def test_credential_migration_team_admin(credential, team, user, permissions):
     u = user('user', False)
-    team.member_role.members.add(u)
+    team.admin_role.members.add(u)
     credential.deprecated_team = team
     credential.save()
 
     assert u not in credential.use_role
 
-    # Usage permissions post migration
+    # Admin permissions post migration
     rbac.migrate_credential(apps, None)
-    assert u in credential.use_role
+    assert u in credential.admin_role
+
+@pytest.mark.django_db
+def test_credential_migration_org_auditor(credential, team, org_auditor):
+    # Team's organization is the org_auditor's org
+    credential.deprecated_team = team
+    credential.save()
+
+    # No permissions pre-migration (this happens automatically so we patch this)
+    team.admin_role.children.remove(credential.admin_role)
+    team.member_role.children.remove(credential.use_role)
+    assert org_auditor not in credential.read_role
+
+    rbac.migrate_credential(apps, None)
+    rbac.infer_credential_org_from_team(apps, None)
+
+    # Read permissions post migration
+    assert org_auditor not in credential.use_role
+    assert org_auditor in credential.read_role
 
 def test_credential_access_superuser():
     u = User(username='admin', is_superuser=True)
diff --git a/awx/main/tests/old/users.py b/awx/main/tests/old/users.py
index de364ff161..df2d5e19bc 100644
--- a/awx/main/tests/old/users.py
+++ b/awx/main/tests/old/users.py
@@ -192,8 +192,12 @@ class UsersTest(BaseTest):
         self.post(url, expect=403, data=new_user, auth=self.get_other_credentials())
         self.post(url, expect=201, data=new_user, auth=self.get_super_credentials())
         self.post(url, expect=400, data=new_user, auth=self.get_super_credentials())
-        self.post(url, expect=201, data=new_user2, auth=self.get_normal_credentials())
-        self.post(url, expect=400, data=new_user2, auth=self.get_normal_credentials())
+        # org admin cannot create orphaned users
+        self.post(url, expect=403, data=new_user2, auth=self.get_normal_credentials())
+        # org admin can create org users
+        org_url = reverse('api:organization_users_list', args=(self.organizations[0].pk,))
+        self.post(org_url, expect=201, data=new_user2, auth=self.get_normal_credentials())
+        self.post(org_url, expect=400, data=new_user2, auth=self.get_normal_credentials())
         # Normal user cannot add users after his org is marked inactive.
         self.organizations[0].delete()
         new_user3 = dict(username='blippy3')
@@ -325,9 +329,9 @@ class UsersTest(BaseTest):
         detail_url = reverse('api:user_detail', args=(self.other_django_user.pk,))
         data = self.get(detail_url, expect=200, auth=self.get_other_credentials())
 
-        # can't change first_name, last_name, etc
+        # can change first_name, last_name, etc
         data['last_name'] = "NewLastName"
-        self.put(detail_url, data, expect=403, auth=self.get_other_credentials())
+        self.put(detail_url, data, expect=200, auth=self.get_other_credentials())
 
         # can't change username
         data['username'] = 'newUsername'
@@ -367,23 +371,20 @@ class UsersTest(BaseTest):
         url = reverse('api:user_list')
         data = dict(username='username', password='password')
         data2 = dict(username='username2', password='password2')
-        data = self.post(url, expect=201, data=data, auth=self.get_normal_credentials())
+        # but a regular user cannot create users
+        self.post(url, expect=403, data=data2, auth=self.get_other_credentials())
+        # org admins cannot create orphaned users
+        self.post(url, expect=403, data=data2, auth=self.get_normal_credentials())
+
+        # a super user can create new users
+        self.post(url, expect=201, data=data, auth=self.get_super_credentials())
 
         # verify that the login works...
        self.get(url, expect=200, auth=('username', 'password'))
 
-        # but a regular user cannot
-        data = self.post(url, expect=403, data=data2, auth=self.get_other_credentials())
-
-        # a super user can also create new users
-        data = self.post(url, expect=201, data=data2, auth=self.get_super_credentials())
-
-        # verify that the login works
-        self.get(url, expect=200, auth=('username2', 'password2'))
-
         # verify that if you post a user with a pk, you do not alter that user's password info
         mod = dict(id=self.super_django_user.pk, username='change', password='change')
-        data = self.post(url, expect=201, data=mod, auth=self.get_super_credentials())
+        self.post(url, expect=201, data=mod, auth=self.get_super_credentials())
         orig = User.objects.get(pk=self.super_django_user.pk)
         self.assertTrue(orig.username != 'change')
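Note on the rewritten user tests above: an organization admin now receives a 403 when posting to the global user list and is expected to create accounts through the organization's users sub-list instead. A hedged illustration of that behaviour from the client side; the host, credentials, organization id and exact URL layout are assumptions for the example (the paths mirror reverse('api:user_list') and reverse('api:organization_users_list')):

# Hypothetical client-side view of the new user-creation rules.
import requests

base = 'https://tower.example.com/api/v1'
org_admin = ('orgadmin', 'password')  # example credentials
new_user = {'username': 'blippy2', 'password': 'blippy2'}

# Orphaned user creation is now superuser-only.
r = requests.post(base + '/users/', json=new_user, auth=org_admin, verify=False)
assert r.status_code == 403

# Creating the same user inside an organization the admin manages succeeds.
r = requests.post(base + '/organizations/1/users/', json=new_user, auth=org_admin, verify=False)
assert r.status_code == 201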
diff --git a/awx/plugins/inventory/cloudforms.py b/awx/plugins/inventory/cloudforms.py
index 8d9854974f..65d95853d5 100755
--- a/awx/plugins/inventory/cloudforms.py
+++ b/awx/plugins/inventory/cloudforms.py
@@ -1,144 +1,462 @@
 #!/usr/bin/python
+# vim: set fileencoding=utf-8 :
+#
+# Copyright (C) 2016 Guido Günther
+#
+# This script is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with it. If not, see .
+#
+# This is loosely based on the foreman inventory script
+# -- Josh Preston
+#
-'''
-CloudForms external inventory script
-==================================================
-Generates inventory that Ansible can understand by making API request to CloudForms.
-Modeled after https://raw.githubusercontent.com/ansible/ansible/stable-1.9/plugins/inventory/ec2.py -jlabocki redhat.com or @jameslabocki on twitter -''' - -import os +from __future__ import print_function import argparse import ConfigParser +import os +import re +from time import time import requests -import json +from requests.auth import HTTPBasicAuth +import warnings -# This disables warnings and is not a good idea, but hey, this is a demo -# http://urllib3.readthedocs.org/en/latest/security.html#disabling-warnings -requests.packages.urllib3.disable_warnings() +try: + import json +except ImportError: + import simplejson as json class CloudFormsInventory(object): - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - def __init__(self): - ''' Main execution path ''' + """ + Main execution path + """ + self.inventory = dict() # A list of groups and the hosts in that group + self.hosts = dict() # Details about hosts in the inventory - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - # Index of hostname (address) to instance ID - self.index = {} - - # Read CLI arguments - self.read_settings() + # Parse CLI arguments self.parse_cli_args() - # Get Hosts - if self.args.list: - self.get_hosts() + # Read settings + self.read_settings() - # This doesn't exist yet and needs to be added + # Cache + if self.args.refresh_cache or not self.is_cache_valid(): + self.update_cache() + else: + self.load_inventory_from_cache() + self.load_hosts_from_cache() + + data_to_print = "" + + # Data to print if self.args.host: - data2 = {} - print json.dumps(data2, indent=2) + if self.args.debug: + print("Fetching host [%s]" % self.args.host) + data_to_print += self.get_host_info(self.args.host) + else: + self.inventory['_meta'] = {'hostvars': {}} + for hostname in self.hosts: + self.inventory['_meta']['hostvars'][hostname] = { + 'cloudforms': self.hosts[hostname], + } + # include the ansible_ssh_host in the top level + if 'ansible_ssh_host' in self.hosts[hostname]: + self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host'] - def parse_cli_args(self): - ''' Command line argument processing ''' + data_to_print += self.json_format_dict(self.inventory, self.args.pretty) - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms') - parser.add_argument('--list', action='store_true', default=False, - help='List instances (default: False)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - self.args = parser.parse_args() + print(data_to_print) + + def is_cache_valid(self): + """ + Determines if the cache files have expired, or if it is still valid + """ + if self.args.debug: + print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age)) + + if os.path.isfile(self.cache_path_hosts): + mod_time = os.path.getmtime(self.cache_path_hosts) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_inventory): + if self.args.debug: + print("Cache is still valid!") + return True + + if self.args.debug: + print("Cache is stale or does not exist.") + + return False def read_settings(self): - ''' Reads the settings from the cloudforms.ini file ''' - + """ + Reads the settings from the cloudforms.ini file + """ config = ConfigParser.SafeConfigParser() config_paths = [ 
- os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cloudforms.ini'), - "/opt/rh/cloudforms.ini", + os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini', + "/etc/ansible/cloudforms.ini", ] env_value = os.environ.get('CLOUDFORMS_INI_PATH') if env_value is not None: config_paths.append(os.path.expanduser(os.path.expandvars(env_value))) + if self.args.debug: + for config_path in config_paths: + print("Reading from configuration file [%s]" % config_path) + config.read(config_paths) - # Version - if config.has_option('cloudforms', 'version'): - self.cloudforms_version = config.get('cloudforms', 'version') + # CloudForms API related + if config.has_option('cloudforms', 'url'): + self.cloudforms_url = config.get('cloudforms', 'url') else: - self.cloudforms_version = "none" + self.cloudforms_url = None - # CloudForms Endpoint - if config.has_option('cloudforms', 'hostname'): - self.cloudforms_hostname = config.get('cloudforms', 'hostname') - else: - self.cloudforms_hostname = None + if not self.cloudforms_url: + warnings.warn("No url specified, expected something like 'https://cfme.example.com'") - # CloudForms Username if config.has_option('cloudforms', 'username'): self.cloudforms_username = config.get('cloudforms', 'username') else: - self.cloudforms_username = "none" + self.cloudforms_username = None + + if not self.cloudforms_username: + warnings.warn("No username specified, you need to specify a CloudForms username.") - # CloudForms Password if config.has_option('cloudforms', 'password'): - self.cloudforms_password = config.get('cloudforms', 'password') + self.cloudforms_pw = config.get('cloudforms', 'password') else: - self.cloudforms_password = "none" + self.cloudforms_pw = None - def get_hosts(self): - ''' Gets host from CloudForms ''' - r = requests.get("https://{0}/api/vms?expand=resources&attributes=all".format(self.cloudforms_hostname), - auth=(self.cloudforms_username, self.cloudforms_password), verify=False) - obj = r.json() + if not self.cloudforms_pw: + warnings.warn("No password specified, you need to specify a password for the CloudForms user.") - # Create groups+hosts based on host data - for resource in obj.get('resources', []): + if config.has_option('cloudforms', 'ssl_verify'): + self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify') + else: + self.cloudforms_ssl_verify = True - # Maintain backwards compat by creating `Dynamic_CloudForms` group - if 'Dynamic_CloudForms' not in self.inventory: - self.inventory['Dynamic_CloudForms'] = [] - self.inventory['Dynamic_CloudForms'].append(resource['name']) + if config.has_option('cloudforms', 'version'): + self.cloudforms_version = config.get('cloudforms', 'version') + else: + self.cloudforms_version = None - # Add host to desired groups - for key in ('vendor', 'type', 'location'): - if key in resource: - # Create top-level group - if key not in self.inventory: - self.inventory[key] = dict(children=[], vars={}, hosts=[]) - # if resource['name'] not in self.inventory[key]['hosts']: - # self.inventory[key]['hosts'].append(resource['name']) + if config.has_option('cloudforms', 'limit'): + self.cloudforms_limit = config.getint('cloudforms', 'limit') + else: + self.cloudforms_limit = 100 - # Create sub-group - if resource[key] not in self.inventory: - self.inventory[resource[key]] = dict(children=[], vars={}, hosts=[]) - # self.inventory[resource[key]]['hosts'].append(resource['name']) + if config.has_option('cloudforms', 'purge_actions'): + self.cloudforms_purge_actions = 
config.getboolean('cloudforms', 'purge_actions') + else: + self.cloudforms_purge_actions = True - # Add sub-group, as a child of top-level - if resource[key] not in self.inventory[key]['children']: - self.inventory[key]['children'].append(resource[key]) + if config.has_option('cloudforms', 'clean_group_keys'): + self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys') + else: + self.cloudforms_clean_group_keys = True + if config.has_option('cloudforms', 'nest_tags'): + self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags') + else: + self.cloudforms_nest_tags = False + + # Ansible related + try: + group_patterns = config.get('ansible', 'group_patterns') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + group_patterns = "[]" + + self.group_patterns = eval(group_patterns) + + # Cache related + try: + cache_path = os.path.expanduser(config.get('cache', 'path')) + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + cache_path = '.' + (script, ext) = os.path.splitext(os.path.basename(__file__)) + self.cache_path_hosts = cache_path + "/%s.hosts" % script + self.cache_path_inventory = cache_path + "/%s.inventory" % script + self.cache_max_age = config.getint('cache', 'max_age') + + if self.args.debug: + print("CloudForms settings:") + print("cloudforms_url = %s" % self.cloudforms_url) + print("cloudforms_username = %s" % self.cloudforms_username) + print("cloudforms_pw = %s" % self.cloudforms_pw) + print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify) + print("cloudforms_version = %s" % self.cloudforms_version) + print("cloudforms_limit = %s" % self.cloudforms_limit) + print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions) + print("Cache settings:") + print("cache_max_age = %s" % self.cache_max_age) + print("cache_path_hosts = %s" % self.cache_path_hosts) + print("cache_path_inventory = %s" % self.cache_path_inventory) + + def parse_cli_args(self): + """ + Command line argument processing + """ + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs') + parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') + parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') + parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)') + parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)') + self.args = parser.parse_args() + + def _get_json(self, url): + """ + Make a request and return the JSON + """ + results = [] + + ret = requests.get(url, + auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw), + verify=self.cloudforms_ssl_verify) + + ret.raise_for_status() + + try: + results = json.loads(ret.text) + except ValueError: + warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason)) + results = {} + + if self.args.debug: + print("=======================================================================") + print("=======================================================================") + print("=======================================================================") + 
print(ret.text) + print("=======================================================================") + print("=======================================================================") + print("=======================================================================") + + return results + + def _get_hosts(self): + """ + Get all hosts by paging through the results + """ + limit = self.cloudforms_limit + + page = 0 + last_page = False + + results = [] + + while not last_page: + offset = page * limit + ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit)) + results += ret['resources'] + if ret['subcount'] < limit: + last_page = True + page += 1 + + return results + + def update_cache(self): + """ + Make calls to cloudforms and save the output in a cache + """ + self.groups = dict() + self.hosts = dict() + + if self.args.debug: + print("Updating cache...") + + for host in self._get_hosts(): + # Ignore VMs that are not powered on + if host['power_state'] != 'on': + if self.args.debug: + print("Skipping %s because power_state = %s" % (host['name'], host['power_state'])) + continue + + # purge actions + if self.cloudforms_purge_actions and 'actions' in host: + del host['actions'] + + # Create ansible groups for tags + if 'tags' in host: + + # Create top-level group + if 'tags' not in self.inventory: + self.inventory['tags'] = dict(children=[], vars={}, hosts=[]) + + if not self.cloudforms_nest_tags: + # don't expand tags, just use them in a safe way + for group in host['tags']: + # Add sub-group, as a child of top-level + safe_key = self.to_safe(group['name']) + if safe_key: + if self.args.debug: + print("Adding sub-group '%s' to parent 'tags'" % safe_key) + + if safe_key not in self.inventory['tags']['children']: + self.push(self.inventory['tags'], 'children', safe_key) + + self.push(self.inventory, safe_key, host['name']) + + if self.args.debug: + print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key)) + else: + # expand the tags into nested groups / sub-groups + # Create nested groups for tags + safe_parent_tag_name = 'tags' + for tag in host['tags']: + tag_hierarchy = tag['name'][1:].split('/') + + if self.args.debug: + print("Working on list %s" % tag_hierarchy) + + for tag_name in tag_hierarchy: + if self.args.debug: + print("Working on tag_name = %s" % tag_name) + + safe_tag_name = self.to_safe(tag_name) + if self.args.debug: + print("Using sanitized name %s" % safe_tag_name) + + # Create sub-group + if safe_tag_name not in self.inventory: + self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[]) + + # Add sub-group, as a child of top-level + if safe_parent_tag_name: + if self.args.debug: + print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name)) + + if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']: + self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name) + + # Make sure the next one uses this one as it's parent + safe_parent_tag_name = safe_tag_name + + # Add the host to the last tag + self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name']) + + # Set ansible_ssh_host to the first available ip address + if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list): + host['ansible_ssh_host'] = host['ipaddresses'][0] + + # Create additional groups + for key in ('location', 'type', 'vendor'): + safe_key = self.to_safe(host[key]) + + # Create 
top-level group + if key not in self.inventory: + self.inventory[key] = dict(children=[], vars={}, hosts=[]) + + # Create sub-group + if safe_key not in self.inventory: + self.inventory[safe_key] = dict(children=[], vars={}, hosts=[]) + + # Add sub-group, as a child of top-level + if safe_key not in self.inventory[key]['children']: + self.push(self.inventory[key], 'children', safe_key) + + if key in host: # Add host to sub-group - if resource['name'] not in self.inventory[resource[key]]: - self.inventory[resource[key]]['hosts'].append(resource['name']) + self.push(self.inventory[safe_key], 'hosts', host['name']) - # Delete 'actions' key - del resource['actions'] + self.hosts[host['name']] = host + self.push(self.inventory, 'all', host['name']) - # Add _meta hostvars - self.inventory['_meta']['hostvars'][resource['name']] = resource + if self.args.debug: + print("Saving cached data") - print json.dumps(self.inventory, indent=2) + self.write_to_cache(self.hosts, self.cache_path_hosts) + self.write_to_cache(self.inventory, self.cache_path_inventory) + + def get_host_info(self, host): + """ + Get variables about a specific host + """ + if not self.hosts or len(self.hosts) == 0: + # Need to load cache from cache + self.load_hosts_from_cache() + + if host not in self.hosts: + if self.args.debug: + print("[%s] not found in cache." % host) + + # try updating the cache + self.update_cache() + + if host not in self.hosts: + if self.args.debug: + print("[%s] does not exist after cache update." % host) + # host might not exist anymore + return self.json_format_dict({}, self.args.pretty) + + return self.json_format_dict(self.hosts[host], self.args.pretty) + + def push(self, d, k, v): + """ + Safely puts a new entry onto an array. + """ + if k in d: + d[k].append(v) + else: + d[k] = [v] + + def load_inventory_from_cache(self): + """ + Reads the inventory from the cache file sets self.inventory + """ + cache = open(self.cache_path_inventory, 'r') + json_inventory = cache.read() + self.inventory = json.loads(json_inventory) + + def load_hosts_from_cache(self): + """ + Reads the cache from the cache file sets self.hosts + """ + cache = open(self.cache_path_hosts, 'r') + json_cache = cache.read() + self.hosts = json.loads(json_cache) + + def write_to_cache(self, data, filename): + """ + Writes data in JSON format to a file + """ + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def to_safe(self, word): + """ + Converts 'bad' characters in a string to underscores so they can be used as Ansible groups + """ + if self.cloudforms_clean_group_keys: + regex = "[^A-Za-z0-9\_]" + return re.sub(regex, "_", word.replace(" ", "")) + else: + return word + + def json_format_dict(self, data, pretty=False): + """ + Converts a dict to a JSON object and dumps it as a formatted string + """ + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) -# Run the script CloudFormsInventory() + diff --git a/awx/plugins/inventory/foreman.py b/awx/plugins/inventory/foreman.py index ce057690df..ddcb912fd5 100755 --- a/awx/plugins/inventory/foreman.py +++ b/awx/plugins/inventory/foreman.py @@ -1,8 +1,6 @@ -#!/usr/bin/python +#!/usr/bin/env python # vim: set fileencoding=utf-8 : # -# NOTE FOR TOWER: change foreman_ to sattelite_ for the group prefix -# # Copyright (C) 2016 Guido Günther # # This script is free software: you can redistribute it and/or modify @@ -41,6 +39,7 @@ class ForemanInventory(object): self.inventory = 
dict() # A list of groups and the hosts in that group self.cache = dict() # Details about hosts in the inventory self.params = dict() # Params of each host + self.facts = dict() # Facts of each host self.hostgroups = dict() # host groups # Read settings and parse CLI arguments @@ -55,6 +54,7 @@ class ForemanInventory(object): else: self.load_inventory_from_cache() self.load_params_from_cache() + self.load_facts_from_cache() self.load_cache_from_cache() data_to_print = "" @@ -69,6 +69,9 @@ class ForemanInventory(object): 'foreman': self.cache[hostname], 'foreman_params': self.params[hostname], } + if self.want_facts: + self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname] + data_to_print += self.json_format_dict(self.inventory, True) print(data_to_print) @@ -81,7 +84,8 @@ class ForemanInventory(object): current_time = time() if (mod_time + self.cache_max_age) > current_time: if (os.path.isfile(self.cache_path_inventory) and - os.path.isfile(self.cache_path_params)): + os.path.isfile(self.cache_path_params) and + os.path.isfile(self.cache_path_facts)): return True return False @@ -114,6 +118,16 @@ class ForemanInventory(object): self.group_patterns = eval(group_patterns) + try: + self.group_prefix = config.get('ansible', 'group_prefix') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.group_prefix = "foreman_" + + try: + self.want_facts = config.getboolean('ansible', 'want_facts') + except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): + self.want_facts = True + # Cache related try: cache_path = os.path.expanduser(config.get('cache', 'path')) @@ -123,6 +137,7 @@ class ForemanInventory(object): self.cache_path_cache = cache_path + "/%s.cache" % script self.cache_path_inventory = cache_path + "/%s.index" % script self.cache_path_params = cache_path + "/%s.params" % script + self.cache_path_facts = cache_path + "/%s.facts" % script self.cache_max_age = config.getint('cache', 'max_age') def parse_cli_args(self): @@ -135,7 +150,7 @@ class ForemanInventory(object): help='Force refresh of cache by making API requests to foreman (default: False - use cache files)') self.args = parser.parse_args() - def _get_json(self, url): + def _get_json(self, url, ignore_errors=None): page = 1 results = [] while True: @@ -143,10 +158,14 @@ class ForemanInventory(object): auth=HTTPBasicAuth(self.foreman_user, self.foreman_pw), verify=self.foreman_ssl_verify, params={'page': page, 'per_page': 250}) + if ignore_errors and ret.status_code in ignore_errors: + break ret.raise_for_status() json = ret.json() if not json.has_key('results'): return json + if type(json['results']) == type({}): + return json['results'] results = results + json['results'] if len(results) >= json['total']: break @@ -162,38 +181,44 @@ class ForemanInventory(object): self.hostgroups[hid] = self._get_json(url) return self.hostgroups[hid] - def _get_params_by_id(self, hid): - url = "%s/api/v2/hosts/%s/parameters" % (self.foreman_url, hid) + def _get_all_params_by_id(self, hid): + url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid) + ret = self._get_json(url, [404]) + if ret == []: ret = {} + return ret.get('all_parameters', {}) + + def _get_facts_by_id(self, hid): + url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid) return self._get_json(url) def _resolve_params(self, host): """ - Resolve all host group params of the host using the top level - hostgroup and the ancestry. 
+ Fetch host params and convert to dict """ - hostgroup_id = host['hostgroup_id'] - paramgroups = [] params = {} - if hostgroup_id: - hostgroup = self._get_hostgroup_by_id(hostgroup_id) - ancestry_path = hostgroup.get('ancestry', '') - ancestry = ancestry_path.split('/') if ancestry_path is not None else [] - - # Append top level hostgroup last to overwrite lower levels - # values - ancestry.append(hostgroup_id) - paramgroups = [self._get_hostgroup_by_id(hostgroup_id)['parameters'] - for hostgroup_id in ancestry] - - paramgroups += [self._get_params_by_id(host['id'])] - for paramgroup in paramgroups: - for param in paramgroup: - name = param['name'] - params[name] = param['value'] + for param in self._get_all_params_by_id(host['id']): + name = param['name'] + params[name] = param['value'] return params + def _get_facts(self, host): + """ + Fetch all host facts of the host + """ + if not self.want_facts: + return {} + + ret = self._get_facts_by_id(host['id']) + if len(ret.values()) == 0: + facts = {} + elif len(ret.values()) == 1: + facts = ret.values()[0] + else: + raise ValueError("More than one set of facts returned for '%s'" % host) + return facts + def update_cache(self): """Make calls to foreman and save the output in a cache""" @@ -203,11 +228,17 @@ class ForemanInventory(object): for host in self._get_hosts(): dns_name = host['name'] - # Create ansible groups for hostgroup, location and organization - for group in ['hostgroup', 'location', 'organization']: + # Create ansible groups for hostgroup, environment, location and organization + for group in ['hostgroup', 'environment', 'location', 'organization']: val = host.get('%s_name' % group) if val: - safe_key = self.to_safe('satellite_%s_%s' % (group, val.lower())) + safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower())) + self.push(self.inventory, safe_key, dns_name) + + for group in ['lifecycle_environment', 'content_view']: + val = host.get('content_facet_attributes', {}).get('%s_name' % group) + if val: + safe_key = self.to_safe('%s%s_%s' % (self.group_prefix, group, val.lower())) self.push(self.inventory, safe_key, dns_name) params = self._resolve_params(host) @@ -231,11 +262,13 @@ class ForemanInventory(object): self.cache[dns_name] = host self.params[dns_name] = params + self.facts[dns_name] = self._get_facts(host) self.push(self.inventory, 'all', dns_name) self.write_to_cache(self.cache, self.cache_path_cache) self.write_to_cache(self.inventory, self.cache_path_inventory) self.write_to_cache(self.params, self.cache_path_params) + self.write_to_cache(self.facts, self.cache_path_facts) def get_host_info(self): """ Get variables about a specific host """ @@ -274,6 +307,14 @@ class ForemanInventory(object): json_params = cache.read() self.params = json.loads(json_params) + def load_facts_from_cache(self): + """ Reads the index from the cache file sets self.index """ + if not self.want_facts: + return + cache = open(self.cache_path_facts, 'r') + json_facts = cache.read() + self.facts = json.loads(json_facts) + def load_cache_from_cache(self): """ Reads the cache from the cache file sets self.cache """ @@ -301,4 +342,7 @@ class ForemanInventory(object): else: return json.dumps(data) -ForemanInventory() +if __name__ == '__main__': + ForemanInventory() + + diff --git a/awx/sso/__init__.py b/awx/sso/__init__.py index e484e62be1..347aedfeee 100644 --- a/awx/sso/__init__.py +++ b/awx/sso/__init__.py @@ -1,2 +1,21 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. 
+
+# Python
+import threading
+
+# Monkeypatch xmlsec.initialize() to only run once (https://github.com/ansible/ansible-tower/issues/3241).
+xmlsec_init_lock = threading.Lock()
+xmlsec_initialized = False
+
+import dm.xmlsec.binding
+original_xmlsec_initialize = dm.xmlsec.binding.initialize
+
+def xmlsec_initialize(*args, **kwargs):
+    global xmlsec_init_lock, xmlsec_initialized, original_xmlsec_initialize
+    with xmlsec_init_lock:
+        if not xmlsec_initialized:
+            original_xmlsec_initialize(*args, **kwargs)
+            xmlsec_initialized = True
+
+dm.xmlsec.binding.initialize = xmlsec_initialize
diff --git a/awx/ui/client/src/management-jobs/scheduler/schedulerForm.partial.html b/awx/ui/client/src/management-jobs/scheduler/schedulerForm.partial.html
index 0592a0cfeb..b619ead47a 100644
--- a/awx/ui/client/src/management-jobs/scheduler/schedulerForm.partial.html
+++ b/awx/ui/client/src/management-jobs/scheduler/schedulerForm.partial.html
@@ -38,9 +38,6 @@
@@ -487,9 +484,6 @@
diff --git a/awx/ui/client/src/scheduler/schedulerForm.partial.html b/awx/ui/client/src/scheduler/schedulerForm.partial.html
index e3fffecec7..fb13937b64 100644
--- a/awx/ui/client/src/scheduler/schedulerForm.partial.html
+++ b/awx/ui/client/src/scheduler/schedulerForm.partial.html
@@ -38,9 +38,6 @@
@@ -469,9 +466,6 @@
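Note on the awx/sso/__init__.py change above: dm.xmlsec.binding.initialize is wrapped so that concurrent callers serialize on a lock and only the first call reaches the real initializer, avoiding repeated xmlsec initialization across threads. The same run-once idea in generic form; some_library here is a stand-in, not a real module:

import threading


def make_run_once(func):
    # Wrap func so that only the first caller actually executes it;
    # later calls (from any thread) return without re-running it.
    lock = threading.Lock()
    state = {'done': False}

    def wrapper(*args, **kwargs):
        with lock:
            if not state['done']:
                func(*args, **kwargs)
                state['done'] = True
    return wrapper

# Usage (hypothetical):
#   import some_library
#   some_library.initialize = make_run_once(some_library.initialize)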