From 300020df0711670305e7ceb463d14770935bfbb0 Mon Sep 17 00:00:00 2001 From: Aaron Tan Date: Wed, 10 Aug 2016 14:33:25 -0400 Subject: [PATCH 01/37] Make system admin and system auditor visible to orphaned users. --- awx/main/models/rbac.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/awx/main/models/rbac.py b/awx/main/models/rbac.py index 3cb016ffde..8592a9c632 100644 --- a/awx/main/models/rbac.py +++ b/awx/main/models/rbac.py @@ -376,12 +376,13 @@ class Role(models.Model): @staticmethod @check_singleton - def visible_roles(user): + def visible_roles(user, include_super=True): sql_params = { 'ancestors_table': Role.ancestors.through._meta.db_table, 'parents_table': Role.parents.through._meta.db_table, 'roles_table': Role._meta.db_table, - 'ids': ','.join(str(x) for x in user.roles.values_list('id', flat=True)) + 'ids': ','.join(str(x) for x in user.roles.values_list('id', flat=True)), + 'mandatories': ','.join(('\'system_administrator\'', '\'system_auditor\'')), } qs = Role.objects.extra( @@ -394,6 +395,17 @@ class Role(models.Model): ) ''' % sql_params] ) + if include_super: + super_qs = Role.objects.extra( + where = [''' + %(roles_table)s.id IN ( + SELECT DISTINCT visible_roles_t3.id + FROM %(roles_table)s as visible_roles_t3 + WHERE visible_roles_t3.singleton_name IN (%(mandatories)s) + ) + ''' % sql_params] + ) + qs = qs | super_qs return qs @staticmethod From b719b7276f34873e4d43958651ea0e2bd1d6e7ba Mon Sep 17 00:00:00 2001 From: jangsutsr Date: Sun, 14 Aug 2016 20:10:45 -0400 Subject: [PATCH 02/37] Refactor for better performance. --- awx/api/views.py | 11 ++++++++++- awx/main/models/rbac.py | 14 +------------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/awx/api/views.py b/awx/api/views.py index efbbecf10e..189d687222 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -3830,7 +3830,16 @@ class RoleList(ListAPIView): new_in_300 = True def get_queryset(self): - return Role.visible_roles(self.request.user) + result = Role.visible_roles(self.request.user) + # Sanity check: is the requesting user an orphaned non-admin/auditor? + # If yes, make system admin/auditor mandatorily visible.
+ if not self.request.user.organizations.exists() and\ + not self.request.user.is_superuser and\ + not self.request.user.is_system_auditor: + mandatories = ('system_administrator', 'system_auditor') + super_qs = Role.objects.filter(singleton_name__in=mandatories) + result = result | super_qs + return result class RoleDetail(RetrieveAPIView): diff --git a/awx/main/models/rbac.py b/awx/main/models/rbac.py index 8592a9c632..f469c1a7ac 100644 --- a/awx/main/models/rbac.py +++ b/awx/main/models/rbac.py @@ -376,13 +376,12 @@ class Role(models.Model): @staticmethod @check_singleton - def visible_roles(user, include_super=True): + def visible_roles(user): sql_params = { 'ancestors_table': Role.ancestors.through._meta.db_table, 'parents_table': Role.parents.through._meta.db_table, 'roles_table': Role._meta.db_table, 'ids': ','.join(str(x) for x in user.roles.values_list('id', flat=True)), - 'mandatories': ','.join(('\'system_administrator\'', '\'system_auditor\'')), } qs = Role.objects.extra( @@ -395,17 +394,6 @@ class Role(models.Model): ) ''' % sql_params] ) - if include_super: - super_qs = Role.objects.extra( - where = [''' - %(roles_table)s.id IN ( - SELECT DISTINCT visible_roles_t3.id - FROM %(roles_table)s as visible_roles_t3 - WHERE visible_roles_t3.singleton_name IN (%(mandatories)s) - ) - ''' % sql_params] - ) - qs = qs | super_qs return qs @staticmethod From 89cbceeab88dad7b35f43f7b9c8c5f136e15b379 Mon Sep 17 00:00:00 2001 From: Aaron Tan Date: Tue, 20 Sep 2016 11:39:44 -0400 Subject: [PATCH 03/37] Functional test added. --- awx/main/tests/functional/api/test_role.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 awx/main/tests/functional/api/test_role.py diff --git a/awx/main/tests/functional/api/test_role.py b/awx/main/tests/functional/api/test_role.py new file mode 100644 index 0000000000..94215521a5 --- /dev/null +++ b/awx/main/tests/functional/api/test_role.py @@ -0,0 +1,13 @@ +import pytest + +from django.core.urlresolvers import reverse + +@pytest.mark.django_db +def test_admin_visible_to_orphaned_users(get, alice): + names = set() + + response = get(reverse('api:role_list'), user=alice) + for item in response.data['results']: + names.add(item['name']) + assert 'System Auditor' in names + assert 'System Administrator' in names From 2f24d286385edc75ddf9c17e6ed54f53f5e14a04 Mon Sep 17 00:00:00 2001 From: AlanCoding Date: Mon, 26 Sep 2016 10:35:29 -0400 Subject: [PATCH 04/37] fix bug where user content_object has no name attribute in access_list --- awx/api/serializers.py | 5 ++++- awx/main/tests/functional/api/test_rbac_displays.py | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 5b45c9de5b..79199c27a0 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -1581,10 +1581,13 @@ class ResourceAccessListElementSerializer(UserSerializer): def format_role_perm(role): role_dict = { 'id': role.id, 'name': role.name, 'description': role.description} - if role.content_type is not None: + try: role_dict['resource_name'] = role.content_object.name role_dict['resource_type'] = role.content_type.name role_dict['related'] = reverse_gfk(role.content_object) + except: + pass + if role.content_type is not None: role_dict['user_capabilities'] = {'unattach': requesting_user.can_access( Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)} else: diff --git a/awx/main/tests/functional/api/test_rbac_displays.py b/awx/main/tests/functional/api/test_rbac_displays.py 
index eb94e01dff..45b4a8f832 100644 --- a/awx/main/tests/functional/api/test_rbac_displays.py +++ b/awx/main/tests/functional/api/test_rbac_displays.py @@ -221,6 +221,12 @@ class TestAccessListCapabilities: direct_access_list = response.data['results'][0]['summary_fields']['direct_access'] assert direct_access_list[0]['role']['user_capabilities']['unattach'] == 'foobar' + def test_user_access_list_direct_access_capability(self, rando, get): + "When a user views their own access list, they cannot unattach their admin role" + response = get(reverse('api:user_access_list', args=(rando.id,)), rando) + direct_access_list = response.data['results'][0]['summary_fields']['direct_access'] + assert not direct_access_list[0]['role']['user_capabilities']['unattach'] + @pytest.mark.django_db def test_team_roles_unattach(mocker, team, team_member, inventory, mock_access_method, get): From 3951f63df57b6df0f45b729cd7c083cd6584b508 Mon Sep 17 00:00:00 2001 From: AlanCoding Date: Mon, 26 Sep 2016 11:31:18 -0400 Subject: [PATCH 05/37] add exception type to try-except for access_list details --- awx/api/serializers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 79199c27a0..6e8a91c72a 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -1585,7 +1585,7 @@ class ResourceAccessListElementSerializer(UserSerializer): role_dict['resource_name'] = role.content_object.name role_dict['resource_type'] = role.content_type.name role_dict['related'] = reverse_gfk(role.content_object) - except: + except AttributeError: pass if role.content_type is not None: role_dict['user_capabilities'] = {'unattach': requesting_user.can_access( From 6ebe45b1bd46c523479d2b5ff5806522c04c13d4 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 26 Sep 2016 22:14:47 -0400 Subject: [PATCH 06/37] Configure Tower in Tower: * Add separate Django app for configuration: awx.conf. * Migrate from existing main.TowerSettings model to conf.Setting. * Add settings wrapper to allow get/set/del via django.conf.settings. * Update existing references to tower_settings to use django.conf.settings. * Add a settings registry to allow for each Django app to register configurable settings. * Support setting validation and conversion using Django REST Framework fields. * Add /api/v1/settings/ to display a list of setting categories. * Add /api/v1/settings/<category_slug>/ to display all settings in a category as a single object. * Allow PUT/PATCH to update setting singleton, DELETE to reset to defaults. * Add "all" category to display all settings across categories. * Add "changed" category to display only settings configured in the database. * Support per-user settings via "user" category (/api/v1/settings/user/). * Support defaults for user settings via "user-defaults" category (/api/v1/settings/user-defaults/). * Update serializer metadata to support category, category_slug and placeholder on OPTIONS responses. * Update serializer metadata to handle child fields of a list/dict. * Hide raw data form in browsable API for OPTIONS and DELETE. * Combine existing licensing code into single "TaskEnhancer" class. * Move license helper functions from awx.api.license into awx.conf.license. * Update /api/v1/config/ to read/verify/update license using TaskEnhancer and settings wrapper. * Add support for caching settings accessed via settings wrapper. * Invalidate cached settings when Setting model changes or is deleted.
* Preload all database settings into cache on first access via settings wrapper. * Add support for read-only settings that can update their value depending on other settings. * Use setting_changed signal whenever a setting changes. * Register configurable authentication, jobs, system and ui settings. * Register configurable LDAP, RADIUS and social auth settings. * Add custom fields and validators for URL, LDAP, RADIUS and social auth settings. * Rewrite existing validator for Credential ssh_private_key to support validating private keys, certs or combinations of both. * Get all unit/functional tests working with above changes. * Add "migrate_to_database_settings" command to determine settings to be migrated into the database and comment them out when set in Python settings files. * Add support for migrating license key from file to database. * Remove database-configurable settings from local_settings.py example files. * Update setup role to no longer install files for database-configurable settings. f 94ff6ee More settings work. f af4c4e0 Even more db settings stuff. f 96ea9c0 More settings, attempt at singleton serializer for settings. f 937c760 More work on singleton/category views in API, add code to comment out settings in Python files, work on command to migrate settings to database. f 425b0d3 Minor fixes for sprint demo. f ea402a4 Add support for read-only settings, cleanup license engine, get license support working with DB settings. f ec289e4 Rename migration, minor fixmes, update setup role. f 603640b Rewrite key/cert validator, finish adding social auth fields, hook up signals for setting_changed, use None to imply a setting is not set. f 67d1b5a Get functional/unit tests passing. f 2919b62 Flake8 fixes. f e62f421 Add redbaron to requirements, get file to database migration working (except for license). f c564508 Add support for migrating license file. f 982f767 Add support for regex in social map fields.
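For reviewers, a minimal usage sketch of the registry/wrapper pair described above (the register() call mirrors the ones added in awx/api/conf.py and awx/conf/conf.py below; the setting name MY_APP_TIMEOUT and its kwargs are hypothetical):

    # myapp/conf.py -- discovered by awx.conf.autodiscover() at app load
    from django.utils.translation import ugettext_lazy as _
    from awx.conf import fields, register

    register(
        'MY_APP_TIMEOUT',                 # hypothetical example setting
        field_class=fields.IntegerField,  # any DRF field (sub)class validates/converts
        min_value=0,
        default=30,
        label=_('My App Timeout'),
        help_text=_('Example setting registered by an app.'),
        category=_('My App'),
        category_slug='my-app',
    )

    # Elsewhere, the settings wrapper exposes the value through the normal
    # Django settings object (get/set/del, per the bullets above):
    from django.conf import settings
    settings.MY_APP_TIMEOUT        # default (30) until overridden in the database
    settings.MY_APP_TIMEOUT = 60   # validated, then persisted to conf.Setting
    del settings.MY_APP_TIMEOUT    # clears the DB row, reverting to the default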
--- .gitignore | 2 +- awx/api/authentication.py | 6 +- awx/api/conf.py | 35 + awx/api/generics.py | 1 + awx/api/license.py | 51 - awx/api/management/commands/uses_mongo.py | 5 +- awx/api/metadata.py | 25 +- awx/api/renderers.py | 14 +- awx/api/serializers.py | 65 +- awx/api/templates/api/_new_in_awx.md | 1 + awx/api/urls.py | 6 +- awx/api/views.py | 130 +-- awx/conf/__init__.py | 18 + awx/conf/access.py | 45 + awx/conf/apps.py | 16 + awx/conf/conf.py | 95 ++ awx/conf/fields.py | 55 + awx/conf/license.py | 52 + awx/conf/management/__init__.py | 0 awx/conf/management/commands/__init__.py | 0 .../commands/migrate_to_database_settings.py | 328 ++++++ awx/conf/migrations/0001_initial.py | 30 + .../0002_v310_copy_tower_settings.py | 79 ++ awx/conf/migrations/__init__.py | 0 awx/conf/models.py | 59 ++ awx/conf/registry.py | 121 +++ awx/conf/serializers.py | 79 ++ awx/conf/settings.py | 273 +++++ awx/conf/signals.py | 69 ++ awx/conf/urls.py | 15 + awx/conf/utils.py | 110 ++ awx/conf/views.py | 129 +++ awx/main/__init__.py | 2 + awx/main/access.py | 26 +- awx/main/apps.py | 9 + awx/main/conf.py | 209 +++- awx/main/management/commands/cleanup_facts.py | 2 +- .../management/commands/inventory_import.py | 8 +- .../management/commands/update_instance.py | 2 +- awx/main/middleware.py | 4 +- .../0036_v310_remove_tower_settings.py | 22 + awx/main/migrations/_old_access.py | 35 +- awx/main/models/__init__.py | 2 - awx/main/models/ad_hoc_commands.py | 5 +- awx/main/models/configuration.py | 84 -- awx/main/models/credential.py | 137 +-- awx/main/models/inventory.py | 3 +- awx/main/models/jobs.py | 9 +- awx/main/models/organization.py | 9 +- awx/main/models/projects.py | 3 +- awx/main/registrar.py | 7 +- awx/main/signals.py | 11 +- .../south_migrations/0071_v240_changes.py | 2 +- awx/main/tasks.py | 24 +- awx/main/tests/base.py | 24 +- awx/main/tests/data/ssh.py | 28 + .../functional/api/test_activity_streams.py | 33 +- .../functional/api/test_fact_versions.py | 4 +- .../tests/functional/api/test_fact_view.py | 4 +- .../functional/api/test_organizations.py | 4 +- .../tests/functional/api/test_survey_spec.py | 2 +- .../functional/commands/test_cleanup_facts.py | 4 +- .../tests/functional/core/test_licenses.py | 66 +- awx/main/tests/functional/test_rbac_api.py | 2 +- awx/main/tests/old/ad_hoc.py | 7 +- awx/main/tests/old/users.py | 3 +- awx/main/tests/unit/conftest.py | 6 + awx/main/tests/unit/test_credentials.py | 56 - awx/main/tests/unit/test_tasks.py | 11 +- awx/main/tests/unit/test_validators.py | 91 ++ awx/main/utils.py | 14 +- awx/main/validators.py | 168 +++ awx/settings/defaults.py | 203 +--- awx/settings/development.py | 17 +- awx/settings/local_settings.py.docker_compose | 408 +------- awx/settings/local_settings.py.example | 408 +------- awx/settings/postprocess.py | 38 - awx/settings/production.py | 11 +- awx/sso/__init__.py | 3 + awx/sso/apps.py | 9 + awx/sso/backends.py | 18 +- awx/sso/conf.py | 967 ++++++++++++++++++ awx/sso/fields.py | 598 +++++++++++ awx/sso/pipeline.py | 2 +- awx/sso/validators.py | 60 ++ awx/ui/__init__.py | 2 + awx/ui/apps.py | 9 + awx/ui/conf.py | 23 + docs/licenses/baron.txt | 165 +++ docs/licenses/redbaron.txt | 165 +++ docs/licenses/rply.txt | 27 + requirements/requirements.txt | 3 + 92 files changed, 4401 insertions(+), 1791 deletions(-) create mode 100644 awx/api/conf.py delete mode 100644 awx/api/license.py create mode 100644 awx/conf/__init__.py create mode 100644 awx/conf/access.py create mode 100644 awx/conf/apps.py create mode 100644 awx/conf/conf.py create mode 
100644 awx/conf/fields.py create mode 100644 awx/conf/license.py create mode 100644 awx/conf/management/__init__.py create mode 100644 awx/conf/management/commands/__init__.py create mode 100644 awx/conf/management/commands/migrate_to_database_settings.py create mode 100644 awx/conf/migrations/0001_initial.py create mode 100644 awx/conf/migrations/0002_v310_copy_tower_settings.py create mode 100644 awx/conf/migrations/__init__.py create mode 100644 awx/conf/models.py create mode 100644 awx/conf/registry.py create mode 100644 awx/conf/serializers.py create mode 100644 awx/conf/settings.py create mode 100644 awx/conf/signals.py create mode 100644 awx/conf/urls.py create mode 100755 awx/conf/utils.py create mode 100644 awx/conf/views.py create mode 100644 awx/main/apps.py create mode 100644 awx/main/migrations/0036_v310_remove_tower_settings.py delete mode 100644 awx/main/models/configuration.py create mode 100644 awx/main/tests/unit/conftest.py delete mode 100644 awx/main/tests/unit/test_credentials.py create mode 100644 awx/main/tests/unit/test_validators.py create mode 100644 awx/main/validators.py delete mode 100644 awx/settings/postprocess.py create mode 100644 awx/sso/apps.py create mode 100644 awx/sso/conf.py create mode 100644 awx/sso/fields.py create mode 100644 awx/sso/validators.py create mode 100644 awx/ui/apps.py create mode 100644 awx/ui/conf.py create mode 100644 docs/licenses/baron.txt create mode 100644 docs/licenses/redbaron.txt create mode 100644 docs/licenses/rply.txt diff --git a/.gitignore b/.gitignore index 7e9ce67b63..20e90fc35c 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,7 @@ .tags1 # Tower -awx/settings/local_settings.py* +awx/settings/local_*.py* awx/*.sqlite3 awx/*.sqlite3_* awx/job_status diff --git a/awx/api/authentication.py b/awx/api/authentication.py index c8143facbd..6be5447507 100644 --- a/awx/api/authentication.py +++ b/awx/api/authentication.py @@ -6,6 +6,7 @@ import urllib import logging # Django +from django.conf import settings from django.utils.timezone import now as tz_now from django.utils.encoding import smart_text @@ -16,7 +17,6 @@ from rest_framework import HTTP_HEADER_ENCODING # AWX from awx.main.models import UnifiedJob, AuthToken -from awx.main.conf import tower_settings logger = logging.getLogger('awx.api.authentication') @@ -93,7 +93,7 @@ class TokenAuthentication(authentication.TokenAuthentication): # Token invalidated due to session limit config being reduced # Session limit reached invalidation will also take place on authentication - if tower_settings.AUTH_TOKEN_PER_USER != -1: + if settings.AUTH_TOKEN_PER_USER != -1: if not token.in_valid_tokens(now=now): token.invalidate(reason='limit_reached') raise exceptions.AuthenticationFailed(AuthToken.reason_long('limit_reached')) @@ -123,6 +123,8 @@ class TokenGetAuthentication(TokenAuthentication): class LoggedBasicAuthentication(authentication.BasicAuthentication): def authenticate(self, request): + if not settings.AUTH_BASIC_ENABLED: + return ret = super(LoggedBasicAuthentication, self).authenticate(request) if ret: username = ret[0].username if ret[0] else '' diff --git a/awx/api/conf.py b/awx/api/conf.py new file mode 100644 index 0000000000..6bbfee1d3d --- /dev/null +++ b/awx/api/conf.py @@ -0,0 +1,35 @@ +# Django +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx.conf import fields, register + + +register( + 'AUTH_TOKEN_EXPIRATION', + field_class=fields.IntegerField, + min_value=60, + label=_('Idle Time Force Log Out'), + help_text=_('Number of seconds 
that a user is inactive before they will need to login again.'), + category=_('Authentication'), + category_slug='authentication', +) + +register( + 'AUTH_TOKEN_PER_USER', + field_class=fields.IntegerField, + min_value=-1, + label=_('Maximum number of simultaneous logins'), + help_text=_('Maximum number of simultaneous logins a user may have. To disable enter -1.'), + category=_('Authentication'), + category_slug='authentication', +) + +register( + 'AUTH_BASIC_ENABLED', + field_class=fields.BooleanField, + label=_('Enable HTTP Basic Auth'), + help_text=_('Enable HTTP Basic Auth for the API Browser.'), + category=_('Authentication'), + category_slug='authentication', +) diff --git a/awx/api/generics.py b/awx/api/generics.py index 1a3e5e2910..8e6471c1c7 100644 --- a/awx/api/generics.py +++ b/awx/api/generics.py @@ -150,6 +150,7 @@ class APIView(views.APIView): 'new_in_230': getattr(self, 'new_in_230', False), 'new_in_240': getattr(self, 'new_in_240', False), 'new_in_300': getattr(self, 'new_in_300', False), + 'new_in_310': getattr(self, 'new_in_310', False), } def get_description(self, html=False): diff --git a/awx/api/license.py b/awx/api/license.py deleted file mode 100644 index 1b225e3a1c..0000000000 --- a/awx/api/license.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -from rest_framework.exceptions import APIException - -from awx.main.task_engine import TaskSerializer -from awx.main.utils import memoize - - -class LicenseForbids(APIException): - status_code = 402 - default_detail = 'Your Tower license does not allow that.' - - -@memoize() -def get_license(show_key=False, bypass_database=False): - """Return a dictionary representing the license currently in - place on this Tower instance. - """ - license_reader = TaskSerializer() - if bypass_database: - return license_reader.from_file(show_key=show_key) - return license_reader.from_database(show_key=show_key) - - -def feature_enabled(name, bypass_database=False): - """Return True if the requested feature is enabled, False otherwise. - If the feature does not exist, raise KeyError. - """ - license = get_license(bypass_database=bypass_database) - - # Sanity check: If there is no license, the feature is considered - # to be off. - if 'features' not in license: - return False - - # Return the correct feature flag. - return license['features'].get(name, False) - -def feature_exists(name): - """Return True if the requested feature is enabled, False otherwise. - If the feature does not exist, raise KeyError. - """ - license = get_license() - - # Sanity check: If there is no license, the feature is considered - # to be off. - if 'features' not in license: - return False - - return name in license['features'] diff --git a/awx/api/management/commands/uses_mongo.py b/awx/api/management/commands/uses_mongo.py index 8ea6404f4a..6f77ee47fa 100644 --- a/awx/api/management/commands/uses_mongo.py +++ b/awx/api/management/commands/uses_mongo.py @@ -6,7 +6,7 @@ import sys from optparse import make_option from django.core.management.base import BaseCommand from awx.main.ha import is_ha_environment -from awx.main.task_engine import TaskSerializer +from awx.main.task_engine import TaskEnhancer class Command(BaseCommand): @@ -27,8 +27,7 @@ class Command(BaseCommand): def handle(self, *args, **kwargs): # Get the license data. - license_reader = TaskSerializer() - license_data = license_reader.from_database() + license_data = TaskEnhancer().validate_enhancements() # Does the license have features, at all? 
# If there is no license yet, then all features are clearly off. diff --git a/awx/api/metadata.py b/awx/api/metadata.py index c326a4a875..b329d83793 100644 --- a/awx/api/metadata.py +++ b/awx/api/metadata.py @@ -29,7 +29,8 @@ class Metadata(metadata.SimpleMetadata): text_attrs = [ 'read_only', 'label', 'help_text', 'min_length', 'max_length', - 'min_value', 'max_value' + 'min_value', 'max_value', + 'category', 'category_slug', ] for attr in text_attrs: @@ -37,6 +38,10 @@ class Metadata(metadata.SimpleMetadata): if value is not None and value != '': field_info[attr] = force_text(value, strings_only=True) + placeholder = getattr(field, 'placeholder', serializers.empty) + if placeholder is not serializers.empty: + field_info['placeholder'] = placeholder + # Update help text for common fields. serializer = getattr(field, 'parent', None) if serializer: @@ -52,9 +57,10 @@ class Metadata(metadata.SimpleMetadata): 'modified': 'Timestamp when this {} was last modified.', } if field.field_name in field_help_text: - opts = serializer.Meta.model._meta.concrete_model._meta - verbose_name = smart_text(opts.verbose_name) - field_info['help_text'] = field_help_text[field.field_name].format(verbose_name) + if hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'): + opts = serializer.Meta.model._meta.concrete_model._meta + verbose_name = smart_text(opts.verbose_name) + field_info['help_text'] = field_help_text[field.field_name].format(verbose_name) # Indicate if a field has a default value. # FIXME: Still isn't showing all default values? @@ -140,11 +146,10 @@ class Metadata(metadata.SimpleMetadata): # For GET method, remove meta attributes that aren't relevant # when reading a field and remove write-only fields. if method == 'GET': - meta.pop('required', None) - meta.pop('read_only', None) - meta.pop('default', None) - meta.pop('min_length', None) - meta.pop('max_length', None) + attrs_to_remove = ('required', 'read_only', 'default', 'min_length', 'max_length', 'placeholder') + for attr in attrs_to_remove: + meta.pop(attr, None) + meta.get('child', {}).pop(attr, None) if meta.pop('write_only', False): actions['GET'].pop(field) @@ -160,7 +165,7 @@ class Metadata(metadata.SimpleMetadata): # Add version number in which view was added to Tower. added_in_version = '1.2' - for version in ('3.0.0', '2.4.0', '2.3.0', '2.2.0', '2.1.0', '2.0.0', '1.4.8', '1.4.5', '1.4', '1.3'): + for version in ('3.1.0', '3.0.0', '2.4.0', '2.3.0', '2.2.0', '2.1.0', '2.0.0', '1.4.8', '1.4.5', '1.4', '1.3'): if getattr(view, 'new_in_%s' % version.replace('.', ''), False): added_in_version = version break diff --git a/awx/api/renderers.py b/awx/api/renderers.py index 348a8220c4..2a4e17628e 100644 --- a/awx/api/renderers.py +++ b/awx/api/renderers.py @@ -3,6 +3,7 @@ # Django REST Framework from rest_framework import renderers +from rest_framework.request import override_method class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer): @@ -30,6 +31,8 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer): # Set a flag on the view to indicate to the view/serializer that we're # creating a raw data form for the browsable API. Store the original # request method to determine how to populate the raw data form.
+ if request.method in {'OPTIONS', 'DELETE'}: + return try: setattr(view, '_raw_data_form_marker', True) setattr(view, '_raw_data_request_method', request.method) @@ -41,10 +44,13 @@ class BrowsableAPIRenderer(renderers.BrowsableAPIRenderer): def get_rendered_html_form(self, data, view, method, request): # Never show auto-generated form (only raw form). obj = getattr(view, 'object', None) - if not self.show_form_for_method(view, method, request, obj): - return - if method in ('DELETE', 'OPTIONS'): - return True # Don't actually need to return a form + if obj is None and hasattr(view, 'get_object') and hasattr(view, 'retrieve'): + obj = view.get_object() + with override_method(view, request, method) as request: + if not self.show_form_for_method(view, method, request, obj): + return + if method in ('DELETE', 'OPTIONS'): + return True # Don't actually need to return a form def get_filter_form(self, data, view, request): # Don't show filter form in browsable API. diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 5b45c9de5b..753fdad8b6 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -40,9 +40,8 @@ from awx.main.models import * # noqa from awx.main.access import get_user_capabilities from awx.main.fields import ImplicitRoleField from awx.main.utils import get_type_for_model, get_model_for_type, build_url, timestamp_apiformat, camelcase_to_underscore, getattrd -from awx.main.conf import tower_settings -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled from awx.api.fields import BooleanNullField, CharNullField, ChoiceNullField, EncryptedPasswordField, VerbatimField logger = logging.getLogger('awx.api.serializers') @@ -622,9 +621,9 @@ class UnifiedJobSerializer(BaseSerializer): def get_result_stdout(self, obj): obj_size = obj.result_stdout_size - if obj_size > tower_settings.STDOUT_MAX_BYTES_DISPLAY: + if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY: return "Standard Output too large to display (%d bytes), only download supported for sizes over %d bytes" % (obj_size, - tower_settings.STDOUT_MAX_BYTES_DISPLAY) + settings.STDOUT_MAX_BYTES_DISPLAY) return obj.result_stdout @@ -679,9 +678,9 @@ class UnifiedJobStdoutSerializer(UnifiedJobSerializer): def get_result_stdout(self, obj): obj_size = obj.result_stdout_size - if obj_size > tower_settings.STDOUT_MAX_BYTES_DISPLAY: + if obj_size > settings.STDOUT_MAX_BYTES_DISPLAY: return "Standard Output too large to display (%d bytes), only download supported for sizes over %d bytes" % (obj_size, - tower_settings.STDOUT_MAX_BYTES_DISPLAY) + settings.STDOUT_MAX_BYTES_DISPLAY) return obj.result_stdout def get_types(self): @@ -2099,7 +2098,7 @@ class AdHocCommandSerializer(UnifiedJobSerializer): # Load module name choices dynamically from DB settings. 
if field_name == 'module_name': field_class = serializers.ChoiceField - module_name_choices = [(x, x) for x in tower_settings.AD_HOC_COMMANDS] + module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS] module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else '' field_kwargs['choices'] = module_name_choices field_kwargs['required'] = bool(not module_name_default) @@ -2844,58 +2843,6 @@ class ActivityStreamSerializer(BaseSerializer): return summary_fields -class TowerSettingsSerializer(BaseSerializer): - - value = VerbatimField() - - class Meta: - model = TowerSettings - fields = ('key', 'description', 'category', 'value', 'value_type', 'user') - read_only_fields = ('description', 'category', 'value_type', 'user') - - def __init__(self, instance=None, data=serializers.empty, **kwargs): - if instance is None and data is not serializers.empty and 'key' in data: - try: - instance = TowerSettings.objects.get(key=data['key']) - except TowerSettings.DoesNotExist: - pass - super(TowerSettingsSerializer, self).__init__(instance, data, **kwargs) - - def to_representation(self, obj): - ret = super(TowerSettingsSerializer, self).to_representation(obj) - ret['value'] = getattr(obj, 'value_converted', obj.value) - return ret - - def to_internal_value(self, data): - if data['key'] not in settings.TOWER_SETTINGS_MANIFEST: - raise serializers.ValidationError({'key': ['Key {0} is not a valid settings key.'.format(data['key'])]}) - ret = super(TowerSettingsSerializer, self).to_internal_value(data) - manifest_val = settings.TOWER_SETTINGS_MANIFEST[data['key']] - ret['description'] = manifest_val['description'] - ret['category'] = manifest_val['category'] - ret['value_type'] = manifest_val['type'] - return ret - - def validate(self, attrs): - manifest = settings.TOWER_SETTINGS_MANIFEST - if attrs['key'] not in manifest: - raise serializers.ValidationError(dict(key=["Key {0} is not a valid settings key.".format(attrs['key'])])) - - if attrs['value_type'] == 'json': - attrs['value'] = json.dumps(attrs['value']) - elif attrs['value_type'] == 'list': - try: - attrs['value'] = ','.join(map(force_text, attrs['value'])) - except TypeError: - attrs['value'] = force_text(attrs['value']) - elif attrs['value_type'] == 'bool': - attrs['value'] = force_text(bool(attrs['value'])) - else: - attrs['value'] = force_text(attrs['value']) - - return super(TowerSettingsSerializer, self).validate(attrs) - - class AuthTokenSerializer(serializers.Serializer): username = serializers.CharField() diff --git a/awx/api/templates/api/_new_in_awx.md b/awx/api/templates/api/_new_in_awx.md index 4df45be686..711c56983a 100644 --- a/awx/api/templates/api/_new_in_awx.md +++ b/awx/api/templates/api/_new_in_awx.md @@ -7,3 +7,4 @@ {% if new_in_230 %}> _New in Ansible Tower 2.3.0_{% endif %} {% if new_in_240 %}> _New in Ansible Tower 2.4.0_{% endif %} {% if new_in_300 %}> _New in Ansible Tower 3.0.0_{% endif %} +{% if new_in_310 %}> _New in Ansible Tower 3.1.0_{% endif %} diff --git a/awx/api/urls.py b/awx/api/urls.py index dba7119a51..f5add3329c 100644 --- a/awx/api/urls.py +++ b/awx/api/urls.py @@ -319,10 +319,6 @@ activity_stream_urls = patterns('awx.api.views', url(r'^(?P<pk>[0-9]+)/$', 'activity_stream_detail'), ) -settings_urls = patterns('awx.api.views', - url(r'^$', 'settings_list'), - url(r'^reset/$', 'settings_reset')) - v1_urls = patterns('awx.api.views', url(r'^$', 'api_v1_root_view'), url(r'^ping/$', 'api_v1_ping_view'), @@ -332,7 +328,7 @@ v1_urls = patterns('awx.api.views', url(r'^me/$',
'user_me_list'), url(r'^dashboard/$', 'dashboard_view'), url(r'^dashboard/graphs/jobs/$','dashboard_jobs_graph_view'), - url(r'^settings/', include(settings_urls)), + url(r'^settings/', include('awx.conf.urls')), url(r'^schedules/', include(schedule_urls)), url(r'^organizations/', include(organization_urls)), url(r'^users/', include(user_urls)), diff --git a/awx/api/views.py b/awx/api/views.py index 551bb814e9..23dca79c80 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -3,14 +3,12 @@ # All Rights Reserved. # Python -import os import cgi import datetime import dateutil import time import socket import sys -import errno import logging from base64 import b64encode from collections import OrderedDict @@ -18,7 +16,6 @@ from collections import OrderedDict # Django from django.conf import settings from django.contrib.auth.models import User -from django.core.cache import cache from django.core.urlresolvers import reverse from django.core.exceptions import FieldError from django.db.models import Q, Count @@ -57,7 +54,6 @@ import ansiconv from social.backends.utils import load_backends # AWX -from awx.main.task_engine import TaskSerializer, TASK_FILE, TEMPORARY_TASK_FILE from awx.main.tasks import send_notifications from awx.main.access import get_user_queryset from awx.main.ha import is_ha_environment @@ -65,7 +61,7 @@ from awx.api.authentication import TaskAuthentication, TokenGetAuthentication from awx.api.utils.decorators import paginated from awx.api.generics import get_view_name from awx.api.generics import * # noqa -from awx.api.license import feature_enabled, feature_exists, LicenseForbids +from awx.conf.license import get_license, feature_enabled, feature_exists, LicenseForbids from awx.main.models import * # noqa from awx.main.utils import * # noqa from awx.api.permissions import * # noqa @@ -73,7 +69,6 @@ from awx.api.renderers import * # noqa from awx.api.serializers import * # noqa from awx.api.metadata import RoleMetadata from awx.main.utils import emit_websocket_notification -from awx.main.conf import tower_settings logger = logging.getLogger('awx.api.views') @@ -119,7 +114,7 @@ class ApiV1RootView(APIView): data['authtoken'] = reverse('api:auth_token_view') data['ping'] = reverse('api:api_v1_ping_view') data['config'] = reverse('api:api_v1_config_view') - data['settings'] = reverse('api:settings_list') + data['settings'] = reverse('api:setting_category_list') data['me'] = reverse('api:user_me_list') data['dashboard'] = reverse('api:dashboard_view') data['organizations'] = reverse('api:organization_list') @@ -189,12 +184,15 @@ class ApiV1ConfigView(APIView): def get(self, request, format=None): '''Return various sitewide configuration settings.''' - license_reader = TaskSerializer() - license_data = license_reader.from_database(show_key=request.user.is_superuser or request.user.is_system_auditor) + if request.user.is_superuser or request.user.is_system_auditor: + license_data = get_license(show_key=True) + else: + license_data = get_license(show_key=False) if license_data and 'features' in license_data and 'activity_streams' in license_data['features']: - license_data['features']['activity_streams'] &= tower_settings.ACTIVITY_STREAM_ENABLED + # FIXME: Make the final setting value dependent on the feature? 
+ license_data['features']['activity_streams'] &= settings.ACTIVITY_STREAM_ENABLED - pendo_state = tower_settings.PENDO_TRACKING_STATE if tower_settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off' + pendo_state = settings.PENDO_TRACKING_STATE if settings.PENDO_TRACKING_STATE in ('off', 'anonymous', 'detailed') else 'off' data = dict( time_zone=settings.TIME_ZONE, @@ -245,19 +243,18 @@ class ApiV1ConfigView(APIView): except Exception: # FIX: Log return Response({"error": "Invalid JSON"}, status=status.HTTP_400_BAD_REQUEST) - license_reader = TaskSerializer() try: - license_data = license_reader.from_string(data_actual) + from awx.main.task_engine import TaskEnhancer + license_data = json.loads(data_actual) + license_data = TaskEnhancer(**license_data).validate_enhancements() except Exception: # FIX: Log return Response({"error": "Invalid License"}, status=status.HTTP_400_BAD_REQUEST) - # If the license is valid, write it to disk. + # If the license is valid, write it to the database. if license_data['valid_key']: - tower_settings.LICENSE = data_actual - tower_settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host()) - # Clear cache when license is updated. - cache.clear() + settings.LICENSE = data_actual + settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host()) return Response(license_data) return Response({"error": "Invalid license"}, status=status.HTTP_400_BAD_REQUEST) @@ -266,26 +263,14 @@ def delete(self, request, *args, **kwargs): if not request.user.is_superuser: return Response(None, status=status.HTTP_404_NOT_FOUND) - # Remove license file - has_error = None - for fname in (TEMPORARY_TASK_FILE, TASK_FILE): - try: - os.remove(fname) - except OSError as e: - if e.errno != errno.ENOENT: - has_error = e.errno - break - - TowerSettings.objects.filter(key="LICENSE").delete() - # Clear cache when license is updated. - cache.clear() - - # Only stop mongod if license removal succeeded - if has_error is None: + try: + settings.LICENSE = {} return Response(status=status.HTTP_204_NO_CONTENT) - else: + except Exception as e: + # FIX: Log - return Response({"error": "Failed to remove license (%s)" % has_error}, status=status.HTTP_400_BAD_REQUEST) + return Response({"error": "Failed to remove license (%s)" % e}, status=status.HTTP_400_BAD_REQUEST) + class DashboardView(APIView): view_name = "Dashboard" @@ -554,7 +539,7 @@ class AuthTokenView(APIView): # Note: This header is normally added in the middleware whenever an # auth token is included in the request header.
headers = { - 'Auth-Token-Timeout': int(tower_settings.AUTH_TOKEN_EXPIRATION) + 'Auth-Token-Timeout': int(settings.AUTH_TOKEN_EXPIRATION) } return Response({'token': token.key, 'expires': token.expires}, headers=headers) if 'username' in request.data: @@ -3590,9 +3575,9 @@ class UnifiedJobStdout(RetrieveAPIView): def retrieve(self, request, *args, **kwargs): unified_job = self.get_object() obj_size = unified_job.result_stdout_size - if request.accepted_renderer.format != 'txt_download' and obj_size > tower_settings.STDOUT_MAX_BYTES_DISPLAY: + if request.accepted_renderer.format != 'txt_download' and obj_size > settings.STDOUT_MAX_BYTES_DISPLAY: response_message = "Standard Output too large to display (%d bytes), only download supported for sizes over %d bytes" % (obj_size, - tower_settings.STDOUT_MAX_BYTES_DISPLAY) + settings.STDOUT_MAX_BYTES_DISPLAY) if request.accepted_renderer.format == 'json': return Response({'range': {'start': 0, 'end': 1, 'absolute_end': 1}, 'content': response_message}) else: @@ -3689,8 +3674,8 @@ class NotificationTemplateTest(GenericAPIView): def post(self, request, *args, **kwargs): obj = self.get_object() - notification = obj.generate_notification("Tower Notification Test {} {}".format(obj.id, tower_settings.TOWER_URL_BASE), - {"body": "Ansible Tower Test Notification {} {}".format(obj.id, tower_settings.TOWER_URL_BASE)}) + notification = obj.generate_notification("Tower Notification Test {} {}".format(obj.id, settings.TOWER_URL_BASE), + {"body": "Ansible Tower Test Notification {} {}".format(obj.id, settings.TOWER_URL_BASE)}) if not notification: return Response({}, status=status.HTTP_400_BAD_REQUEST) else: @@ -3765,71 +3750,6 @@ class ActivityStreamDetail(RetrieveAPIView): # Okay, let it through. return super(ActivityStreamDetail, self).get(request, *args, **kwargs) -class SettingsList(ListCreateAPIView): - - model = TowerSettings - serializer_class = TowerSettingsSerializer - authentication_classes = [TokenGetAuthentication] + api_settings.DEFAULT_AUTHENTICATION_CLASSES - new_in_300 = True - filter_backends = () - - def get_queryset(self): - class SettingsIntermediary(object): - def __init__(self, key, description, category, value, - value_type, user=None): - self.key = key - self.description = description - self.category = category - self.value = value - self.value_type = value_type - self.user = user - - if not self.request.user.is_superuser: - # NOTE: Shortcutting the rbac class due to the merging of the settings manifest and the database - # we'll need to extend this more in the future when we have user settings - return [] - all_defined_settings = {} - for s in TowerSettings.objects.all(): - all_defined_settings[s.key] = SettingsIntermediary(s.key, - s.description, - s.category, - s.value_converted, - s.value_type, - s.user) - manifest_settings = settings.TOWER_SETTINGS_MANIFEST - settings_actual = [] - for settings_key in manifest_settings: - if settings_key in all_defined_settings: - settings_actual.append(all_defined_settings[settings_key]) - else: - m_entry = manifest_settings[settings_key] - settings_actual.append(SettingsIntermediary(settings_key, - m_entry['description'], - m_entry['category'], - m_entry['default'], - m_entry['type'])) - return settings_actual - - def delete(self, request, *args, **kwargs): - if not request.user.can_access(self.model, 'delete', None): - raise PermissionDenied() - TowerSettings.objects.all().delete() - return Response() - -class SettingsReset(APIView): - - view_name = "Reset a settings value" - new_in_300 = True - 
- def post(self, request): - # NOTE: Extend more with user settings - if not request.user.can_access(TowerSettings, 'delete', None): - raise PermissionDenied() - settings_key = request.data.get('key', None) - if settings_key is not None: - TowerSettings.objects.filter(key=settings_key).delete() - return Response(status=status.HTTP_204_NO_CONTENT) - class RoleList(ListAPIView): diff --git a/awx/conf/__init__.py b/awx/conf/__init__.py new file mode 100644 index 0000000000..8f00d64865 --- /dev/null +++ b/awx/conf/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Django +from django.utils.module_loading import autodiscover_modules + +# Tower +from .registry import settings_registry + +default_app_config = 'awx.conf.apps.ConfConfig' + + +def register(setting, **kwargs): + settings_registry.register(setting, **kwargs) + + +def autodiscover(): + autodiscover_modules('conf', register_to=settings_registry) diff --git a/awx/conf/access.py b/awx/conf/access.py new file mode 100644 index 0000000000..84f4ca348c --- /dev/null +++ b/awx/conf/access.py @@ -0,0 +1,45 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Django +from django.db.models import Q + +# Tower +from awx.main.access import BaseAccess, register_access +from awx.conf.models import Setting + + +class SettingAccess(BaseAccess): + ''' + - I can see settings when I am a super user or system auditor. + - I can edit settings when I am a super user. + - I can clear settings when I am a super user. + - I can always see/edit/clear my own user settings. + ''' + + model = Setting + + # For the checks below, obj will be an instance of a "Settings" class with + # an attribute for each setting and a "user" attribute (set to None unless + # it is a user setting). + + def get_queryset(self): + if self.user.is_superuser or self.user.is_system_auditor: + return self.model.objects.filter(Q(user__isnull=True) | Q(user=self.user)) + else: + return self.model.objects.filter(user=self.user) + + def can_read(self, obj): + return bool(self.user.is_superuser or self.user.is_system_auditor or (obj and obj.user == self.user)) + + def can_add(self, data): + return False # There is no API endpoint to POST new settings. + + def can_change(self, obj, data): + return bool(self.user.is_superuser or (obj and obj.user == self.user)) + + def can_delete(self, obj): + return bool(self.user.is_superuser or (obj and obj.user == self.user)) + + +register_access(Setting, SettingAccess) diff --git a/awx/conf/apps.py b/awx/conf/apps.py new file mode 100644 index 0000000000..a77cc84209 --- /dev/null +++ b/awx/conf/apps.py @@ -0,0 +1,16 @@ +# Django +from django.apps import AppConfig +# from django.core import checks +from django.utils.translation import ugettext_lazy as _ + + +class ConfConfig(AppConfig): + + name = 'awx.conf' + verbose_name = _('Configuration') + + def ready(self): + self.module.autodiscover() + from .settings import SettingsWrapper + SettingsWrapper.initialize() + # checks.register(SettingsWrapper._check_settings) diff --git a/awx/conf/conf.py b/awx/conf/conf.py new file mode 100644 index 0000000000..e14e7c684c --- /dev/null +++ b/awx/conf/conf.py @@ -0,0 +1,95 @@ +# Django +from django.conf import settings +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx.conf import fields, register +from awx.conf import settings_registry + +# Define a conf.py file within your app and register each setting similarly to +# the example below. 
Any field class from Django REST Framework or subclass +# thereof can be used for validation/conversion of the setting. All keyword +# arguments to the register function (except field_class, category, +# category_slug, depends_on, placeholder) will be used to initialize +# the field_class. + +register( + 'ANSIBLE_COW_SELECTION', + field_class=fields.ChoiceField, + choices=[ + ('bud-frogs', _('Bud Frogs')), + ('bunny', _('Bunny')), + ('cheese', _('Cheese')), + ('daemon', _('Daemon')), + ('default', _('Default Cow')), + ('dragon', _('Dragon')), + ('elephant-in-snake', _('Elephant in Snake')), + ('elephant', _('Elephant')), + ('eyes', _('Eyes')), + ('hellokitty', _('Hello Kitty')), + ('kitty', _('Kitty')), + ('luke-koala', _('Luke Koala')), + ('meow', _('Meow')), + ('milk', _('Milk')), + ('moofasa', _('Moofasa')), + ('moose', _('Moose')), + ('ren', _('Ren')), + ('sheep', _('Sheep')), + ('small', _('Small Cow')), + ('stegosaurus', _('Stegosaurus')), + ('stimpy', _('Stimpy')), + ('supermilker', _('Super Milker')), + ('three-eyes', _('Three Eyes')), + ('turkey', _('Turkey')), + ('turtle', _('Turtle')), + ('tux', _('Tux')), + ('udder', _('Udder')), + ('vader-koala', _('Vader Koala')), + ('vader', _('Vader')), + ('www', _('WWW')), + ], + default='default', + label=_('Cow Selection'), + help_text=_('Select which cow to use with cowsay when running jobs.'), + category=_('Cows'), + # Optional; category_slug will be slugified version of category if not + # explicitly provided. + category_slug='cows', +) + +def _get_read_only_ansible_cow_selection_default(): + return getattr(settings, 'ANSIBLE_COW_SELECTION', 'No default cow!') + +register( + 'READONLY_ANSIBLE_COW_SELECTION', + field_class=fields.CharField, + # read_only must be set via kwargs even if field_class sets it. + read_only=True, + # default can be a callable to dynamically compute the value; should be in + # the plain JSON format stored in the DB and used in the API. + default=_get_read_only_ansible_cow_selection_default, + label=_('Example Read-Only Setting'), + help_text=_('Example setting that cannot be changed.'), + category=_('Cows'), + category_slug='cows', + # Optional; list of other settings this read-only setting depends on. When + # the other settings change, the cached value for this setting will be + # cleared to require it to be recomputed. + depends_on=['ANSIBLE_COW_SELECTION'], +) + +register( + 'EXAMPLE_USER_SETTING', + field_class=fields.CharField, + allow_blank=True, + label=_('Example Setting'), + help_text=_('Example setting which can be different for each user.'), + category=_('User'), + category_slug='user', + default='', +) + +# Unregister the example settings above. +settings_registry.unregister('ANSIBLE_COW_SELECTION') +settings_registry.unregister('READONLY_ANSIBLE_COW_SELECTION') +settings_registry.unregister('EXAMPLE_USER_SETTING') diff --git a/awx/conf/fields.py b/awx/conf/fields.py new file mode 100644 index 0000000000..b117bdf809 --- /dev/null +++ b/awx/conf/fields.py @@ -0,0 +1,55 @@ +# Python +import json +import logging +import os +import urlparse + +# Django +from django.core.validators import URLValidator +from django.utils.translation import ugettext_lazy as _ + +# Django REST Framework +from rest_framework.fields import * # noqa + +logger = logging.getLogger('awx.conf.fields') + +# Use DRF fields to convert/validate settings: +# - to_representation(obj) should convert a native Python object to a primitive +# serializable type. 
This primitive type will be what is presented in the API + # and stored in the JSON field in the database. + # - to_internal_value(data) should convert the primitive type back into the + # appropriate Python type to be used in settings. + + + class StringListField(ListField): + child = CharField() + + + class URLField(CharField): + + def __init__(self, **kwargs): + schemes = kwargs.pop('schemes', None) + self.allow_plain_hostname = kwargs.pop('allow_plain_hostname', False) + super(URLField, self).__init__(**kwargs) + validator_kwargs = dict(message=_('Enter a valid URL')) + if schemes is not None: + validator_kwargs['schemes'] = schemes + self.validators.append(URLValidator(**validator_kwargs)) + + def run_validators(self, value): + if self.allow_plain_hostname: + try: + url_parts = urlparse.urlsplit(value) + if url_parts.hostname and '.' not in url_parts.hostname: + netloc = '{}.local'.format(url_parts.hostname) + if url_parts.port: + netloc = '{}:{}'.format(netloc, url_parts.port) + if url_parts.username: + if url_parts.password: + netloc = '{}:{}@{}'.format(url_parts.username, url_parts.password, netloc) + else: + netloc = '{}@{}'.format(url_parts.username, netloc) + value = urlparse.urlunsplit([url_parts.scheme, netloc, url_parts.path, url_parts.query, url_parts.fragment]) + except Exception: + pass # If something fails here, just fall through and let the validators check it. + super(URLField, self).run_validators(value) diff --git a/awx/conf/license.py b/awx/conf/license.py new file mode 100644 index 0000000000..816b143e64 --- /dev/null +++ b/awx/conf/license.py @@ -0,0 +1,52 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Django +from django.core.cache import cache +from django.core.signals import setting_changed +from django.dispatch import receiver +from django.utils.translation import ugettext_lazy as _ + +# Django REST Framework +from rest_framework.exceptions import APIException + +# Tower +from awx.main.task_engine import TaskEnhancer +from awx.main.utils import memoize + +__all__ = ['LicenseForbids', 'get_license', 'feature_enabled', 'feature_exists'] + + +class LicenseForbids(APIException): + status_code = 402 + default_detail = _('Your Tower license does not allow that.') + + +@memoize(cache_key='_validated_license_data') +def _get_validated_license_data(): + return TaskEnhancer().validate_enhancements() + + +@receiver(setting_changed) +def _on_setting_changed(sender, **kwargs): + # Clear cached result above when license changes.
+ if kwargs.get('setting', None) == 'LICENSE': + cache.delete('_validated_license_data') + + +def get_license(show_key=False): + """Return a dictionary representing the active license on this Tower instance.""" + license_data = _get_validated_license_data() + if not show_key: + license_data.pop('license_key', None) + return license_data + + +def feature_enabled(name): + """Return True if the requested feature is enabled, False otherwise.""" + return _get_validated_license_data().get('features', {}).get(name, False) + + +def feature_exists(name): + """Return True if the requested feature name exists, False otherwise.""" + return bool(name in _get_validated_license_data().get('features', {})) diff --git a/awx/conf/management/__init__.py b/awx/conf/management/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/conf/management/commands/__init__.py b/awx/conf/management/commands/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/conf/management/commands/migrate_to_database_settings.py b/awx/conf/management/commands/migrate_to_database_settings.py new file mode 100644 index 0000000000..d369f362a0 --- /dev/null +++ b/awx/conf/management/commands/migrate_to_database_settings.py @@ -0,0 +1,328 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Python +import collections +import difflib +import json +import os +import shutil + +# Django +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError +from django.db import transaction +from django.utils.text import slugify +from django.utils.timezone import now + +# Tower +from awx import MODE +from awx.conf import settings_registry +from awx.conf.fields import empty, SkipField +from awx.conf.models import Setting +from awx.conf.utils import comment_assignments + + +class Command(BaseCommand): + + def add_arguments(self, parser): + parser.add_argument( + 'category', + nargs='*', + type=str, + ) + parser.add_argument( + '--dry-run', + action='store_true', + dest='dry_run', + default=False, + help='Only show which settings would be commented/migrated.', + ) + parser.add_argument( + '--no-comment', + action='store_true', + dest='no_comment', + default=False, + help='Skip commenting out settings in files.', + ) + parser.add_argument( + '--backup-suffix', + dest='backup_suffix', + default=now().strftime('.%Y%m%d%H%M%S'), + help='Backup existing settings files with this suffix.', + ) + + @transaction.atomic + def handle(self, *args, **options): + self.verbosity = int(options.get('verbosity', 1)) + self.dry_run = bool(options.get('dry_run', False)) + self.no_comment = bool(options.get('no_comment', False)) + self.backup_suffix = options.get('backup_suffix', '') + self.categories = options.get('category', None) or ['all'] + self.style.HEADING = self.style.MIGRATE_HEADING + self.style.LABEL = self.style.MIGRATE_LABEL + self.style.OK = self.style.SQL_FIELD + self.style.SKIP = self.style.WARNING + self.style.VALUE = self.style.SQL_KEYWORD + + # Determine if any categories provided are invalid. 
+ category_slugs = [] + invalid_categories = [] + for category in self.categories: + category_slug = slugify(category) + if category_slug in settings_registry.get_registered_categories(): + if category_slug not in category_slugs: + category_slugs.append(category_slug) + else: + if category not in invalid_categories: + invalid_categories.append(category) + if len(invalid_categories) == 1: + raise CommandError('Invalid setting category: {}'.format(invalid_categories[0])) + elif len(invalid_categories) > 1: + raise CommandError('Invalid setting categories: {}'.format(', '.join(invalid_categories))) + + # Build a list of all settings to be migrated. + registered_settings = [] + for category_slug in category_slugs: + for registered_setting in settings_registry.get_registered_settings(category_slug=category_slug, read_only=False): + if registered_setting not in registered_settings: + registered_settings.append(registered_setting) + + self._migrate_settings(registered_settings) + + def _get_settings_file_patterns(self): + if MODE == 'development': + return [ + '/etc/tower/settings.py', + '/etc/tower/conf.d/*.py', + os.path.join(os.path.dirname(__file__), '..', '..', '..', 'settings', 'local_*.py') + ] + else: + return [ + os.environ.get('AWX_SETTINGS_FILE', '/etc/tower/settings.py'), + os.path.join(os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/'), '*.py'), + ] + + def _get_license_file(self): + return os.environ.get('AWX_LICENSE_FILE', '/etc/tower/license') + + def _comment_license_file(self, dry_run=True): + license_file = self._get_license_file() + diff_lines = [] + if os.path.exists(license_file): + try: + raw_license_data = open(license_file).read() + license_data = json.loads(raw_license_data) + except Exception as e: + raise CommandError('Error reading license from {0}: {1!r}'.format(license_file, e)) + if self.backup_suffix: + backup_license_file = '{}{}'.format(license_file, self.backup_suffix) + else: + backup_license_file = '{}.old'.format(license_file) + diff_lines = list(difflib.unified_diff( + raw_license_data.splitlines(), + [], + fromfile=backup_license_file, + tofile=license_file, + lineterm='', + )) + if not dry_run: + if self.backup_suffix: + shutil.copy2(license_file, backup_license_file) + os.remove(license_file) + return diff_lines + + def _check_if_needs_comment(self, patterns, setting): + files_to_comment = [] + try: + # If any diffs are returned, this setting needs to be commented. + diffs = comment_assignments(patterns, setting, dry_run=True) + if setting == 'LICENSE': + diffs.extend(self._comment_license_file(dry_run=True)) + for diff in diffs: + for line in diff.splitlines(): + if line.startswith('+++ '): + files_to_comment.append(line[4:]) + except Exception as e: + raise CommandError('Error commenting {0}: {1!r}'.format(setting, e)) + return files_to_comment + + def _check_if_needs_migration(self, setting): + # Check whether the current value differs from the default. 
+ default_value = settings.DEFAULTS_SNAPSHOT.get(setting, empty) + if default_value is empty and setting != 'LICENSE': + field = settings_registry.get_setting_field(setting, read_only=True) + try: + default_value = field.get_default() + except SkipField: + pass + current_value = getattr(settings, setting, empty) + if current_value != default_value: + if current_value is empty: + current_value = None + return current_value + return empty + + def _display_tbd(self, setting, files_to_comment, migrate_value): + if self.verbosity >= 1: + if files_to_comment: + if migrate_value is not empty: + action = 'Migrate + Comment' + else: + action = 'Comment' + self.stdout.write(' {}: {}'.format( + self.style.LABEL(setting), + self.style.OK(action), + )) + if self.verbosity >= 2: + if migrate_value is not empty: + self.stdout.write(' - Migrate value: {}'.format( + self.style.VALUE(repr(migrate_value)), + )) + for file_to_comment in files_to_comment: + self.stdout.write(' - Comment in: {}'.format( + self.style.VALUE(file_to_comment), + )) + else: + if self.verbosity >= 2: + self.stdout.write(' {}: {}'.format( + self.style.LABEL(setting), + self.style.SKIP('No Migration'), + )) + + def _display_migrate(self, setting, action, display_value): + if self.verbosity >= 1: + if action == 'No Change': + action = self.style.SKIP(action) + else: + action = self.style.OK(action) + self.stdout.write(' {}: {}'.format( + self.style.LABEL(setting), + action, + )) + if self.verbosity >= 2: + for line in display_value.splitlines(): + self.stdout.write(' {}'.format( + self.style.VALUE(line), + )) + + def _display_diff_summary(self, filename, added, removed): + self.stdout.write(' {} {}{} {}{}'.format( + self.style.LABEL(filename), + self.style.ERROR('-'), + self.style.ERROR(int(removed)), + self.style.OK('+'), + self.style.OK(str(added)), + )) + + def _display_comment(self, diffs): + for diff in diffs: + if self.verbosity >= 2: + for line in diff.splitlines(): + display_line = line + if line.startswith('--- ') or line.startswith('+++ '): + display_line = self.style.LABEL(line) + elif line.startswith('-'): + display_line = self.style.ERROR(line) + elif line.startswith('+'): + display_line = self.style.OK(line) + elif line.startswith('@@'): + display_line = self.style.VALUE(line) + if line.startswith('--- ') or line.startswith('+++ '): + self.stdout.write(' ' + display_line) + else: + self.stdout.write(' ' + display_line) + elif self.verbosity >= 1: + filename, lines_added, lines_removed = None, 0, 0 + for line in diff.splitlines(): + if line.startswith('+++ '): + if filename: + self._display_diff_summary(filename, lines_added, lines_removed) + filename, lines_added, lines_removed = line[4:], 0, 0 + elif line.startswith('+'): + lines_added += 1 + elif line.startswith('-'): + lines_removed += 1 + if filename: + self._display_diff_summary(filename, lines_added, lines_removed) + + def _migrate_settings(self, registered_settings): + patterns = self._get_settings_file_patterns() + + # Determine which settings need to be commented/migrated. 
+        if self.verbosity >= 1:
+            self.stdout.write(self.style.HEADING('Discovering settings to be migrated and commented:'))
+        to_migrate = collections.OrderedDict()
+        to_comment = collections.OrderedDict()
+        for name in registered_settings:
+            files_to_comment = self._check_if_needs_comment(patterns, name)
+            migrate_value = empty
+            if files_to_comment:
+                to_comment[name] = files_to_comment
+                migrate_value = self._check_if_needs_migration(name)
+                if migrate_value is not empty:
+                    to_migrate[name] = migrate_value
+            self._display_tbd(name, files_to_comment, migrate_value)
+        if self.verbosity == 1 and not to_migrate and not to_comment:
+            self.stdout.write('  No settings found to migrate or comment!')
+
+        # Now migrate those settings to the database.
+        if self.verbosity >= 1:
+            if self.dry_run:
+                self.stdout.write(self.style.HEADING('Migrating settings to database (dry-run):'))
+            else:
+                self.stdout.write(self.style.HEADING('Migrating settings to database:'))
+            if not to_migrate:
+                self.stdout.write('  No settings to migrate!')
+        for name, value in to_migrate.items():
+            field = settings_registry.get_setting_field(name)
+            assert not field.read_only
+            try:
+                data = field.to_representation(value)
+                setting_value = field.run_validation(data)
+                db_value = field.to_representation(setting_value)
+            except Exception as e:
+                raise CommandError('Unable to assign value {0!r} to setting "{1}": {2!s}'.format(value, name, e))
+            display_value = json.dumps(db_value, indent=4)
+            # Always encode "raw" strings as JSON.
+            if isinstance(db_value, basestring):
+                db_value = json.dumps(db_value)
+            setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
+            action = 'No Change'
+            if not setting:
+                action = 'Migrated'
+                if not self.dry_run:
+                    Setting.objects.create(key=name, user=None, value=db_value)
+            elif setting.value != db_value or type(setting.value) != type(db_value):
+                action = 'Updated'
+                if not self.dry_run:
+                    setting.value = db_value
+                    setting.save(update_fields=['value'])
+            self._display_migrate(name, action, display_value)
+
+        # Now comment settings in settings files.
+        if self.verbosity >= 1:
+            if self.dry_run or self.no_comment:
+                self.stdout.write(self.style.HEADING('Commenting settings in files (dry-run):'))
+            else:
+                self.stdout.write(self.style.HEADING('Commenting settings in files:'))
+            if not to_comment:
+                self.stdout.write('  No settings to comment!')
+        if to_comment:
+            to_comment_patterns = []
+            license_file_to_comment = None
+            for files_to_comment in to_comment.values():
+                for file_to_comment in files_to_comment:
+                    if file_to_comment == self._get_license_file():
+                        license_file_to_comment = file_to_comment
+                    elif file_to_comment not in to_comment_patterns:
+                        to_comment_patterns.append(file_to_comment)
+            # Run once in dry-run mode to catch any errors from updating the files.
+            diffs = comment_assignments(to_comment_patterns, to_comment.keys(), dry_run=True, backup_suffix=self.backup_suffix)
+            # Then, if really updating, run again.
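+            # (The dry-run pass above will raise before any file is modified
+            # if a rewritten source file fails to compile.)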
+ if not self.dry_run and not self.no_comment: + diffs = comment_assignments(to_comment_patterns, to_comment.keys(), dry_run=False, backup_suffix=self.backup_suffix) + if license_file_to_comment: + diffs.extend(self._comment_license_file(dry_run=False)) + self._display_comment(diffs) diff --git a/awx/conf/migrations/0001_initial.py b/awx/conf/migrations/0001_initial.py new file mode 100644 index 0000000000..f9613b15d1 --- /dev/null +++ b/awx/conf/migrations/0001_initial.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models +import jsonfield.fields +from django.conf import settings + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.CreateModel( + name='Setting', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('created', models.DateTimeField(default=None, editable=False)), + ('modified', models.DateTimeField(default=None, editable=False)), + ('key', models.CharField(max_length=255)), + ('value', jsonfield.fields.JSONField(null=True)), + ('user', models.ForeignKey(related_name='settings', default=None, editable=False, to=settings.AUTH_USER_MODEL, null=True)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/awx/conf/migrations/0002_v310_copy_tower_settings.py b/awx/conf/migrations/0002_v310_copy_tower_settings.py new file mode 100644 index 0000000000..7b1422ba97 --- /dev/null +++ b/awx/conf/migrations/0002_v310_copy_tower_settings.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +import json + +from django.db import migrations + + +def copy_tower_settings(apps, schema_editor): + TowerSettings = apps.get_model('main', 'TowerSettings') + Setting = apps.get_model('conf', 'Setting') + for tower_setting in TowerSettings.objects.all().iterator(): + try: + value = tower_setting.value + # LICENSE is stored as a string; convert it to a dict. + if tower_setting.key == 'LICENSE': + value = json.loads(value) + # Anything else (e.g. TOWER_URL_BASE) that is stored as a string + # needs to be converted to a JSON-encoded string to work with the + # JSON field. + elif tower_setting.value_type == 'string': + value = json.dumps(value) + setting, created = Setting.objects.get_or_create( + key=tower_setting.key, + user=tower_setting.user, + defaults=dict(value=value), + ) + if not created and setting.value != value: + setting.value = value + setting.save(update_fields=['value']) + except Setting.MultipleObjectsReturned: + pass + + +def revert_tower_settings(apps, schema_editor): + TowerSettings = apps.get_model('main', 'TowerSettings') + Setting = apps.get_model('conf', 'Setting') + for setting in Setting.objects.all().iterator(): + value = setting.value + # LICENSE is stored as a JSON object; convert it back to a string. 
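+        # (This reverses the LICENSE handling in copy_tower_settings() above,
+        # so the migration can be rolled back to the legacy string-based
+        # TowerSettings schema.)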
+        if setting.key == 'LICENSE':
+            value = json.dumps(value)
+        defaults = dict(
+            value=value,
+            value_type='string',
+            description='',
+            category='',
+        )
+        try:
+            tower_setting, created = TowerSettings.objects.get_or_create(
+                key=setting.key,
+                user=setting.user,
+                defaults=defaults,
+            )
+            if not created:
+                update_fields = []
+                for k, v in defaults.items():
+                    if getattr(tower_setting, k) != v:
+                        setattr(tower_setting, k, v)
+                        update_fields.append(k)
+                if update_fields:
+                    tower_setting.save(update_fields=update_fields)
+        except TowerSettings.MultipleObjectsReturned:
+            pass
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('conf', '0001_initial'),
+        ('main', '0035_v310_jobevent_uuid'),
+    ]
+
+    run_before = [
+        ('main', '0036_v310_remove_tower_settings'),
+    ]
+
+    operations = [
+        migrations.RunPython(copy_tower_settings, revert_tower_settings),
+    ]
diff --git a/awx/conf/migrations/__init__.py b/awx/conf/migrations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awx/conf/models.py b/awx/conf/models.py
new file mode 100644
index 0000000000..ffddab3c54
--- /dev/null
+++ b/awx/conf/models.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2016 Ansible, Inc.
+# All Rights Reserved.
+
+# Python
+import json
+
+# Django
+from django.db import models
+
+# Django-JSONField
+from jsonfield import JSONField
+
+# Tower
+from awx.main.models.base import CreatedModifiedModel
+
+__all__ = ['Setting']
+
+
+class Setting(CreatedModifiedModel):
+
+    key = models.CharField(
+        max_length=255,
+    )
+    value = JSONField(
+        null=True,
+        # FIXME: Enable when we upgrade to JSONField with support:
+        # load_kwargs={'object_pairs_hook': collections.OrderedDict},
+    )
+    user = models.ForeignKey(
+        'auth.User',
+        related_name='settings',
+        default=None,
+        null=True,
+        editable=False,
+        on_delete=models.CASCADE,
+    )
+
+    def __unicode__(self):
+        try:
+            json_value = json.dumps(self.value)
+        except (TypeError, ValueError):
+            # In the rare case the DB value cannot be serialized back to JSON.
+            json_value = u''
+        if self.user:
+            return u'{} ({}) = {}'.format(self.key, self.user, json_value)
+        else:
+            return u'{} = {}'.format(self.key, json_value)
+
+    @classmethod
+    def get_cache_key(cls, key):
+        return key
+
+
+import awx.conf.signals  # noqa
+
+from awx.main.registrar import activity_stream_registrar  # noqa
+activity_stream_registrar.connect(Setting)
+
+import awx.conf.access  # noqa
diff --git a/awx/conf/registry.py b/awx/conf/registry.py
new file mode 100644
index 0000000000..1babae1786
--- /dev/null
+++ b/awx/conf/registry.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2016 Ansible, Inc.
+# All Rights Reserved.
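+#
+# Settings modules make a setting API-configurable by calling
+# settings_registry.register() at import time. A minimal, purely illustrative
+# registration (the setting name here is hypothetical):
+#
+#   register(
+#       'EXAMPLE_ENABLED',
+#       field_class=fields.BooleanField,
+#       label=_('Example'),
+#       category=_('System'),
+#       category_slug='system',
+#   )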
+ +# Python +from collections import OrderedDict +import logging + +# Django +from django.core.exceptions import ImproperlyConfigured +from django.utils.text import slugify +from django.utils.translation import ugettext_lazy as _ + +logger = logging.getLogger('awx.conf.registry') + +__all__ = ['settings_registry'] + + +class SettingsRegistry(object): + """Registry of all API-configurable settings and categories.""" + + def __init__(self): + self._registry = OrderedDict() + self._dependent_settings = {} + + def register(self, setting, **kwargs): + if setting in self._registry: + raise ImproperlyConfigured('Setting "{}" is already registered.'.format(setting)) + category = kwargs.setdefault('category', None) + category_slug = kwargs.setdefault('category_slug', slugify(category or '') or None) + if category_slug in {'all', 'changed', 'user-defaults'}: + raise ImproperlyConfigured('"{}" is a reserved category slug.'.format(category_slug)) + if 'field_class' not in kwargs: + raise ImproperlyConfigured('Setting must provide a field_class keyword argument.') + self._registry[setting] = kwargs + + # Normally for read-only/dynamic settings, depends_on will specify other + # settings whose changes may affect the value of this setting. Store + # this setting as a dependent for the other settings, so we can know + # which extra cache keys to clear when a setting changes. + depends_on = kwargs.setdefault('depends_on', None) or set() + for depends_on_setting in depends_on: + dependent_settings = self._dependent_settings.setdefault(depends_on_setting, set()) + dependent_settings.add(setting) + + def unregister(self, setting): + self._registry.pop(setting, None) + for dependent_settings in self._dependent_settings.values(): + dependent_settings.discard(setting) + + def get_dependent_settings(self, setting): + return self._dependent_settings.get(setting, set()) + + def get_registered_categories(self): + categories = { + 'all': _('All'), + 'changed': _('Changed'), + 'user': _('User'), + 'user-defaults': _('User Defaults'), + } + for setting, kwargs in self._registry.items(): + category_slug = kwargs.get('category_slug', None) + if category_slug is None or category_slug in categories: + continue + categories[category_slug] = kwargs.get('category', None) or category_slug + return categories + + def get_registered_settings(self, category_slug=None, read_only=None): + setting_names = [] + if category_slug == 'user-defaults': + category_slug = 'user' + if category_slug == 'changed': + category_slug = 'all' + for setting, kwargs in self._registry.items(): + if category_slug not in {None, 'all', kwargs.get('category_slug', None)}: + continue + if read_only in {True, False} and kwargs.get('read_only', False) != read_only: + # Note: Doesn't catch fields that set read_only via __init__; + # read-only field kwargs should always include read_only=True. 
+                continue
+            setting_names.append(setting)
+        return setting_names
+
+    def get_setting_field(self, setting, mixin_class=None, for_user=False, **kwargs):
+        from django.conf import settings
+        from rest_framework.fields import empty
+        field_kwargs = {}
+        field_kwargs.update(self._registry[setting])
+        field_kwargs.update(kwargs)
+        field_class = original_field_class = field_kwargs.pop('field_class')
+        if mixin_class:
+            field_class = type(field_class.__name__, (mixin_class, field_class), {})
+        category_slug = field_kwargs.pop('category_slug', None)
+        category = field_kwargs.pop('category', None)
+        depends_on = frozenset(field_kwargs.pop('depends_on', None) or [])
+        placeholder = field_kwargs.pop('placeholder', empty)
+        if getattr(field_kwargs.get('child', None), 'source', None) is not None:
+            field_kwargs['child'].source = None
+        field_instance = field_class(**field_kwargs)
+        field_instance.category_slug = category_slug
+        field_instance.category = category
+        field_instance.depends_on = depends_on
+        if placeholder is not empty:
+            field_instance.placeholder = placeholder
+        original_field_instance = field_instance
+        if field_class != original_field_class:
+            original_field_instance = original_field_class(**field_kwargs)
+        if category_slug == 'user' and for_user:
+            try:
+                field_instance.default = original_field_instance.to_representation(getattr(settings, setting))
+            except Exception:
+                logger.warning('Unable to retrieve default value for user setting "%s".', setting, exc_info=True)
+        elif not field_instance.read_only or field_instance.default is empty:
+            try:
+                field_instance.default = original_field_instance.to_representation(settings._awx_conf_settings._get_default(setting))
+            except AttributeError:
+                pass
+            except Exception:
+                logger.warning('Unable to retrieve default value for setting "%s".', setting, exc_info=True)
+        return field_instance
+
+settings_registry = SettingsRegistry()
diff --git a/awx/conf/serializers.py b/awx/conf/serializers.py
new file mode 100644
index 0000000000..6cbc4ae980
--- /dev/null
+++ b/awx/conf/serializers.py
@@ -0,0 +1,79 @@
+# Django REST Framework
+from rest_framework import serializers
+
+# Tower
+from awx.api.fields import VerbatimField
+from awx.api.serializers import BaseSerializer
+from awx.conf.models import Setting
+from awx.conf import settings_registry
+
+
+class SettingSerializer(BaseSerializer):
+    """Read-only serializer for activity stream."""
+
+    value = VerbatimField(allow_null=True)
+
+    class Meta:
+        model = Setting
+        fields = ('id', 'key', 'value')
+        read_only_fields = ('id', 'key', 'value')
+
+    def __init__(self, instance=None, data=serializers.empty, **kwargs):
+        if instance is None and data is not serializers.empty and 'key' in data:
+            try:
+                instance = Setting.objects.get(key=data['key'])
+            except Setting.DoesNotExist:
+                pass
+        super(SettingSerializer, self).__init__(instance, data, **kwargs)
+
+
+class SettingCategorySerializer(serializers.Serializer):
+    """Serialize a setting category."""
+
+    url = serializers.CharField(
+        read_only=True,
+    )
+    slug = serializers.CharField(
+        read_only=True,
+    )
+    name = serializers.CharField(
+        read_only=True,
+    )
+
+
+class SettingFieldMixin(object):
+    """Mixin to use a registered setting field class for API display/validation."""
+
+    def to_representation(self, obj):
+        return obj
+
+    def to_internal_value(self, value):
+        obj = super(SettingFieldMixin, self).to_internal_value(value)
+        return super(SettingFieldMixin, self).to_representation(obj)
+
+
+class SettingSingletonSerializer(serializers.Serializer):
+    """Present a group of
settings (by category) as a single object.""" + + def __init__(self, instance=None, data=serializers.empty, **kwargs): + # Instance (if given) should be an object with attributes for all of the + # settings in the category; never an actual Setting model instance. + assert instance is None or not hasattr(instance, 'pk') + super(SettingSingletonSerializer, self).__init__(instance, data, **kwargs) + + def get_fields(self): + fields = super(SettingSingletonSerializer, self).get_fields() + try: + category_slug = self.context['view'].kwargs.get('category_slug', 'all') + except (KeyError, AttributeError): + category_slug = '' + for key in settings_registry.get_registered_settings(category_slug=category_slug): + if self.instance and not hasattr(self.instance, key): + continue + extra_kwargs = {} + # Make LICENSE read-only here; update via /api/v1/config/ only. + if key == 'LICENSE': + extra_kwargs['read_only'] = True + field = settings_registry.get_setting_field(key, mixin_class=SettingFieldMixin, for_user=bool(category_slug == 'user'), **extra_kwargs) + fields[key] = field + return fields diff --git a/awx/conf/settings.py b/awx/conf/settings.py new file mode 100644 index 0000000000..1e156635c0 --- /dev/null +++ b/awx/conf/settings.py @@ -0,0 +1,273 @@ +# Python +import contextlib +import json +import logging +import threading +import time + +# Django +from django.conf import settings, UserSettingsHolder +from django.core.cache import cache +from django.core import checks +from django.core.exceptions import ImproperlyConfigured +from django.db import ProgrammingError, OperationalError + +# Django REST Framework +from rest_framework.fields import empty, SkipField + +# Tower +from awx.conf import settings_registry +from awx.conf.models import Setting + +# FIXME: Gracefully handle when settings are accessed before the database is +# ready (or during migrations). + +logger = logging.getLogger('awx.conf.settings') + +# Store a special value to indicate when a setting is not set in the database. +SETTING_CACHE_NOTSET = '___notset___' + +# Cannot store None in memcached; use a special value instead to indicate None. +# If the special value for None is the same as the "not set" value, then a value +# of None will be equivalent to the setting not being set (and will raise an +# AttributeError if there is no other default defined). +# SETTING_CACHE_NONE = '___none___' +SETTING_CACHE_NONE = SETTING_CACHE_NOTSET + +# Cannot store empty list/tuple in memcached; use a special value instead to +# indicate an empty list. +SETTING_CACHE_EMPTY_LIST = '___[]___' + +# Cannot store empty dict in memcached; use a special value instead to indicate +# an empty dict. +SETTING_CACHE_EMPTY_DICT = '___{}___' + +# Expire settings from cache after this many seconds. +SETTING_CACHE_TIMEOUT = 60 + +# Flag indicating whether to store field default values in the cache. +SETTING_CACHE_DEFAULTS = True + +__all__ = ['SettingsWrapper'] + + +@contextlib.contextmanager +def _log_database_error(): + try: + yield + except (ProgrammingError, OperationalError) as e: + logger.warning('Database settings are not available, using defaults (%s)', e, exc_info=True) + finally: + pass + + +class SettingsWrapper(UserSettingsHolder): + + @classmethod + def initialize(cls): + if not getattr(settings, '_awx_conf_settings', False): + settings_wrapper = cls(settings._wrapped) + settings._wrapped = settings_wrapper + + @classmethod + def _check_settings(cls, app_configs, **kwargs): + errors = [] + # FIXME: Warn if database not available! 
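+        # Validate each database-stored value against its registered field so
+        # that invalid rows surface through Django's system check framework.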
+        for setting in Setting.objects.filter(key__in=settings_registry.get_registered_settings(), user__isnull=True):
+            field = settings_registry.get_setting_field(setting.key)
+            try:
+                field.to_internal_value(setting.value)
+            except Exception as e:
+                errors.append(checks.Error(str(e)))
+        return errors
+
+    def __init__(self, default_settings):
+        self.__dict__['default_settings'] = default_settings
+        self.__dict__['_awx_conf_settings'] = self
+        self.__dict__['_awx_conf_preload_expires'] = None
+        self.__dict__['_awx_conf_preload_lock'] = threading.RLock()
+
+    def _get_supported_settings(self):
+        return settings_registry.get_registered_settings()
+
+    def _get_writeable_settings(self):
+        return settings_registry.get_registered_settings(read_only=False)
+
+    def _get_cache_value(self, value):
+        if value is None:
+            value = SETTING_CACHE_NONE
+        elif isinstance(value, (list, tuple)) and len(value) == 0:
+            value = SETTING_CACHE_EMPTY_LIST
+        elif isinstance(value, (dict,)) and len(value) == 0:
+            value = SETTING_CACHE_EMPTY_DICT
+        return value
+
+    def _preload_cache(self):
+        # Ensure we're only modifying local preload timeout from one thread.
+        with self._awx_conf_preload_lock:
+            # If local preload timeout has not expired, skip preloading.
+            if self._awx_conf_preload_expires and self._awx_conf_preload_expires > time.time():
+                return
+            # Otherwise update local preload timeout.
+            self.__dict__['_awx_conf_preload_expires'] = time.time() + SETTING_CACHE_TIMEOUT
+        # If local preload timer has expired, check to see if another process
+        # has already preloaded the cache and skip preloading if so.
+        if cache.get('_awx_conf_preload_expires', empty) is not empty:
+            return
+        # Initialize all database-configurable settings with a marker value so
+        # that a cache hit can indicate the setting is not configured, without
+        # requiring a database lookup.
+        settings_to_cache = dict([(key, SETTING_CACHE_NOTSET) for key in self._get_writeable_settings()])
+        # Load all settings defined in the database.
+        for setting in Setting.objects.filter(key__in=settings_to_cache.keys(), user__isnull=True).order_by('pk'):
+            if settings_to_cache[setting.key] != SETTING_CACHE_NOTSET:
+                continue
+            settings_to_cache[setting.key] = self._get_cache_value(setting.value)
+        # Load field default value for any settings not found in the database.
+        if SETTING_CACHE_DEFAULTS:
+            for key, value in settings_to_cache.items():
+                if value != SETTING_CACHE_NOTSET:
+                    continue
+                field = settings_registry.get_setting_field(key)
+                try:
+                    settings_to_cache[key] = self._get_cache_value(field.get_default())
+                except SkipField:
+                    pass
+        # Generate a cache key for each setting and store them all at once.
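+        # (The '_awx_conf_preload_expires' sentinel stored with them is what
+        # other processes check above to see the cache was already primed.)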
+        settings_to_cache = dict([(Setting.get_cache_key(k), v) for k, v in settings_to_cache.items()])
+        settings_to_cache['_awx_conf_preload_expires'] = self._awx_conf_preload_expires
+        logger.debug('cache set_many(%r, %r)', settings_to_cache, SETTING_CACHE_TIMEOUT)
+        cache.set_many(settings_to_cache, SETTING_CACHE_TIMEOUT)
+
+    def _get_local(self, name):
+        self._preload_cache()
+        cache_key = Setting.get_cache_key(name)
+        value = cache.get(cache_key, empty)
+        logger.debug('cache get(%r, %r) -> %r', cache_key, empty, value)
+        if value == SETTING_CACHE_NOTSET:
+            value = empty
+        elif value == SETTING_CACHE_NONE:
+            value = None
+        elif value == SETTING_CACHE_EMPTY_LIST:
+            value = []
+        elif value == SETTING_CACHE_EMPTY_DICT:
+            value = {}
+        field = settings_registry.get_setting_field(name)
+        if value is empty:
+            setting = None
+            if not field.read_only:
+                setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
+            if setting:
+                value = setting.value
+                # If None implies not set, convert when reading the value.
+                if value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
+                    value = SETTING_CACHE_NOTSET
+            else:
+                value = SETTING_CACHE_NOTSET
+                if SETTING_CACHE_DEFAULTS:
+                    try:
+                        value = field.get_default()
+                    except SkipField:
+                        pass
+            logger.debug('cache set(%r, %r, %r)', cache_key, self._get_cache_value(value), SETTING_CACHE_TIMEOUT)
+            cache.set(cache_key, self._get_cache_value(value), SETTING_CACHE_TIMEOUT)
+        if value == SETTING_CACHE_NOTSET and not SETTING_CACHE_DEFAULTS:
+            try:
+                value = field.get_default()
+            except SkipField:
+                pass
+        if value not in (empty, SETTING_CACHE_NOTSET):
+            try:
+                if field.read_only:
+                    internal_value = field.to_internal_value(value)
+                    field.run_validators(internal_value)
+                    return internal_value
+                else:
+                    return field.run_validation(value)
+            except Exception:
+                logger.warning('The current value "%r" for setting "%s" is invalid.', value, name, exc_info=True)
+        return empty
+
+    def _get_default(self, name):
+        return getattr(self.default_settings, name)
+
+    def __getattr__(self, name):
+        value = empty
+        if name in self._get_supported_settings():
+            with _log_database_error():
+                value = self._get_local(name)
+        if value is not empty:
+            return value
+        return self._get_default(name)
+
+    def _set_local(self, name, value):
+        field = settings_registry.get_setting_field(name)
+        if field.read_only:
+            logger.warning('Attempt to set read only setting "%s".', name)
+            raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))
+
+        try:
+            data = field.to_representation(value)
+            setting_value = field.run_validation(data)
+            db_value = field.to_representation(setting_value)
+        except Exception:
+            logger.exception('Unable to assign value "%r" to setting "%s".', value, name)
+            raise
+
+        # Always encode "raw" strings as JSON.
+        if isinstance(db_value, basestring):
+            db_value = json.dumps(db_value)
+        setting = Setting.objects.filter(key=name, user__isnull=True).order_by('pk').first()
+        if not setting:
+            setting = Setting.objects.create(key=name, user=None, value=db_value)
+            # post_save handler will delete from cache when added.
+        elif setting.value != db_value or type(setting.value) != type(db_value):
+            setting.value = db_value
+            setting.save(update_fields=['value'])
+            # post_save handler will delete from cache when changed.
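+    # Illustrative usage (assumed, not part of this patch): once
+    # SettingsWrapper.initialize() has wrapped django.conf.settings,
+    # attribute access is transparently backed by the cache and database:
+    #
+    #   from django.conf import settings
+    #   settings.TOWER_URL_BASE                    # cache -> db -> default
+    #   settings.TOWER_URL_BASE = 'https://tower.example.com'  # validated, stored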
+
+    def __setattr__(self, name, value):
+        if name in self._get_supported_settings():
+            with _log_database_error():
+                self._set_local(name, value)
+        else:
+            setattr(self.default_settings, name, value)
+
+    def _del_local(self, name):
+        field = settings_registry.get_setting_field(name)
+        if field.read_only:
+            logger.warning('Attempt to delete read only setting "%s".', name)
+            raise ImproperlyConfigured('Setting "{}" is read only.'.format(name))
+        for setting in Setting.objects.filter(key=name, user__isnull=True):
+            setting.delete()
+            # pre_delete handler will delete from cache.
+
+    def __delattr__(self, name):
+        if name in self._get_supported_settings():
+            with _log_database_error():
+                self._del_local(name)
+        else:
+            delattr(self.default_settings, name)
+
+    def __dir__(self):
+        keys = []
+        with _log_database_error():
+            for setting in Setting.objects.filter(key__in=self._get_supported_settings(), user__isnull=True):
+                # Skip returning settings that have been overridden but are
+                # considered to be "not set".
+                if setting.value is None and SETTING_CACHE_NOTSET == SETTING_CACHE_NONE:
+                    continue
+                if setting.key not in keys:
+                    keys.append(str(setting.key))
+        for key in dir(self.default_settings):
+            if key not in keys:
+                keys.append(key)
+        return keys
+
+    def is_overridden(self, setting):
+        set_locally = False
+        if setting in self._get_supported_settings():
+            with _log_database_error():
+                set_locally = Setting.objects.filter(key=setting, user__isnull=True).exists()
+        set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
+        return (set_locally or set_on_default)
diff --git a/awx/conf/signals.py b/awx/conf/signals.py
new file mode 100644
index 0000000000..fdcac300cf
--- /dev/null
+++ b/awx/conf/signals.py
@@ -0,0 +1,69 @@
+# Python
+import logging
+
+# Django
+from django.conf import settings
+from django.core.cache import cache
+from django.core.signals import setting_changed
+from django.db.models.signals import post_save, pre_delete, post_delete
+from django.dispatch import receiver
+
+# Tower
+import awx.main.signals
+from awx.conf import settings_registry
+from awx.conf.models import Setting
+from awx.conf.serializers import SettingSerializer
+
+logger = logging.getLogger('awx.conf.signals')
+
+awx.main.signals.model_serializer_mapping[Setting] = SettingSerializer
+
+__all__ = []
+
+
+def handle_setting_change(key, for_delete=False):
+    # When a setting changes or is deleted, remove its value from cache along
+    # with any other settings that depend on it.
+    setting_keys = [key]
+    for dependent_key in settings_registry.get_dependent_settings(key):
+        # Note: Doesn't handle multiple levels of dependencies!
+        setting_keys.append(dependent_key)
+    cache_keys = set([Setting.get_cache_key(k) for k in setting_keys])
+    logger.debug('cache delete_many(%r)', cache_keys)
+    cache.delete_many(cache_keys)
+
+    # Send setting_changed signal with new value for each setting.
+    for setting_key in setting_keys:
+        setting_changed.send(
+            sender=Setting,
+            setting=setting_key,
+            value=getattr(settings, setting_key, None),
+            enter=not bool(for_delete),
+        )
+
+
+@receiver(post_save, sender=Setting)
+def on_post_save_setting(sender, **kwargs):
+    instance = kwargs['instance']
+    # Skip for user-specific settings.
+    if instance.user:
+        return
+    handle_setting_change(instance.key)
+
+
+@receiver(pre_delete, sender=Setting)
+def on_pre_delete_setting(sender, **kwargs):
+    instance = kwargs['instance']
+    # Skip for user-specific settings.
+    if instance.user:
+        return
+    # Save instance key (setting name) for post_delete.
+    instance._saved_key_ = instance.key
+
+
+@receiver(post_delete, sender=Setting)
+def on_post_delete_setting(sender, **kwargs):
+    instance = kwargs['instance']
+    key = getattr(instance, '_saved_key_', None)
+    if key:
+        handle_setting_change(key, True)
diff --git a/awx/conf/urls.py b/awx/conf/urls.py
new file mode 100644
index 0000000000..15505f4c3c
--- /dev/null
+++ b/awx/conf/urls.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2016 Ansible, Inc.
+# All Rights Reserved.
+
+# Django
+from django.conf.urls import patterns
+
+# Tower
+from awx.api.urls import url
+
+
+urlpatterns = patterns(
+    'awx.conf.views',
+    url(r'^$', 'setting_category_list'),
+    url(r'^(?P<category_slug>[a-z0-9-]+)/$', 'setting_singleton_detail'),
+)
diff --git a/awx/conf/utils.py b/awx/conf/utils.py
new file mode 100755
index 0000000000..b780038e9f
--- /dev/null
+++ b/awx/conf/utils.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python

+# Python
+import difflib
+import glob
+import os
+import shutil
+
+# RedBaron
+from redbaron import RedBaron, indent
+
+__all__ = ['comment_assignments']
+
+
+def comment_assignments(patterns, assignment_names, dry_run=True, backup_suffix='.old'):
+    if isinstance(patterns, basestring):
+        patterns = [patterns]
+    diffs = []
+    for pattern in patterns:
+        for filename in sorted(glob.glob(pattern)):
+            filename = os.path.abspath(os.path.normpath(filename))
+            if backup_suffix:
+                backup_filename = '{}{}'.format(filename, backup_suffix)
+            else:
+                backup_filename = None
+            diff = comment_assignments_in_file(filename, assignment_names, dry_run, backup_filename)
+            if diff:
+                diffs.append(diff)
+    return diffs
+
+
+def comment_assignments_in_file(filename, assignment_names, dry_run=True, backup_filename=None):
+    if isinstance(assignment_names, basestring):
+        assignment_names = [assignment_names]
+    else:
+        assignment_names = assignment_names[:]
+    current_file_data = open(filename).read()
+
+    for assignment_name in assignment_names[:]:
+        if assignment_name not in current_file_data:
+            assignment_names.remove(assignment_name)
+    if not assignment_names:
+        return ''
+
+    replace_lines = {}
+    rb = RedBaron(current_file_data)
+    for assignment_node in rb.find_all('assignment'):
+        for assignment_name in assignment_names:
+
+            # Only target direct assignments to a variable.
+            name_node = assignment_node.find('name', value=assignment_name)
+            if not name_node:
+                continue
+            if assignment_node.target.type != 'name':
+                continue
+
+            # Build a new node that comments out the existing assignment node.
+            indentation = '{}# '.format(assignment_node.indentation or '')
+            new_node_content = indent(assignment_node.dumps(), indentation)
+            new_node_lines = new_node_content.splitlines()
+            # Add a pass statement in case the assignment block is the only
+            # child in a parent code block to prevent a syntax error.
+            if assignment_node.indentation:
+                new_node_lines[0] = new_node_lines[0].replace(indentation, '{}pass # '.format(assignment_node.indentation or ''), 1)
+            new_node_lines[0] = '{0}This setting is now configured via the Tower API.\n{1}'.format(indentation, new_node_lines[0])
+
+            # Store new node lines in dictionary to be replaced in file.
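+            # (RedBaron's bounding-box line numbers are taken to be 1-based
+            # here, matching the 1-based keys consumed by the rewrite loop
+            # further below.)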
+ start_lineno = assignment_node.absolute_bounding_box.top_left.line + end_lineno = assignment_node.absolute_bounding_box.bottom_right.line + for n, new_node_line in enumerate(new_node_lines): + new_lineno = start_lineno + n + assert new_lineno <= end_lineno + replace_lines[new_lineno] = new_node_line + + if not replace_lines: + return '' + + # Iterate through all lines in current file and replace as needed. + current_file_lines = current_file_data.splitlines() + new_file_lines = [] + for n, line in enumerate(current_file_lines): + new_file_lines.append(replace_lines.get(n + 1, line)) + new_file_data = '\n'.join(new_file_lines) + new_file_lines = new_file_data.splitlines() + + # If changed, syntax check and write the new file; return a diff of changes. + diff_lines = [] + if new_file_data != current_file_data: + compile(new_file_data, filename, 'exec') + if backup_filename: + from_file = backup_filename + else: + from_file = '{}.old'.format(filename) + to_file = filename + diff_lines = list(difflib.unified_diff(current_file_lines, new_file_lines, fromfile=from_file, tofile=to_file, lineterm='')) + if not dry_run: + if backup_filename: + shutil.copy2(filename, backup_filename) + with open(filename, 'wb') as fileobj: + fileobj.write(new_file_data) + return '\n'.join(diff_lines) + + +if __name__ == '__main__': + pattern = os.path.join(os.path.dirname(__file__), '..', 'settings', 'local_*.py') + diffs = comment_assignments(pattern, ['AUTH_LDAP_ORGANIZATION_MAP']) + for diff in diffs: + print(diff) diff --git a/awx/conf/views.py b/awx/conf/views.py new file mode 100644 index 0000000000..5dfa71b84d --- /dev/null +++ b/awx/conf/views.py @@ -0,0 +1,129 @@ +# Copyright (c) 2016 Ansible, Inc. +# All Rights Reserved. + +# Python +import collections +import json +import sys + +# Django +from django.core.urlresolvers import reverse +from django.http import Http404 +from django.utils.translation import ugettext_lazy as _ + +# Django REST Framework +from rest_framework.exceptions import PermissionDenied +from rest_framework.response import Response +from rest_framework import serializers +from rest_framework import status + +# Tower +from awx.api.generics import * # noqa +from awx.main.utils import * # noqa +from awx.conf.models import Setting +from awx.conf.serializers import SettingCategorySerializer, SettingSingletonSerializer +from awx.conf import settings_registry + + +SettingCategory = collections.namedtuple('SettingCategory', ('url', 'slug', 'name')) + + +class SettingCategoryList(ListAPIView): + + model = Setting # Not exactly, but needed for the view. + serializer_class = SettingCategorySerializer + filter_backends = [] + new_in_310 = True + view_name = _('Setting Categories') + + def get_queryset(self): + setting_categories = [] + if self.request.user.is_superuser or self.request.user.is_system_auditor: + categories = settings_registry.get_registered_categories() + else: + categories = {'user': _('User')} + for category_slug in sorted(categories.keys()): + url = reverse('api:setting_singleton_detail', args=(category_slug,)) + setting_categories.append(SettingCategory(url, category_slug, categories[category_slug])) + return setting_categories + + +class SettingSingletonDetail(RetrieveUpdateDestroyAPIView): + + model = Setting # Not exactly, but needed for the view. 
+ serializer_class = SettingSingletonSerializer + filter_backends = [] + new_in_310 = True + view_name = _('Setting Detail') + + def get_queryset(self): + self.category_slug = self.kwargs.get('category_slug', 'all') + all_category_slugs = settings_registry.get_registered_categories().keys() + if self.request.user.is_superuser or getattr(self.request.user, 'is_system_auditor', False): + category_slugs = all_category_slugs + else: + category_slugs = {'user'} + if self.category_slug not in all_category_slugs: + raise Http404 + if self.category_slug not in category_slugs: + raise PermissionDenied() + + registered_settings = settings_registry.get_registered_settings(category_slug=self.category_slug) + if self.category_slug == 'user': + return Setting.objects.filter(key__in=registered_settings, user=self.request.user) + else: + return Setting.objects.filter(key__in=registered_settings, user__isnull=True) + + def get_object(self): + settings_qs = self.get_queryset() + registered_settings = settings_registry.get_registered_settings(category_slug=self.category_slug) + all_settings = {} + for setting in settings_qs: + all_settings[setting.key] = setting.value + for key in registered_settings: + if key in all_settings or self.category_slug == 'changed': + continue + try: + field = settings_registry.get_setting_field(key, for_user=bool(self.category_slug == 'user')) + all_settings[key] = field.get_default() + except serializers.SkipField: + all_settings[key] = None + all_settings['user'] = self.request.user if self.category_slug == 'user' else None + obj = type('Settings', (object,), all_settings)() + self.check_object_permissions(self.request, obj) + return obj + + def perform_update(self, serializer): + settings_qs = self.get_queryset() + user = self.request.user if self.category_slug == 'user' else None + for key, value in serializer.validated_data.items(): + setattr(serializer.instance, key, value) + # Always encode "raw" strings as JSON. + if isinstance(value, basestring): + value = json.dumps(value) + setting = settings_qs.filter(key=key).order_by('pk').first() + if not setting: + setting = Setting.objects.create(key=key, user=user, value=value) + elif setting.value != value or type(setting.value) != type(value): + setting.value = value + setting.save(update_fields=['value']) + + def destroy(self, request, *args, **kwargs): + instance = self.get_object() + self.perform_destroy(instance) + return Response(status=status.HTTP_204_NO_CONTENT) + + def perform_destroy(self, instance): + for setting in self.get_queryset(): + setting.delete() + + +# Create view functions for all of the class-based views to simplify inclusion +# in URL patterns and reverse URL lookups, converting CamelCase names to +# lowercase_with_underscore (e.g. MyView.as_view() becomes my_view). +this_module = sys.modules[__name__] +for attr, value in locals().items(): + if isinstance(value, type) and issubclass(value, APIView): + name = camelcase_to_underscore(attr) + view = value.as_view() + setattr(this_module, name, view) diff --git a/awx/main/__init__.py b/awx/main/__init__.py index e484e62be1..f500f439b6 100644 --- a/awx/main/__init__.py +++ b/awx/main/__init__.py @@ -1,2 +1,4 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. 
+ +default_app_config = 'awx.main.apps.MainConfig' diff --git a/awx/main/access.py b/awx/main/access.py index a1cae71a66..2be67f0849 100644 --- a/awx/main/access.py +++ b/awx/main/access.py @@ -7,6 +7,7 @@ import sys import logging # Django +from django.conf import settings from django.db.models import Q from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType @@ -19,9 +20,8 @@ from awx.main.utils import * # noqa from awx.main.models import * # noqa from awx.main.models.unified_jobs import ACTIVE_STATES from awx.main.models.mixins import ResourceMixin -from awx.api.license import LicenseForbids -from awx.main.task_engine import TaskSerializer -from awx.main.conf import tower_settings +from awx.main.task_engine import TaskEnhancer +from awx.conf.license import LicenseForbids __all__ = ['get_user_queryset', 'check_user_access', 'user_accessible_objects', @@ -192,8 +192,7 @@ class BaseAccess(object): return self.can_change(obj, data) def check_license(self, add_host=False, feature=None, check_expiration=True): - reader = TaskSerializer() - validation_info = reader.from_database() + validation_info = TaskEnhancer().validate_enhancements() if ('test' in sys.argv or 'py.test' in sys.argv[0] or 'jenkins' in sys.argv) and not os.environ.get('SKIP_LICENSE_FIXUP_FOR_TEST', ''): validation_info['free_instances'] = 99999999 validation_info['time_remaining'] = 99999999 @@ -311,7 +310,7 @@ class UserAccess(BaseAccess): if self.user.is_superuser or self.user.is_system_auditor: return User.objects.all() - if tower_settings.ORG_ADMINS_CAN_SEE_ALL_USERS and \ + if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and \ (self.user.admin_of_organizations.exists() or self.user.auditor_of_organizations.exists()): return User.objects.all() @@ -1919,20 +1918,6 @@ class CustomInventoryScriptAccess(BaseAccess): def can_delete(self, obj): return self.can_admin(obj) - -class TowerSettingsAccess(BaseAccess): - ''' - - I can see settings when - - I am a super user - - I can edit settings when - - I am a super user - - I can clear settings when - - I am a super user - ''' - - model = TowerSettings - - class RoleAccess(BaseAccess): ''' - I can see roles when @@ -2009,7 +1994,6 @@ register_access(UnifiedJobTemplate, UnifiedJobTemplateAccess) register_access(UnifiedJob, UnifiedJobAccess) register_access(ActivityStream, ActivityStreamAccess) register_access(CustomInventoryScript, CustomInventoryScriptAccess) -register_access(TowerSettings, TowerSettingsAccess) register_access(Role, RoleAccess) register_access(NotificationTemplate, NotificationTemplateAccess) register_access(Notification, NotificationAccess) diff --git a/awx/main/apps.py b/awx/main/apps.py new file mode 100644 index 0000000000..f1ebe624d2 --- /dev/null +++ b/awx/main/apps.py @@ -0,0 +1,9 @@ +# Django +from django.apps import AppConfig +from django.utils.translation import ugettext_lazy as _ + + +class MainConfig(AppConfig): + + name = 'awx.main' + verbose_name = _('Main') diff --git a/awx/main/conf.py b/awx/main/conf.py index e506432f21..e0d16e8542 100644 --- a/awx/main/conf.py +++ b/awx/main/conf.py @@ -1,50 +1,175 @@ -# Copyright (c) 2015 Ansible, Inc.. -# All Rights Reserved. 
- +# Python +import json import logging +import os -from django.conf import settings as django_settings -from django.db.utils import ProgrammingError -from django.db import OperationalError -from awx.main.models.configuration import TowerSettings +# Django +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx.conf import fields, register logger = logging.getLogger('awx.main.conf') -class TowerConfiguration(object): +register( + 'ACTIVITY_STREAM_ENABLED', + field_class=fields.BooleanField, + label=_('Enable Activity Stream'), + help_text=_('Enable capturing activity for the Tower activity stream.'), + category=_('System'), + category_slug='system', +) - # TODO: Caching so we don't have to hit the database every time for settings - def __getattr__(self, key): - settings_manifest = django_settings.TOWER_SETTINGS_MANIFEST - if key not in settings_manifest: - raise AttributeError("Tower Setting with key '{0}' is not defined in the manifest".format(key)) - default_value = settings_manifest[key]['default'] - ts = TowerSettings.objects.filter(key=key) - try: - if not ts.exists(): - try: - val_actual = getattr(django_settings, key) - except AttributeError: - val_actual = default_value - return val_actual - return ts[0].value_converted - except (ProgrammingError, OperationalError), e: - # Database is not available yet, usually during migrations so lets use the default - logger.debug("Database settings not available yet, using defaults ({0})".format(e)) - return default_value +register( + 'ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC', + field_class=fields.BooleanField, + label=_('Enable Activity Stream for Inventory Sync'), + help_text=_('Enable capturing activity for the Tower activity stream when running inventory sync.'), + category=_('System'), + category_slug='system', +) - def __setattr__(self, key, value): - settings_manifest = django_settings.TOWER_SETTINGS_MANIFEST - if key not in settings_manifest: - raise AttributeError("Tower Setting with key '{0}' does not exist".format(key)) - settings_entry = settings_manifest[key] - try: - settings_actual = TowerSettings.objects.get(key=key) - except TowerSettings.DoesNotExist: - settings_actual = TowerSettings(key=key, - description=settings_entry['description'], - category=settings_entry['category'], - value_type=settings_entry['type']) - settings_actual.value_converted = value - settings_actual.save() +register( + 'ORG_ADMINS_CAN_SEE_ALL_USERS', + field_class=fields.BooleanField, + label=_('All Users Visible to Organization Admins'), + help_text=_('Controls whether any Organization Admin can view all users, even those not associated with their Organization.'), + category=_('System'), + category_slug='system', +) -tower_settings = TowerConfiguration() +register( + 'TOWER_ADMIN_ALERTS', + field_class=fields.BooleanField, + label=_('Enable Tower Administrator Alerts'), + help_text=_('Allow Tower to email Admin users for system events that may require attention.'), + category=_('System'), + category_slug='system', +) + +register( + 'TOWER_URL_BASE', + field_class=fields.URLField, + schemes=('http', 'https'), + allow_plain_hostname=True, # Allow hostname only without TLD. 
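+    # (e.g. a plain internal URL like 'https://tower' is accepted)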
+ label=_('Base URL of the Tower host'), + help_text=_('This setting is used by services like notifications to render ' + 'a valid url to the Tower host.'), + category=_('System'), + category_slug='system', +) + +register( + 'REMOTE_HOST_HEADERS', + field_class=fields.StringListField, + label=_('Remote Host Headers'), + help_text=_('HTTP headers and meta keys to search to determine remote host ' + 'name or IP. Add additional items to this list, such as ' + '"HTTP_X_FORWARDED_FOR", if behind a reverse proxy.\n\n' + 'Note: The headers will be searched in order and the first ' + 'found remote host name or IP will be used.\n\n' + 'In the below example 8.8.8.7 would be the chosen IP address.\n' + 'X-Forwarded-For: 8.8.8.7, 192.168.2.1, 127.0.0.1\n' + 'Host: 127.0.0.1\n' + 'REMOTE_HOST_HEADERS = [\'HTTP_X_FORWARDED_FOR\', ' + '\'REMOTE_ADDR\', \'REMOTE_HOST\']'), + category=_('System'), + category_slug='system', +) + +def _load_default_license_from_file(): + try: + license_file = os.environ.get('AWX_LICENSE_FILE', '/etc/tower/license') + if os.path.exists(license_file): + license_data = json.load(open(license_file)) + logger.debug('Read license data from "%s".', license_file) + return license_data + except: + logger.warning('Could not read license from "%s".', license_file, exc_info=True) + return {} + +register( + 'LICENSE', + field_class=fields.DictField, + default=_load_default_license_from_file, + label=_('Tower License'), + help_text=_('The license controls which features and functionality are ' + 'enabled in Tower. Use /api/v1/config/ to update or change ' + 'the license.'), + category=_('System'), + category_slug='system', +) + +register( + 'AD_HOC_COMMANDS', + field_class=fields.StringListField, + label=_('Ansible Modules Allowed for Ad Hoc Jobs'), + help_text=_('List of modules allowed to be used by ad-hoc jobs.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_PROOT_ENABLED', + field_class=fields.BooleanField, + label=_('Enable PRoot for Job Execution'), + help_text=_('Isolates an Ansible job from protected parts of the Tower system to prevent exposing sensitive information.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_PROOT_BASE_PATH', + field_class=fields.CharField, + label=_('Base PRoot execution path'), + help_text=_('The location that PRoot will create its temporary working directory.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_PROOT_HIDE_PATHS', + field_class=fields.StringListField, + label=_('Paths to hide from PRoot jobs'), + help_text=_('Extra paths to hide from PRoot isolated processes.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'AWX_PROOT_SHOW_PATHS', + field_class=fields.StringListField, + label=_('Paths to expose to PRoot jobs'), + help_text=_('Explicit whitelist of paths to expose to PRoot jobs.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'STDOUT_MAX_BYTES_DISPLAY', + field_class=fields.IntegerField, + min_value=0, + label=_('Standard Output Maximum Display Size'), + help_text=_('Maximum Size of Standard Output in bytes to display before requiring the output be downloaded.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 'SCHEDULE_MAX_JOBS', + field_class=fields.IntegerField, + min_value=1, + label=_('Maximum Scheduled Jobs'), + help_text=_('Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created.'), + category=_('Jobs'), + category_slug='jobs', +) + +register( + 
'AWX_ANSIBLE_CALLBACK_PLUGINS', + field_class=fields.StringListField, + label=_('Ansible Callback Plugins'), + help_text=_('List of paths for extra callback plugins to be used when running jobs.'), + category=_('Jobs'), + category_slug='jobs', +) diff --git a/awx/main/management/commands/cleanup_facts.py b/awx/main/management/commands/cleanup_facts.py index 578bee3441..e4c2d9f6f4 100644 --- a/awx/main/management/commands/cleanup_facts.py +++ b/awx/main/management/commands/cleanup_facts.py @@ -13,7 +13,7 @@ from django.utils.timezone import now # AWX from awx.main.models.fact import Fact -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled OLDER_THAN = 'older_than' GRANULARITY = 'granularity' diff --git a/awx/main/management/commands/inventory_import.py b/awx/main/management/commands/inventory_import.py index 4ae521cd5c..7f87694cbe 100644 --- a/awx/main/management/commands/inventory_import.py +++ b/awx/main/management/commands/inventory_import.py @@ -26,10 +26,9 @@ from django.utils.encoding import smart_text # AWX from awx.main.models import * # noqa +from awx.main.task_engine import TaskEnhancer from awx.main.utils import ignore_inventory_computed_fields, check_proot_installed, wrap_args_with_proot from awx.main.signals import disable_activity_stream -from awx.main.task_engine import TaskSerializer as LicenseReader -from awx.main.conf import tower_settings logger = logging.getLogger('awx.main.commands.inventory_import') @@ -358,7 +357,7 @@ class ExecutableJsonLoader(BaseLoader): data = {} stdout, stderr = '', '' try: - if self.is_custom and getattr(tower_settings, 'AWX_PROOT_ENABLED', False): + if self.is_custom and getattr(settings, 'AWX_PROOT_ENABLED', False): if not check_proot_installed(): raise RuntimeError("proot is not installed but is configured for use") kwargs = {'proot_temp_dir': self.source_dir} # TODO: Remove proot dir @@ -1191,8 +1190,7 @@ class Command(NoArgsCommand): self._create_update_group_hosts() def check_license(self): - reader = LicenseReader() - license_info = reader.from_database() + license_info = TaskEnhancer().validate_enhancements() if not license_info or len(license_info) == 0: self.logger.error(LICENSE_NON_EXISTANT_MESSAGE) raise CommandError('No Tower license found!') diff --git a/awx/main/management/commands/update_instance.py b/awx/main/management/commands/update_instance.py index 9cfecfb22d..346d5b728f 100644 --- a/awx/main/management/commands/update_instance.py +++ b/awx/main/management/commands/update_instance.py @@ -5,7 +5,7 @@ from django.core.management.base import CommandError from django.db import transaction from awx.main.management.commands._base_instance import BaseCommandInstance -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled from awx.main.models import Instance instance_str = BaseCommandInstance.instance_str diff --git a/awx/main/middleware.py b/awx/main/middleware.py index 021ff85ad5..75bdf01daa 100644 --- a/awx/main/middleware.py +++ b/awx/main/middleware.py @@ -5,13 +5,13 @@ import logging import threading import uuid +from django.conf import settings from django.contrib.auth.models import User from django.db.models.signals import post_save from django.db import IntegrityError from django.utils.functional import curry from awx.main.models import ActivityStream -from awx.main.conf import tower_settings from awx.api.authentication import TokenAuthentication @@ -79,6 +79,6 @@ class AuthTokenTimeoutMiddleware(object): if not 
TokenAuthentication._get_x_auth_token_header(request): return response - response['Auth-Token-Timeout'] = int(tower_settings.AUTH_TOKEN_EXPIRATION) + response['Auth-Token-Timeout'] = int(settings.AUTH_TOKEN_EXPIRATION) return response diff --git a/awx/main/migrations/0036_v310_remove_tower_settings.py b/awx/main/migrations/0036_v310_remove_tower_settings.py new file mode 100644 index 0000000000..a5a75b12ca --- /dev/null +++ b/awx/main/migrations/0036_v310_remove_tower_settings.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0035_v310_jobevent_uuid'), + ] + + # These settings are now in the separate awx.conf app. + operations = [ + migrations.RemoveField( + model_name='towersettings', + name='user', + ), + migrations.DeleteModel( + name='TowerSettings', + ), + ] diff --git a/awx/main/migrations/_old_access.py b/awx/main/migrations/_old_access.py index da49723a9e..2996816abe 100644 --- a/awx/main/migrations/_old_access.py +++ b/awx/main/migrations/_old_access.py @@ -13,6 +13,7 @@ import sys import logging # Django +from django.conf import settings from django.db.models import F, Q from django.contrib.auth.models import User @@ -22,9 +23,7 @@ from rest_framework.exceptions import ParseError, PermissionDenied # AWX from awx.main.utils import * # noqa from awx.main.models import * # noqa -from awx.api.license import LicenseForbids -from awx.main.task_engine import TaskSerializer -from awx.main.conf import tower_settings +from awx.conf.license import LicenseForbids __all__ = ['get_user_queryset', 'check_user_access'] @@ -153,8 +152,8 @@ class BaseAccess(object): return self.can_change(obj, None) def check_license(self, add_host=False, feature=None, check_expiration=True): - reader = TaskSerializer() - validation_info = reader.from_database() + from awx.main.task_engine import TaskEnhancer + validation_info = TaskEnhancer().validate_enhancements() if ('test' in sys.argv or 'py.test' in sys.argv[0] or 'jenkins' in sys.argv) and not os.environ.get('SKIP_LICENSE_FIXUP_FOR_TEST', ''): validation_info['free_instances'] = 99999999 validation_info['time_remaining'] = 99999999 @@ -202,7 +201,7 @@ class UserAccess(BaseAccess): qs = self.model.objects.distinct() if self.user.is_superuser: return qs - if tower_settings.ORG_ADMINS_CAN_SEE_ALL_USERS and self.user.deprecated_admin_of_organizations.all().exists(): + if settings.ORG_ADMINS_CAN_SEE_ALL_USERS and self.user.deprecated_admin_of_organizations.all().exists(): return qs return qs.filter( Q(pk=self.user.pk) | @@ -1624,29 +1623,6 @@ class CustomInventoryScriptAccess(BaseAccess): return False -class TowerSettingsAccess(BaseAccess): - ''' - - I can see settings when - - I am a super user - - I can edit settings when - - I am a super user - - I can clear settings when - - I am a super user - ''' - - model = TowerSettings - - def get_queryset(self): - if self.user.is_superuser: - return self.model.objects.all() - return self.model.objects.none() - - def can_change(self, obj, data): - return self.user.is_superuser - - def can_delete(self, obj): - return self.user.is_superuser - register_access(User, UserAccess) register_access(Organization, OrganizationAccess) register_access(Inventory, InventoryAccess) @@ -1672,4 +1648,3 @@ register_access(UnifiedJobTemplate, UnifiedJobTemplateAccess) register_access(UnifiedJob, UnifiedJobAccess) register_access(ActivityStream, ActivityStreamAccess) 
register_access(CustomInventoryScript, CustomInventoryScriptAccess) -register_access(TowerSettings, TowerSettingsAccess) diff --git a/awx/main/models/__init__.py b/awx/main/models/__init__.py index 1c019ce01a..161a59e65c 100644 --- a/awx/main/models/__init__.py +++ b/awx/main/models/__init__.py @@ -16,7 +16,6 @@ from awx.main.models.ad_hoc_commands import * # noqa from awx.main.models.schedules import * # noqa from awx.main.models.activity_stream import * # noqa from awx.main.models.ha import * # noqa -from awx.main.models.configuration import * # noqa from awx.main.models.rbac import * # noqa from awx.main.models.mixins import * # noqa from awx.main.models.notifications import * # noqa @@ -99,7 +98,6 @@ activity_stream_registrar.connect(AdHocCommand) # activity_stream_registrar.connect(Profile) activity_stream_registrar.connect(Schedule) activity_stream_registrar.connect(CustomInventoryScript) -activity_stream_registrar.connect(TowerSettings) activity_stream_registrar.connect(NotificationTemplate) activity_stream_registrar.connect(Notification) activity_stream_registrar.connect(Label) diff --git a/awx/main/models/ad_hoc_commands.py b/awx/main/models/ad_hoc_commands.py index 5c4a729c3e..b03be56452 100644 --- a/awx/main/models/ad_hoc_commands.py +++ b/awx/main/models/ad_hoc_commands.py @@ -22,7 +22,6 @@ from jsonfield import JSONField from awx.main.models.base import * # noqa from awx.main.models.unified_jobs import * # noqa from awx.main.utils import decrypt_field -from awx.main.conf import tower_settings from awx.main.models.notifications import JobNotificationMixin logger = logging.getLogger('awx.main.models.ad_hoc_commands') @@ -115,7 +114,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin): if type(self.module_name) not in (str, unicode): raise ValidationError("Invalid type for ad hoc command") module_name = self.module_name.strip() or 'command' - if module_name not in tower_settings.AD_HOC_COMMANDS: + if module_name not in settings.AD_HOC_COMMANDS: raise ValidationError('Unsupported module for ad hoc commands.') return module_name @@ -148,7 +147,7 @@ class AdHocCommand(UnifiedJob, JobNotificationMixin): return reverse('api:ad_hoc_command_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/ad_hoc_commands/{}".format(self.pk)) + return urljoin(settings.TOWER_URL_BASE, "/#/ad_hoc_commands/{}".format(self.pk)) @property def task_auth_token(self): diff --git a/awx/main/models/configuration.py b/awx/main/models/configuration.py deleted file mode 100644 index 208ccbd487..0000000000 --- a/awx/main/models/configuration.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. 
- -# Python -import json - -# Django -from django.db import models -from django.utils.encoding import force_text -from django.utils.translation import ugettext_lazy as _ - -# Tower -from awx.main.models.base import CreatedModifiedModel - - -class TowerSettings(CreatedModifiedModel): - - class Meta: - app_label = 'main' - - SETTINGS_TYPE_CHOICES = [ - ('string', _("String")), - ('int', _('Integer')), - ('float', _('Decimal')), - ('json', _('JSON')), - ('bool', _('Boolean')), - ('password', _('Password')), - ('list', _('List')) - ] - - key = models.CharField( - max_length=255, - unique=True - ) - description = models.TextField() - category = models.CharField(max_length=128) - value = models.TextField( - blank=True, - ) - value_type = models.CharField( - max_length=12, - choices=SETTINGS_TYPE_CHOICES - ) - user = models.ForeignKey( - 'auth.User', - related_name='settings', - default=None, - null=True, - editable=False, - ) - - @property - def value_converted(self): - if self.value_type == 'json': - converted_type = json.loads(self.value) - elif self.value_type == 'password': - converted_type = self.value - elif self.value_type == 'list': - if self.value: - converted_type = [x.strip() for x in self.value.split(',')] - else: - converted_type = [] - elif self.value_type == 'bool': - converted_type = force_text(self.value).lower() in ('true', 'yes', '1') - elif self.value_type == 'string': - converted_type = self.value - else: - t = __builtins__[self.value_type] - converted_type = t(self.value) - return converted_type - - @value_converted.setter - def value_converted(self, value): - if self.value_type == 'json': - self.value = json.dumps(value) - elif self.value_type == 'list': - try: - self.value = ','.join(map(force_text, value)) - except TypeError: - self.value = force_text(value) - elif self.value_type == 'bool': - self.value = force_text(bool(value)) - else: - self.value = force_text(value) diff --git a/awx/main/models/credential.py b/awx/main/models/credential.py index 3188e10083..d7bc6a640e 100644 --- a/awx/main/models/credential.py +++ b/awx/main/models/credential.py @@ -1,9 +1,6 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. -import base64 -import re - # Django from django.db import models from django.utils.translation import ugettext_lazy as _ @@ -14,6 +11,7 @@ from django.core.urlresolvers import reverse from awx.main.fields import ImplicitRoleField from awx.main.constants import CLOUD_PROVIDERS from awx.main.utils import decrypt_field +from awx.main.validators import validate_ssh_private_key from awx.main.models.base import * # noqa from awx.main.models.mixins import ResourceMixin from awx.main.models.rbac import ( @@ -241,11 +239,13 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): else: ssh_key_data = self.ssh_key_data try: - key_data = validate_ssh_private_key(ssh_key_data) + pem_objects = validate_ssh_private_key(ssh_key_data) + for pem_object in pem_objects: + if pem_object.get('key_enc', False): + return True except ValidationError: - return False - else: - return bool(key_data['key_enc']) + pass + return False @property def needs_ssh_key_unlock(self): @@ -379,126 +379,3 @@ class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin): if 'cloud' not in update_fields: update_fields.append('cloud') super(Credential, self).save(*args, **kwargs) - - -def validate_ssh_private_key(data): - """Validate that the given SSH private key or certificate is, - in fact, valid. 
- """ - # Map the X in BEGIN X PRIVATE KEY to the key type (ssh-keygen -t). - # Tower jobs using OPENSSH format private keys may still fail if the - # system SSH implementation lacks support for this format. - key_types = { - 'RSA': 'rsa', - 'DSA': 'dsa', - 'EC': 'ecdsa', - 'OPENSSH': 'ed25519', - '': 'rsa1', - } - # Key properties to return if valid. - key_data = { - 'key_type': None, # Key type (from above mapping). - 'key_seg': '', # Key segment (all text including begin/end). - 'key_b64': '', # Key data as base64. - 'key_bin': '', # Key data as binary. - 'key_enc': None, # Boolean, whether key is encrypted. - 'cert_seg': '', # Cert segment (all text including begin/end). - 'cert_b64': '', # Cert data as base64. - 'cert_bin': '', # Cert data as binary. - } - data = data.strip() - validation_error = ValidationError('Invalid private key.') - - # Sanity check: We may potentially receive a full PEM certificate, - # and we want to accept these. - cert_begin_re = r'(-{4,})\s*BEGIN\s+CERTIFICATE\s*(-{4,})' - cert_end_re = r'(-{4,})\s*END\s+CERTIFICATE\s*(-{4,})' - cert_begin_match = re.search(cert_begin_re, data) - cert_end_match = re.search(cert_end_re, data) - if cert_begin_match and not cert_end_match: - raise validation_error - elif not cert_begin_match and cert_end_match: - raise validation_error - elif cert_begin_match and cert_end_match: - cert_dashes = set([cert_begin_match.groups()[0], cert_begin_match.groups()[1], - cert_end_match.groups()[0], cert_end_match.groups()[1]]) - if len(cert_dashes) != 1: - raise validation_error - key_data['cert_seg'] = data[cert_begin_match.start():cert_end_match.end()] - - # Find the private key, and also ensure that it internally matches - # itself. - # Set up the valid private key header and footer. - begin_re = r'(-{4,})\s*BEGIN\s+([A-Z0-9]+)?\s*PRIVATE\sKEY\s*(-{4,})' - end_re = r'(-{4,})\s*END\s+([A-Z0-9]+)?\s*PRIVATE\sKEY\s*(-{4,})' - begin_match = re.search(begin_re, data) - end_match = re.search(end_re, data) - if not begin_match or not end_match: - raise validation_error - - # Ensure that everything, such as dash counts and key type, lines up, - # and raise an error if it does not. - dashes = set([begin_match.groups()[0], begin_match.groups()[2], - end_match.groups()[0], end_match.groups()[2]]) - if len(dashes) != 1: - raise validation_error - if begin_match.groups()[1] != end_match.groups()[1]: - raise validation_error - key_type = begin_match.groups()[1] or '' - try: - key_data['key_type'] = key_types[key_type] - except KeyError: - raise ValidationError('Invalid private key: unsupported type %s' % key_type) - - # The private key data begins and ends with the private key. - key_data['key_seg'] = data[begin_match.start():end_match.end()] - - # Establish that we are able to base64 decode the private key; - # if we can't, then it's not a valid key. - # - # If we got a certificate, validate that also, in the same way. - header_re = re.compile(r'^(.+?):\s*?(.+?)(\\??)$') - for segment_name in ('cert', 'key'): - segment_to_validate = key_data['%s_seg' % segment_name] - # If we have nothing; skip this one. - # We've already validated that we have a private key above, - # so we don't need to do it again. - if not segment_to_validate: - continue - - # Ensure that this segment is valid base64 data. 
- base64_data = '' - line_continues = False - lines = segment_to_validate.splitlines() - for line in lines[1:-1]: - line = line.strip() - if not line: - continue - if line_continues: - line_continues = line.endswith('\\') - continue - line_match = header_re.match(line) - if line_match: - line_continues = line.endswith('\\') - continue - base64_data += line - try: - decoded_data = base64.b64decode(base64_data) - if not decoded_data: - raise validation_error - key_data['%s_b64' % segment_name] = base64_data - key_data['%s_bin' % segment_name] = decoded_data - except TypeError: - raise validation_error - - # Determine if key is encrypted. - if key_data['key_type'] == 'ed25519': - # See https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L3218 - # Decoded key data starts with magic string (null-terminated), four byte - # length field, followed by the ciphername -- if ciphername is anything - # other than 'none' the key is encrypted. - key_data['key_enc'] = not bool(key_data['key_bin'].startswith('openssh-key-v1\x00\x00\x00\x00\x04none')) - else: - key_data['key_enc'] = bool('ENCRYPTED' in key_data['key_seg']) - - return key_data diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index 0955a28667..5089c4a868 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -30,7 +30,6 @@ from awx.main.models.notifications import ( JobNotificationMixin, ) from awx.main.utils import _inventory_updates -from awx.main.conf import tower_settings __all__ = ['Inventory', 'Host', 'Group', 'InventorySource', 'InventoryUpdate', 'CustomInventoryScript'] @@ -1244,7 +1243,7 @@ class InventoryUpdate(UnifiedJob, InventorySourceOptions, JobNotificationMixin): return reverse('api:inventory_update_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/inventory_sync/{}".format(self.pk)) + return urljoin(settings.TOWER_URL_BASE, "/#/inventory_sync/{}".format(self.pk)) def is_blocked_by(self, obj): if type(obj) == InventoryUpdate: diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index 34adcb73a4..1602872d2b 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -31,7 +31,6 @@ from awx.main.models.notifications import ( from awx.main.utils import decrypt_field, ignore_inventory_computed_fields from awx.main.utils import emit_websocket_notification from awx.main.redact import PlainTextCleaner -from awx.main.conf import tower_settings from awx.main.fields import ImplicitRoleField from awx.main.models.mixins import ResourceMixin @@ -483,9 +482,9 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): @property def cache_timeout_blocked(self): - if Job.objects.filter(job_template=self, status__in=['pending', 'waiting', 'running']).count() > getattr(tower_settings, 'SCHEDULE_MAX_JOBS', 10): + if Job.objects.filter(job_template=self, status__in=['pending', 'waiting', 'running']).count() > getattr(settings, 'SCHEDULE_MAX_JOBS', 10): logger.error("Job template %s could not be started because there are more than %s other jobs from that template waiting to run" % - (self.name, getattr(tower_settings, 'SCHEDULE_MAX_JOBS', 10))) + (self.name, getattr(settings, 'SCHEDULE_MAX_JOBS', 10))) return True return False @@ -552,7 +551,7 @@ class Job(UnifiedJob, JobOptions, JobNotificationMixin): return reverse('api:job_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/jobs/{}".format(self.pk)) + return urljoin(settings.TOWER_URL_BASE, 
"/#/jobs/{}".format(self.pk)) @property def task_auth_token(self): @@ -1376,7 +1375,7 @@ class SystemJob(UnifiedJob, SystemJobOptions, JobNotificationMixin): return reverse('api:system_job_detail', args=(self.pk,)) def get_ui_url(self): - return urljoin(tower_settings.TOWER_URL_BASE, "/#/management_jobs/{}".format(self.pk)) + return urljoin(settings.TOWER_URL_BASE, "/#/management_jobs/{}".format(self.pk)) def is_blocked_by(self, obj): return True diff --git a/awx/main/models/organization.py b/awx/main/models/organization.py index 5f3dc9d7c9..4225bd7e70 100644 --- a/awx/main/models/organization.py +++ b/awx/main/models/organization.py @@ -23,7 +23,6 @@ from awx.main.models.rbac import ( ROLE_SINGLETON_SYSTEM_AUDITOR, ) from awx.main.models.mixins import ResourceMixin -from awx.main.conf import tower_settings __all__ = ['Organization', 'Team', 'Permission', 'Profile', 'AuthToken'] @@ -262,7 +261,7 @@ class AuthToken(BaseModel): if not now: now = tz_now() if not self.pk or not self.is_expired(now=now): - self.expires = now + datetime.timedelta(seconds=tower_settings.AUTH_TOKEN_EXPIRATION) + self.expires = now + datetime.timedelta(seconds=settings.AUTH_TOKEN_EXPIRATION) if save: self.save() @@ -279,12 +278,12 @@ class AuthToken(BaseModel): if now is None: now = tz_now() invalid_tokens = AuthToken.objects.none() - if tower_settings.AUTH_TOKEN_PER_USER != -1: + if settings.AUTH_TOKEN_PER_USER != -1: invalid_tokens = AuthToken.objects.filter( user=user, expires__gt=now, reason='', - ).order_by('-created')[tower_settings.AUTH_TOKEN_PER_USER:] + ).order_by('-created')[settings.AUTH_TOKEN_PER_USER:] return invalid_tokens def generate_key(self): @@ -313,7 +312,7 @@ class AuthToken(BaseModel): valid_n_tokens_qs = self.user.auth_tokens.filter( expires__gt=now, reason='', - ).order_by('-created')[0:tower_settings.AUTH_TOKEN_PER_USER] + ).order_by('-created')[0:settings.AUTH_TOKEN_PER_USER] valid_n_tokens = valid_n_tokens_qs.values_list('key', flat=True) return bool(self.key in valid_n_tokens) diff --git a/awx/main/models/projects.py b/awx/main/models/projects.py index 85ca3ab2aa..c9b23adc7b 100644 --- a/awx/main/models/projects.py +++ b/awx/main/models/projects.py @@ -28,7 +28,6 @@ from awx.main.models.unified_jobs import * # noqa from awx.main.models.mixins import ResourceMixin from awx.main.utils import update_scm_url from awx.main.fields import ImplicitRoleField -from awx.main.conf import tower_settings from awx.main.models.rbac import ( ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR, @@ -433,7 +432,7 @@ class ProjectUpdate(UnifiedJob, ProjectOptions, JobNotificationMixin): return reverse('api:project_update_detail', args=(self.pk,)) def get_ui_url(self): - return urlparse.urljoin(tower_settings.TOWER_URL_BASE, "/#/scm_update/{}".format(self.pk)) + return urlparse.urljoin(settings.TOWER_URL_BASE, "/#/scm_update/{}".format(self.pk)) def _update_parent_instance(self): parent_instance = self._get_parent_instance() diff --git a/awx/main/registrar.py b/awx/main/registrar.py index de6673fc5a..5f0a1589a4 100644 --- a/awx/main/registrar.py +++ b/awx/main/registrar.py @@ -1,11 +1,8 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. 
-import logging - from django.db.models.signals import pre_save, post_save, pre_delete, m2m_changed -logger = logging.getLogger('awx.main.registrar') class ActivityStreamRegistrar(object): @@ -13,9 +10,7 @@ class ActivityStreamRegistrar(object): self.models = [] def connect(self, model): - from awx.main.conf import tower_settings - if not getattr(tower_settings, 'ACTIVITY_STREAM_ENABLED', True): - return + # Always register model; the signal handlers will check if activity stream is enabled. from awx.main.signals import activity_stream_create, activity_stream_update, activity_stream_delete, activity_stream_associate if model not in self.models: diff --git a/awx/main/signals.py b/awx/main/signals.py index 7389f01763..324f3fe825 100644 --- a/awx/main/signals.py +++ b/awx/main/signals.py @@ -8,6 +8,7 @@ import threading import json # Django +from django.conf import settings from django.db.models.signals import post_save, pre_delete, post_delete, m2m_changed from django.dispatch import receiver @@ -21,7 +22,6 @@ from awx.api.serializers import * # noqa from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, emit_websocket_notification from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates from awx.main.tasks import update_inventory_computed_fields -from awx.main.conf import tower_settings __all__ = [] @@ -297,10 +297,10 @@ def update_host_last_job_after_job_deleted(sender, **kwargs): class ActivityStreamEnabled(threading.local): def __init__(self): - self.enabled = getattr(tower_settings, 'ACTIVITY_STREAM_ENABLED', True) + self.enabled = True def __nonzero__(self): - return bool(self.enabled) + return bool(self.enabled and getattr(settings, 'ACTIVITY_STREAM_ENABLED', True)) activity_stream_enabled = ActivityStreamEnabled() @@ -330,7 +330,6 @@ model_serializer_mapping = { JobTemplate: JobTemplateSerializer, Job: JobSerializer, AdHocCommand: AdHocCommandSerializer, - TowerSettings: TowerSettingsSerializer, NotificationTemplate: NotificationTemplateSerializer, Notification: NotificationSerializer, } @@ -354,7 +353,7 @@ def activity_stream_create(sender, instance, created, **kwargs): #TODO: Weird situation where cascade SETNULL doesn't work # it might actually be a good idea to remove all of these FK references since # we don't really use them anyway. 
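+        # NOTE: the string comparison below replaces the old TowerSettings type
+        # check; TowerSettings is gone and the replacement Setting model lives in
+        # awx.conf, which presumably cannot be imported here. Setting changes
+        # still get an activity entry above, but skip the generic-relation .add()
+        # since ActivityStream has no field for the conf app's model (an
+        # inference from this diff, not stated in the commit).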
- if type(instance) is not TowerSettings: + if instance._meta.model_name != 'setting': # Is not conf.Setting instance getattr(activity_entry, object1).add(instance) def activity_stream_update(sender, instance, **kwargs): @@ -377,7 +376,7 @@ def activity_stream_update(sender, instance, **kwargs): object1=object1, changes=json.dumps(changes)) activity_entry.save() - if type(instance) is not TowerSettings: + if instance._meta.model_name != 'setting': # Is not conf.Setting instance getattr(activity_entry, object1).add(instance) def activity_stream_delete(sender, instance, **kwargs): diff --git a/awx/main/south_migrations/0071_v240_changes.py b/awx/main/south_migrations/0071_v240_changes.py index cae03ef27a..98c44d4ad7 100644 --- a/awx/main/south_migrations/0071_v240_changes.py +++ b/awx/main/south_migrations/0071_v240_changes.py @@ -4,7 +4,7 @@ from south.db import db from south.v2 import DataMigration from django.db import models from django.utils.timezone import now -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled class Migration(DataMigration): diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 097dca517d..c4138cdb52 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -48,8 +48,7 @@ from awx.main.constants import CLOUD_PROVIDERS from awx.main.models import * # noqa from awx.main.models import UnifiedJob from awx.main.queue import FifoQueue -from awx.main.conf import tower_settings -from awx.main.task_engine import TaskSerializer, TASK_TIMEOUT_INTERVAL +from awx.main.task_engine import TaskEnhancer from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url, emit_websocket_notification, check_proot_installed, build_proot_temp_dir, wrap_args_with_proot) @@ -105,10 +104,9 @@ def send_notifications(notification_list, job_id=None): @task(bind=True, queue='default') def run_administrative_checks(self): - if not tower_settings.TOWER_ADMIN_ALERTS: + if not settings.TOWER_ADMIN_ALERTS: return - reader = TaskSerializer() - validation_info = reader.from_database() + validation_info = TaskEnhancer().validate_enhancements() if validation_info.get('instance_count', 0) < 1: return used_percentage = float(validation_info.get('current_instances', 0)) / float(validation_info.get('instance_count', 100)) @@ -118,7 +116,7 @@ def run_administrative_checks(self): "Ansible Tower host usage over 90%", tower_admin_emails, fail_silently=True) - if validation_info.get('time_remaining', 0) < TASK_TIMEOUT_INTERVAL: + if validation_info.get('date_warning', False): send_mail("Ansible Tower license will expire soon", "Ansible Tower license will expire soon", tower_admin_emails, @@ -417,7 +415,7 @@ class BaseTask(Task): # NOTE: # Derived class should call add_ansible_venv() or add_tower_venv() if self.should_use_proot(instance, **kwargs): - env['PROOT_TMP_DIR'] = tower_settings.AWX_PROOT_BASE_PATH + env['PROOT_TMP_DIR'] = settings.AWX_PROOT_BASE_PATH return env def build_safe_env(self, instance, **kwargs): @@ -530,7 +528,7 @@ class BaseTask(Task): instance = self.update_model(instance.pk) if instance.cancel_flag: try: - if tower_settings.AWX_PROOT_ENABLED and self.should_use_proot(instance): + if settings.AWX_PROOT_ENABLED and self.should_use_proot(instance): # NOTE: Refactor this once we get a newer psutil across the board if not psutil: os.kill(child.pid, signal.SIGKILL) @@ -727,9 +725,9 @@ class RunJob(BaseTask): ''' plugin_dir = self.get_path_to('..', 'plugins', 'callback') plugin_dirs = [plugin_dir] - if hasattr(tower_settings, 
'AWX_ANSIBLE_CALLBACK_PLUGINS') and \ - tower_settings.AWX_ANSIBLE_CALLBACK_PLUGINS: - plugin_dirs.append(tower_settings.AWX_ANSIBLE_CALLBACK_PLUGINS) + if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and \ + settings.AWX_ANSIBLE_CALLBACK_PLUGINS: + plugin_dirs.extend(settings.AWX_ANSIBLE_CALLBACK_PLUGINS) plugin_path = ':'.join(plugin_dirs) env = super(RunJob, self).build_env(job, **kwargs) env = self.add_ansible_venv(env) @@ -944,7 +942,7 @@ class RunJob(BaseTask): ''' Return whether this task should use proot. ''' - return getattr(tower_settings, 'AWX_PROOT_ENABLED', False) + return getattr(settings, 'AWX_PROOT_ENABLED', False) def post_run_hook(self, job, **kwargs): ''' @@ -1624,7 +1622,7 @@ class RunAdHocCommand(BaseTask): ''' Return whether this task should use proot. ''' - return getattr(tower_settings, 'AWX_PROOT_ENABLED', False) + return getattr(settings, 'AWX_PROOT_ENABLED', False) def post_run_hook(self, ad_hoc_command, **kwargs): ''' diff --git a/awx/main/tests/base.py b/awx/main/tests/base.py index 6b35297a07..287eb8a8c5 100644 --- a/awx/main/tests/base.py +++ b/awx/main/tests/base.py @@ -31,8 +31,8 @@ from django.utils.encoding import force_text # AWX from awx.main.models import * # noqa from awx.main.management.commands.run_task_system import run_taskmanager +from awx.main.task_engine import TaskEnhancer from awx.main.utils import get_ansible_version -from awx.main.task_engine import TaskEngager as LicenseWriter from awx.sso.backends import LDAPSettings from awx.main.tests.URI import URI # noqa @@ -143,35 +143,25 @@ class BaseTestMixin(MockCommonlySlowTestMixin): return __name__ + '-generated-' + string + rnd_str def create_test_license_file(self, instance_count=10000, license_date=int(time.time() + 3600), features=None): - writer = LicenseWriter( + settings.LICENSE = TaskEnhancer( company_name='AWX', contact_name='AWX Admin', contact_email='awx@example.com', license_date=license_date, instance_count=instance_count, license_type='enterprise', - features=features) - handle, license_path = tempfile.mkstemp(suffix='.json') - os.close(handle) - writer.write_file(license_path) - self._temp_paths.append(license_path) - os.environ['AWX_LICENSE_FILE'] = license_path - cache.clear() + features=features, + ).enhance() def create_basic_license_file(self, instance_count=100, license_date=int(time.time() + 3600)): - writer = LicenseWriter( + settings.LICENSE = TaskEnhancer( company_name='AWX', contact_name='AWX Admin', contact_email='awx@example.com', license_date=license_date, instance_count=instance_count, - license_type='basic') - handle, license_path = tempfile.mkstemp(suffix='.json') - os.close(handle) - writer.write_file(license_path) - self._temp_paths.append(license_path) - os.environ['AWX_LICENSE_FILE'] = license_path - cache.clear() + license_type='basic', + ).enhance() def create_expired_license_file(self, instance_count=1000, grace_period=False): license_date = time.time() - 1 diff --git a/awx/main/tests/data/ssh.py b/awx/main/tests/data/ssh.py index c2a9a29223..b3f5e8b675 100644 --- a/awx/main/tests/data/ssh.py +++ b/awx/main/tests/data/ssh.py @@ -1,3 +1,31 @@ +TEST_SSH_RSA1_KEY_DATA = '''-----BEGIN PRIVATE KEY----- +uFZFyag7VVqI+q/oGnQu+wj/pMi5ox+Qz5L3W0D745DzwgDXOeObAfNlr9NtIKbn +sZ5E0+rYB4Q/U0CYr5juNJQV1dbxq2Em1160axboe2QbvX6wE6Sm6wW9b9cr+PoF +MoYQebUnCY0ObrLbrRugSfZc17lyxK0ZGRgPXKhpMg6Ecv8XpvhjUYU9Esyqfuco +/p26Q140/HsHeHYNma0dQHCEjMr/qEzOY1qguHj+hRf3SARtM9Q+YNgpxchcDDVS +O+n+8Ljd/p82bpEJwxmpXealeWbI6gB9/R6wcCL+ZyCZpnHJd/NJ809Vtu47ZdDi 
+E6jvqS/3AQhuQKhJlLSDIzezB2VKKrHwOvHkg/+uLoCqHN34Gk6Qio7x69SvXy88 +a7q9D1l/Zx60o08FyZyqlo7l0l/r8EY+36cuI/lvAvfxc5VHVEOvKseUjFRBiCv9 +MkKNxaScoYsPwY7SIS6gD93tg3eM5pA0nfMfya9u1+uq/QCM1gNG3mm6Zd8YG4c/ +Dx4bmsj8cp5ni/Ffl/sKzKYq1THunJEFGXOZRibdxk/Fal3SQrRAwy7CgLQL8SMh +IWqcFm25OtSOP1r1LE25t5pQsMdmp0IP2fEF0t/pXPm1ZfrTurPMqpo4FGm2hkki +U3sH/o6nrkSOjklOLWlwtTkkL4dWPlNwc8OYj8zFizXJkAfv1spzhv3lRouNkw4N +Mm22W7us2f3Ob0H5C07k26h6VuXX+0AybD4tIIcUXCLoNTqA0HvqhKpEuHu3Ck10 +RaB8xHTxgwdhGVaNHMfy9B9l4tNs3Tb5k0LyeRRGVDhWCFo6axYULYebkj+hFLLY ++JE5RzPDFpTf1xbuT+e56H/lLFCUdDu0bn+D0W4ifXaVFegak4r6O4B53CbMqr+R +t6qDPKLUIuVJXK0J6Ay6XgmheXJGbgKh4OtDsc06gsTCE1nY4f/Z82AQahPBfTtF +J2z+NHdsLPn//HlxspGQtmLpuS7Wx0HYXZ+kPRSiE/vmITw85R2u8JSHQicVNN4C +2rlUo15TIU3tTx+WUIrHKHPidUNNotRb2p9n9FoSidU6upKnQHAT/JNv/zcvaia3 +Bhl/wagheWTDnFKSmJ4HlKxplM/32h6MfHqsMVOl4F6eZWKaKgSgN8doXyFJo+sc +yAC6S0gJlD2gQI24iTI4Du1+UGh2MGb69eChvi5mbbdesaZrlR1dRqZpHG+6ob4H +nYLndRvobXS5l6pgGTDRYoUgSbQe21a7Uf3soGl5jHqLWc1zEPwrxV7Wr31mApr6 +8VtGZcLSr0691Q1NLO3eIfuhbMN2mssX/Sl4t+4BibaucNIMfmhKQi8uHtwAXb47 ++TMFlG2EQhZULFM4fLdF1vaizInU3cBk8lsz8i71tDc+5VQTEwoEB7Gksy/XZWEt +6SGHxXUDtNYa+G2O+sQhgqBjLIkVTV6KJOpvNZM+s8Vzv8qoFnD7isKBBrRvF1bP +GOXEG1jd7nSR0WSwcMCHGOrFEELDQPw3k5jqEdPFgVODoZPr+drZVnVz5SAGBk5Y +wsCNaDW+1dABYFlqRTepP5rrSu9wHnRAZ3ZGv+DHoGqenIC5IBR0sQ== +-----END PRIVATE KEY-----''' + TEST_SSH_KEY_DATA = '''-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAyQ8F5bbgjHvk4SZJsKI9OmJKMFxZqRhvx4LaqjLTKbBwRBsY 1/C00NPiZn70dKbeyV7RNVZxuzM6yd3D3lwTdbDu/eJ0x72t3ch+TdLt/aenyy10 diff --git a/awx/main/tests/functional/api/test_activity_streams.py b/awx/main/tests/functional/api/test_activity_streams.py index f1c42cdd9d..85bb70f65d 100644 --- a/awx/main/tests/functional/api/test_activity_streams.py +++ b/awx/main/tests/functional/api/test_activity_streams.py @@ -6,28 +6,27 @@ from awx.main.models.activity_stream import ActivityStream from awx.main.access import ActivityStreamAccess from django.core.urlresolvers import reverse -from django.conf import settings -def mock_feature_enabled(feature, bypass_database=None): +def mock_feature_enabled(feature): return True @pytest.fixture def activity_stream_entry(organization, org_admin): return ActivityStream.objects.filter(organization__pk=organization.pk, user=org_admin, operation='associate').first() -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_get_activity_stream_list(monkeypatch, organization, get, user): +def test_get_activity_stream_list(monkeypatch, organization, get, user, settings): + settings.ACTIVITY_STREAM_ENABLED = True url = reverse('api:activity_stream_list') response = get(url, user('admin', True)) assert response.status_code == 200 -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_basic_fields(monkeypatch, organization, get, user): +def test_basic_fields(monkeypatch, organization, get, user, settings): + settings.ACTIVITY_STREAM_ENABLED = True u = user('admin', True) activity_stream = ActivityStream.objects.filter(organization=organization).latest('pk') activity_stream.actor = u @@ -44,10 +43,10 @@ def test_basic_fields(monkeypatch, organization, get, user): assert 'organization' in response.data['summary_fields'] assert 
response.data['summary_fields']['organization'][0]['name'] == 'test-org' -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_middleware_actor_added(monkeypatch, post, get, user): +def test_middleware_actor_added(monkeypatch, post, get, user, settings): + settings.ACTIVITY_STREAM_ENABLED = True u = user('admin-poster', True) url = reverse('api:organization_list') @@ -66,21 +65,19 @@ def test_middleware_actor_added(monkeypatch, post, get, user): assert response.status_code == 200 assert response.data['summary_fields']['actor']['username'] == 'admin-poster' -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_rbac_stream_resource_roles(activity_stream_entry, organization, org_admin): - +def test_rbac_stream_resource_roles(activity_stream_entry, organization, org_admin, settings): + settings.ACTIVITY_STREAM_ENABLED = True assert activity_stream_entry.user.first() == org_admin assert activity_stream_entry.organization.first() == organization assert activity_stream_entry.role.first() == organization.admin_role assert activity_stream_entry.object_relationship_type == 'awx.main.models.organization.Organization.admin_role' -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) @pytest.mark.django_db -def test_rbac_stream_user_roles(activity_stream_entry, organization, org_admin): - +def test_rbac_stream_user_roles(activity_stream_entry, organization, org_admin, settings): + settings.ACTIVITY_STREAM_ENABLED = True assert activity_stream_entry.user.first() == org_admin assert activity_stream_entry.organization.first() == organization assert activity_stream_entry.role.first() == organization.admin_role @@ -88,9 +85,9 @@ def test_rbac_stream_user_roles(activity_stream_entry, organization, org_admin): @pytest.mark.django_db @pytest.mark.activity_stream_access -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) -def test_stream_access_cant_change(activity_stream_entry, organization, org_admin): +def test_stream_access_cant_change(activity_stream_entry, organization, org_admin, settings): + settings.ACTIVITY_STREAM_ENABLED = True access = ActivityStreamAccess(org_admin) # These should always return false because the activity stream can not be edited assert not access.can_add(activity_stream_entry) @@ -99,12 +96,12 @@ def test_stream_access_cant_change(activity_stream_entry, organization, org_admi @pytest.mark.django_db @pytest.mark.activity_stream_access -@pytest.mark.skipif(not getattr(settings, 'ACTIVITY_STREAM_ENABLED', True), reason="Activity stream not enabled") @mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) def test_stream_queryset_hides_shows_items( activity_stream_entry, organization, user, org_admin, project, org_credential, inventory, label, deploy_jobtemplate, - notification_template, group, host, team): + notification_template, group, host, team, settings): + settings.ACTIVITY_STREAM_ENABLED = True # this user is not in any organizations and should not see any resource activity 
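+    # NOTE: with the module-level skipif decorator gone, the pytest-django
+    # settings fixture above forces ACTIVITY_STREAM_ENABLED on for this test
+    # only, so the queryset assertions below exercise RBAC visibility rather
+    # than the stream's on/off switch.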
no_access_user = user('no-access-user', False) queryset = ActivityStreamAccess(no_access_user).get_queryset() diff --git a/awx/main/tests/functional/api/test_fact_versions.py b/awx/main/tests/functional/api/test_fact_versions.py index fe51c86515..fc521eb615 100644 --- a/awx/main/tests/functional/api/test_fact_versions.py +++ b/awx/main/tests/functional/api/test_fact_versions.py @@ -13,10 +13,10 @@ from awx.main.utils import timestamp_apiformat from django.core.urlresolvers import reverse from django.utils import timezone -def mock_feature_enabled(feature, bypass_database=None): +def mock_feature_enabled(feature): return True -def mock_feature_disabled(feature, bypass_database=None): +def mock_feature_disabled(feature): return False def setup_common(hosts, fact_scans, get, user, epoch=timezone.now(), get_params={}, host_count=1): diff --git a/awx/main/tests/functional/api/test_fact_view.py b/awx/main/tests/functional/api/test_fact_view.py index be5367ba52..fd646d9456 100644 --- a/awx/main/tests/functional/api/test_fact_view.py +++ b/awx/main/tests/functional/api/test_fact_view.py @@ -6,10 +6,10 @@ from awx.main.utils import timestamp_apiformat from django.core.urlresolvers import reverse from django.utils import timezone -def mock_feature_enabled(feature, bypass_database=None): +def mock_feature_enabled(feature): return True -def mock_feature_disabled(feature, bypass_database=None): +def mock_feature_disabled(feature): return False # TODO: Consider making the fact_scan() fixture a Class, instead of a function, and move this method into it diff --git a/awx/main/tests/functional/api/test_organizations.py b/awx/main/tests/functional/api/test_organizations.py index d141ddd6b5..2e153c56d5 100644 --- a/awx/main/tests/functional/api/test_organizations.py +++ b/awx/main/tests/functional/api/test_organizations.py @@ -99,7 +99,7 @@ def test_organization_inventory_list(organization, inventory_factory, get, alice @pytest.mark.django_db -@mock.patch('awx.api.views.feature_enabled', lambda feature,bypass_db=None: True) +@mock.patch('awx.api.views.feature_enabled', lambda feature: True) def test_create_organization(post, admin, alice): new_org = { 'name': 'new org', @@ -111,7 +111,7 @@ def test_create_organization(post, admin, alice): @pytest.mark.django_db -@mock.patch('awx.api.views.feature_enabled', lambda feature,bypass_db=None: True) +@mock.patch('awx.api.views.feature_enabled', lambda feature: True) def test_create_organization_xfail(post, alice): new_org = { 'name': 'new org', diff --git a/awx/main/tests/functional/api/test_survey_spec.py b/awx/main/tests/functional/api/test_survey_spec.py index d6cc512847..814a83f1ae 100644 --- a/awx/main/tests/functional/api/test_survey_spec.py +++ b/awx/main/tests/functional/api/test_survey_spec.py @@ -6,7 +6,7 @@ from django.core.urlresolvers import reverse from awx.main.models.jobs import JobTemplate, Job from awx.main.models.activity_stream import ActivityStream -from awx.api.license import LicenseForbids +from awx.conf.license import LicenseForbids from awx.main.access import JobTemplateAccess diff --git a/awx/main/tests/functional/commands/test_cleanup_facts.py b/awx/main/tests/functional/commands/test_cleanup_facts.py index 93ddb72d14..e67a751a42 100644 --- a/awx/main/tests/functional/commands/test_cleanup_facts.py +++ b/awx/main/tests/functional/commands/test_cleanup_facts.py @@ -16,10 +16,10 @@ from awx.main.management.commands.cleanup_facts import CleanupFacts, Command from awx.main.models.fact import Fact from awx.main.models.inventory import Host -def 
mock_feature_enabled(feature, bypass_database=None): +def mock_feature_enabled(feature): return True -def mock_feature_disabled(feature, bypass_database=None): +def mock_feature_disabled(feature): return False @pytest.mark.django_db diff --git a/awx/main/tests/functional/core/test_licenses.py b/awx/main/tests/functional/core/test_licenses.py index 37f3c63fa9..c9b2af6dc0 100644 --- a/awx/main/tests/functional/core/test_licenses.py +++ b/awx/main/tests/functional/core/test_licenses.py @@ -1,28 +1,23 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. -import json -import mock -import os -import tempfile import time import pytest from datetime import datetime from awx.main.models import Host -from awx.main.task_engine import TaskSerializer, TaskEngager - +from awx.main.task_engine import TaskEnhancer @pytest.mark.django_db def test_license_writer(inventory, admin): - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='acmecorp', contact_name='Michael DeHaan', contact_email='michael@ansibleworks.com', license_date=25000, # seconds since epoch instance_count=500) - data = writer.get_data() + data = task_enhancer.enhance() Host.objects.bulk_create( [ @@ -42,13 +37,7 @@ def test_license_writer(inventory, admin): assert data['license_date'] == 25000 assert data['license_key'] == "11bae31f31c6a6cdcb483a278cdbe98bd8ac5761acd7163a50090b0f098b3a13" - strdata = writer.get_string() - strdata_loaded = json.loads(strdata) - assert strdata_loaded == data - - reader = TaskSerializer() - - vdata = reader.from_string(strdata) + vdata = task_enhancer.validate_enhancements() assert vdata['available_instances'] == 500 assert vdata['current_instances'] == 12 @@ -63,70 +52,41 @@ def test_license_writer(inventory, admin): @pytest.mark.django_db def test_expired_licenses(): - reader = TaskSerializer() - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 3600), instance_count=100, trial=True) - strdata = writer.get_string() - vdata = reader.from_string(strdata) + task_enhancer.enhance() + vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] < 0 - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 2592001), instance_count=100, trial=False) - strdata = writer.get_string() - vdata = reader.from_string(strdata) + task_enhancer.enhance() + vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] < 0 - writer = TaskEngager( + task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 3600), instance_count=100, trial=False) - strdata = writer.get_string() - vdata = reader.from_string(strdata) + task_enhancer.enhance() + vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] > 0 - -@pytest.mark.django_db -def test_aws_license(): - os.environ['AWX_LICENSE_FILE'] = 'non-existent-license-file.json' - - h, path = tempfile.mkstemp() - with os.fdopen(h, 'w') as f: - json.dump({'instance_count': 100}, f) - - def fetch_ami(_self): - _self.attributes['ami-id'] = 'ami-00000000' - return True - - def fetch_instance(_self): - _self.attributes['instance-id'] = 'i-00000000' - return True - - with 
mock.patch('awx.main.task_engine.TEMPORARY_TASK_FILE', path): - with mock.patch('awx.main.task_engine.TemporaryTaskEngine.fetch_ami', fetch_ami): - with mock.patch('awx.main.task_engine.TemporaryTaskEngine.fetch_instance', fetch_instance): - reader = TaskSerializer() - license = reader.from_file() - assert license['is_aws'] - assert license['time_remaining'] - assert license['free_instances'] > 0 - assert license['grace_period_remaining'] > 0 - - os.unlink(path) diff --git a/awx/main/tests/functional/test_rbac_api.py b/awx/main/tests/functional/test_rbac_api.py index 54dcc8deb5..5d5591cc67 100644 --- a/awx/main/tests/functional/test_rbac_api.py +++ b/awx/main/tests/functional/test_rbac_api.py @@ -5,7 +5,7 @@ from django.db import transaction from django.core.urlresolvers import reverse from awx.main.models.rbac import Role, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR -def mock_feature_enabled(feature, bypass_database=None): +def mock_feature_enabled(feature): return True #@mock.patch('awx.api.views.feature_enabled', new=mock_feature_enabled) diff --git a/awx/main/tests/old/ad_hoc.py b/awx/main/tests/old/ad_hoc.py index 2c81ec71a0..ec3204e6d7 100644 --- a/awx/main/tests/old/ad_hoc.py +++ b/awx/main/tests/old/ad_hoc.py @@ -20,7 +20,6 @@ from crum import impersonate # AWX from awx.main.utils import * # noqa from awx.main.models import * # noqa -from awx.main.conf import tower_settings from awx.main.tests.base import BaseJobExecutionTest from awx.main.tests.data.ssh import ( TEST_SSH_KEY_DATA, @@ -572,14 +571,14 @@ class AdHocCommandApiTest(BaseAdHocCommandTest): # Try to relaunch ad hoc command when module has been removed from # allowed list of modules. try: - ad_hoc_commands = tower_settings.AD_HOC_COMMANDS - tower_settings.AD_HOC_COMMANDS = [] + ad_hoc_commands = settings.AD_HOC_COMMANDS + settings.AD_HOC_COMMANDS = [] with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['passwords_needed_to_start'], []) response = self.post(url, {}, expect=400) finally: - tower_settings.AD_HOC_COMMANDS = ad_hoc_commands + settings.AD_HOC_COMMANDS = ad_hoc_commands # Try to relaunch after the inventory has been marked inactive. 
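+        # NOTE: "marked inactive" above is stale wording; by this point
+        # delete() presumably removes the inventory outright rather than
+        # soft-deleting it, and the relaunch attempt runs against that state.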
self.inventory.delete() diff --git a/awx/main/tests/old/users.py b/awx/main/tests/old/users.py index df2d5e19bc..7d95fe7604 100644 --- a/awx/main/tests/old/users.py +++ b/awx/main/tests/old/users.py @@ -15,7 +15,6 @@ from django.test.utils import override_settings # AWX from awx.main.models import * # noqa from awx.main.tests.base import BaseTest -from awx.main.conf import tower_settings __all__ = ['AuthTokenTimeoutTest', 'AuthTokenLimitTest', 'AuthTokenProxyTest', 'UsersTest', 'LdapTest'] @@ -38,7 +37,7 @@ class AuthTokenTimeoutTest(BaseTest): response = self._generic_rest(dashboard_url, expect=200, method='get', return_response_object=True, client_kwargs=kwargs) self.assertIn('Auth-Token-Timeout', response) - self.assertEqual(response['Auth-Token-Timeout'], str(tower_settings.AUTH_TOKEN_EXPIRATION)) + self.assertEqual(response['Auth-Token-Timeout'], str(settings.AUTH_TOKEN_EXPIRATION)) class AuthTokenLimitTest(BaseTest): def setUp(self): diff --git a/awx/main/tests/unit/conftest.py b/awx/main/tests/unit/conftest.py new file mode 100644 index 0000000000..fab8214ed3 --- /dev/null +++ b/awx/main/tests/unit/conftest.py @@ -0,0 +1,6 @@ +import pytest + + +@pytest.fixture(autouse=True) +def _disable_database_settings(mocker): + mocker.patch('awx.conf.settings.SettingsWrapper._get_supported_settings', return_value=[]) diff --git a/awx/main/tests/unit/test_credentials.py b/awx/main/tests/unit/test_credentials.py deleted file mode 100644 index 7445d28fda..0000000000 --- a/awx/main/tests/unit/test_credentials.py +++ /dev/null @@ -1,56 +0,0 @@ -from django.core.exceptions import ValidationError -from awx.main.models.credential import validate_ssh_private_key - -import pytest - -def test_valid_rsa_key(): - begin = """-----BEGIN RSA PRIVATE KEY-----""" - end = """-----END RSA PRIVATE KEY-----""" - unvalidated_key = build_key(begin, body, end) - key_data = validate_ssh_private_key(unvalidated_key) - assert key_data['key_type'] == 'rsa' - -def test_invalid_key(): - unvalidated_key = build_key(key_begin, body, "END KEY") - with pytest.raises(ValidationError): - validate_ssh_private_key(unvalidated_key) - -def test_key_type_empty(): - unvalidated_key = build_key(key_begin, body, key_end) - key_data = validate_ssh_private_key(unvalidated_key) - assert key_data['key_type'] == 'rsa1' - - -def build_key(begin, body, end): - return """%s%s%s""" % (begin, body, end) - -key_begin = """-----BEGIN PRIVATE KEY-----""" -key_end = """-----END PRIVATE KEY-----""" - -body = """ -uFZFyag7VVqI+q/oGnQu+wj/pMi5ox+Qz5L3W0D745DzwgDXOeObAfNlr9NtIKbn -sZ5E0+rYB4Q/U0CYr5juNJQV1dbxq2Em1160axboe2QbvX6wE6Sm6wW9b9cr+PoF -MoYQebUnCY0ObrLbrRugSfZc17lyxK0ZGRgPXKhpMg6Ecv8XpvhjUYU9Esyqfuco -/p26Q140/HsHeHYNma0dQHCEjMr/qEzOY1qguHj+hRf3SARtM9Q+YNgpxchcDDVS -O+n+8Ljd/p82bpEJwxmpXealeWbI6gB9/R6wcCL+ZyCZpnHJd/NJ809Vtu47ZdDi -E6jvqS/3AQhuQKhJlLSDIzezB2VKKrHwOvHkg/+uLoCqHN34Gk6Qio7x69SvXy88 -a7q9D1l/Zx60o08FyZyqlo7l0l/r8EY+36cuI/lvAvfxc5VHVEOvKseUjFRBiCv9 -MkKNxaScoYsPwY7SIS6gD93tg3eM5pA0nfMfya9u1+uq/QCM1gNG3mm6Zd8YG4c/ -Dx4bmsj8cp5ni/Ffl/sKzKYq1THunJEFGXOZRibdxk/Fal3SQrRAwy7CgLQL8SMh -IWqcFm25OtSOP1r1LE25t5pQsMdmp0IP2fEF0t/pXPm1ZfrTurPMqpo4FGm2hkki -U3sH/o6nrkSOjklOLWlwtTkkL4dWPlNwc8OYj8zFizXJkAfv1spzhv3lRouNkw4N -Mm22W7us2f3Ob0H5C07k26h6VuXX+0AybD4tIIcUXCLoNTqA0HvqhKpEuHu3Ck10 -RaB8xHTxgwdhGVaNHMfy9B9l4tNs3Tb5k0LyeRRGVDhWCFo6axYULYebkj+hFLLY -+JE5RzPDFpTf1xbuT+e56H/lLFCUdDu0bn+D0W4ifXaVFegak4r6O4B53CbMqr+R -t6qDPKLUIuVJXK0J6Ay6XgmheXJGbgKh4OtDsc06gsTCE1nY4f/Z82AQahPBfTtF -J2z+NHdsLPn//HlxspGQtmLpuS7Wx0HYXZ+kPRSiE/vmITw85R2u8JSHQicVNN4C 
-2rlUo15TIU3tTx+WUIrHKHPidUNNotRb2p9n9FoSidU6upKnQHAT/JNv/zcvaia3 -Bhl/wagheWTDnFKSmJ4HlKxplM/32h6MfHqsMVOl4F6eZWKaKgSgN8doXyFJo+sc -yAC6S0gJlD2gQI24iTI4Du1+UGh2MGb69eChvi5mbbdesaZrlR1dRqZpHG+6ob4H -nYLndRvobXS5l6pgGTDRYoUgSbQe21a7Uf3soGl5jHqLWc1zEPwrxV7Wr31mApr6 -8VtGZcLSr0691Q1NLO3eIfuhbMN2mssX/Sl4t+4BibaucNIMfmhKQi8uHtwAXb47 -+TMFlG2EQhZULFM4fLdF1vaizInU3cBk8lsz8i71tDc+5VQTEwoEB7Gksy/XZWEt -6SGHxXUDtNYa+G2O+sQhgqBjLIkVTV6KJOpvNZM+s8Vzv8qoFnD7isKBBrRvF1bP -GOXEG1jd7nSR0WSwcMCHGOrFEELDQPw3k5jqEdPFgVODoZPr+drZVnVz5SAGBk5Y -wsCNaDW+1dABYFlqRTepP5rrSu9wHnRAZ3ZGv+DHoGqenIC5IBR0sQ== -""" diff --git a/awx/main/tests/unit/test_tasks.py b/awx/main/tests/unit/test_tasks.py index fb491a015b..881b7b15c6 100644 --- a/awx/main/tests/unit/test_tasks.py +++ b/awx/main/tests/unit/test_tasks.py @@ -10,9 +10,7 @@ from awx.main.tasks import ( send_notifications, run_administrative_checks, ) - -from awx.main.task_engine import TaskSerializer - +from awx.main.task_engine import TaskEnhancer @contextmanager def apply_patches(_patches): @@ -51,12 +49,11 @@ def test_send_notifications_list(mocker): @pytest.mark.parametrize("current_instances,call_count", [(91, 2), (89,1)]) def test_run_admin_checks_usage(mocker, current_instances, call_count): patches = list() - patches.append(mocker.patch('awx.main.tasks.tower_settings')) patches.append(mocker.patch('awx.main.tasks.User')) - mock_ts = mocker.Mock(spec=TaskSerializer) - mock_ts.from_database.return_value = {'instance_count': 100, 'current_instances': current_instances} - patches.append(mocker.patch('awx.main.tasks.TaskSerializer', return_value=mock_ts)) + mock_te = mocker.Mock(spec=TaskEnhancer) + mock_te.validate_enhancements.return_value = {'instance_count': 100, 'current_instances': current_instances, 'date_warning': True} + patches.append(mocker.patch('awx.main.tasks.TaskEnhancer', return_value=mock_te)) mock_sm = mocker.Mock() patches.append(mocker.patch('awx.main.tasks.send_mail', wraps=mock_sm)) diff --git a/awx/main/tests/unit/test_validators.py b/awx/main/tests/unit/test_validators.py new file mode 100644 index 0000000000..b62395424e --- /dev/null +++ b/awx/main/tests/unit/test_validators.py @@ -0,0 +1,91 @@ +from django.core.exceptions import ValidationError +from awx.main.validators import ( + validate_private_key, + validate_certificate, + validate_ssh_private_key, +) +from awx.main.tests.data.ssh import ( + TEST_SSH_RSA1_KEY_DATA, + TEST_SSH_KEY_DATA, + TEST_SSH_KEY_DATA_LOCKED, + TEST_OPENSSH_KEY_DATA, + TEST_OPENSSH_KEY_DATA_LOCKED, + TEST_SSH_CERT_KEY, +) + +import pytest + +def test_valid_rsa_key(): + valid_key = TEST_SSH_KEY_DATA + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert not pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert not pem_objects[0]['key_enc'] + +def test_valid_locked_rsa_key(): + valid_key = TEST_SSH_KEY_DATA_LOCKED + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa' + assert pem_objects[0]['key_enc'] + +def test_invalid_rsa_key(): + invalid_key = TEST_SSH_KEY_DATA.replace('-----END', '----END') + with pytest.raises(ValidationError): + validate_private_key(invalid_key) + with 
pytest.raises(ValidationError): + validate_certificate(invalid_key) + with pytest.raises(ValidationError): + validate_ssh_private_key(invalid_key) + +def test_valid_openssh_key(): + valid_key = TEST_OPENSSH_KEY_DATA + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert not pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert not pem_objects[0]['key_enc'] + +def test_valid_locked_openssh_key(): + valid_key = TEST_OPENSSH_KEY_DATA_LOCKED + pem_objects = validate_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'ed25519' + assert pem_objects[0]['key_enc'] + +def test_valid_rsa1_key(): + valid_key = TEST_SSH_RSA1_KEY_DATA + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa1' + assert not pem_objects[0]['key_enc'] + with pytest.raises(ValidationError): + validate_certificate(valid_key) + pem_objects = validate_ssh_private_key(valid_key) + assert pem_objects[0]['key_type'] == 'rsa1' + assert not pem_objects[0]['key_enc'] + +def test_cert_with_key(): + cert_with_key = TEST_SSH_CERT_KEY + with pytest.raises(ValidationError): + validate_private_key(cert_with_key) + with pytest.raises(ValidationError): + validate_certificate(cert_with_key) + pem_objects = validate_ssh_private_key(cert_with_key) + assert pem_objects[0]['type'] == 'CERTIFICATE' + assert pem_objects[1]['key_type'] == 'rsa' + assert not pem_objects[1]['key_enc'] diff --git a/awx/main/utils.py b/awx/main/utils.py index 0603e05997..0bb8ccc149 100644 --- a/awx/main/utils.py +++ b/awx/main/utils.py @@ -97,14 +97,14 @@ class RequireDebugTrueOrTest(logging.Filter): return settings.DEBUG or 'test' in sys.argv -def memoize(ttl=60): +def memoize(ttl=60, cache_key=None): ''' Decorator to wrap a function and cache its result. ''' from django.core.cache import cache def _memoizer(f, *args, **kwargs): - key = slugify('%s %r %r' % (f.__name__, args, kwargs)) + key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs)) value = cache.get(key) if value is None: value = f(*args, **kwargs) @@ -475,6 +475,7 @@ def cache_list_capabilities(page, prefetch_list, model, user): obj.capabilities_cache[display_method] = True +@memoize() def get_system_task_capacity(): ''' Measure system memory and use it as a baseline for determining the system's capacity @@ -550,8 +551,8 @@ def build_proot_temp_dir(): ''' Create a temporary directory for proot to use. 
''' - from awx.main.conf import tower_settings - path = tempfile.mkdtemp(prefix='ansible_tower_proot_', dir=tower_settings.AWX_PROOT_BASE_PATH) + from django.conf import settings + path = tempfile.mkdtemp(prefix='ansible_tower_proot_', dir=settings.AWX_PROOT_BASE_PATH) os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) return path @@ -564,14 +565,13 @@ def wrap_args_with_proot(args, cwd, **kwargs): - /var/log/supervisor - /tmp (except for own tmp files) ''' - from awx.main.conf import tower_settings from django.conf import settings new_args = [getattr(settings, 'AWX_PROOT_CMD', 'proot'), '-v', str(getattr(settings, 'AWX_PROOT_VERBOSITY', '0')), '-r', '/'] hide_paths = ['/etc/tower', '/var/lib/awx', '/var/log', tempfile.gettempdir(), settings.PROJECTS_ROOT, settings.JOBOUTPUT_ROOT] - hide_paths.extend(getattr(tower_settings, 'AWX_PROOT_HIDE_PATHS', None) or []) + hide_paths.extend(getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or []) for path in sorted(set(hide_paths)): if not os.path.exists(path): continue @@ -591,7 +591,7 @@ def wrap_args_with_proot(args, cwd, **kwargs): show_paths.append(settings.ANSIBLE_VENV_PATH) if settings.TOWER_USE_VENV: show_paths.append(settings.TOWER_VENV_PATH) - show_paths.extend(getattr(tower_settings, 'AWX_PROOT_SHOW_PATHS', None) or []) + show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or []) for path in sorted(set(show_paths)): if not os.path.exists(path): continue diff --git a/awx/main/validators.py b/awx/main/validators.py new file mode 100644 index 0000000000..ca7c851772 --- /dev/null +++ b/awx/main/validators.py @@ -0,0 +1,168 @@ +# Copyright (c) 2015 Ansible, Inc. +# All Rights Reserved. + +# Python +import base64 +import re + +# Django +from django.utils.translation import ugettext_lazy as _ +from django.core.exceptions import ValidationError + + +def validate_pem(data, min_keys=0, max_keys=None, min_certs=0, max_certs=None): + """ + Validate the given PEM data is valid and contains the required numbers of + keys and certificates. + + Return a list of PEM objects, where each object is a dict with the following + keys: + - 'all': The entire string for the PEM object including BEGIN/END lines. + - 'type': The type of PEM object ('PRIVATE KEY' or 'CERTIFICATE'). + - 'data': The string inside the BEGIN/END lines. + - 'b64': Key/certificate as a base64-encoded string. + - 'bin': Key/certificate as bytes. + - 'key_type': Only when type == 'PRIVATE KEY', one of 'rsa', 'dsa', + 'ecdsa', 'ed25519' or 'rsa1'. + - 'key_enc': Only when type == 'PRIVATE KEY', boolean indicating if key is + encrypted. + """ + + # Map the X in BEGIN X PRIVATE KEY to the key type (ssh-keygen -t). + # Tower jobs using OPENSSH format private keys may still fail if the + # system SSH implementation lacks support for this format. + private_key_types = { + 'RSA': 'rsa', + 'DSA': 'dsa', + 'EC': 'ecdsa', + 'OPENSSH': 'ed25519', + '': 'rsa1', + } + + # Build regular expressions for matching each object in the PEM file. + pem_obj_re = re.compile( + r'^(-{4,}) *BEGIN ([A-Z ]+?) *\1[\r\n]+' + + r'(.+?)[\r\n]+\1 *END \2 *\1[\r\n]?(.*?)$', re.DOTALL, + ) + pem_obj_header_re = re.compile(r'^(.+?):\s*?(.+?)(\\??)$') + + pem_objects = [] + key_count, cert_count = 0, 0 + data = data.lstrip() + while data: + match = pem_obj_re.match(data) + if not match: + raise ValidationError(_('Invalid certificate or key: %r...') % data[:100]) + data = match.group(4).lstrip() + + # Check PEM object type, check key type if private key. 
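+        # The groups from pem_obj_re above: group(1) is the run of dashes,
+        # group(2) the object type (e.g. "RSA PRIVATE KEY" or "CERTIFICATE"),
+        # group(3) the lines between BEGIN/END (optional headers plus base64
+        # data), and group(4) any remaining input, consumed on the next pass
+        # of the enclosing while loop.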
+ pem_obj_info = {} + pem_obj_info['all'] = match.group(0) + pem_obj_info['type'] = pem_obj_type = match.group(2) + if pem_obj_type.endswith('PRIVATE KEY'): + key_count += 1 + pem_obj_info['type'] = 'PRIVATE KEY' + key_type = pem_obj_type.replace('PRIVATE KEY', '').strip() + try: + pem_obj_info['key_type'] = private_key_types[key_type] + except KeyError: + raise ValidationError(_('Invalid private key: unsupported type "%s"') % key_type) + elif pem_obj_type == 'CERTIFICATE': + cert_count += 1 + else: + raise ValidationError(_('Unsupported PEM object type: "%s"') % pem_obj_type) + + # Ensure that this PEM object is valid base64 data. + pem_obj_info['data'] = match.group(3) + base64_data = '' + line_continues = False + for line in pem_obj_info['data'].splitlines(): + line = line.strip() + if not line: + continue + if line_continues: + line_continues = line.endswith('\\') + continue + line_match = pem_obj_header_re.match(line) + if line_match: + line_continues = line.endswith('\\') + continue + base64_data += line + try: + decoded_data = base64.b64decode(base64_data) + if not decoded_data: + raise TypeError + pem_obj_info['b64'] = base64_data + pem_obj_info['bin'] = decoded_data + except TypeError: + raise ValidationError(_('Invalid base64-encoded data')) + + # If private key, check whether it is encrypted. + if pem_obj_info.get('key_type', '') == 'ed25519': + # See https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L3218 + # Decoded key data starts with magic string (null-terminated), four byte + # length field, followed by the ciphername -- if ciphername is anything + # other than 'none' the key is encrypted. + pem_obj_info['key_enc'] = not bool(pem_obj_info['bin'].startswith('openssh-key-v1\x00\x00\x00\x00\x04none')) + elif pem_obj_info.get('key_type', ''): + pem_obj_info['key_enc'] = bool('ENCRYPTED' in pem_obj_info['data']) + + pem_objects.append(pem_obj_info) + + # Validate that the number of keys and certs provided are within the limits. 
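+    # Passing None for max_keys/max_certs means "no upper bound"; the
+    # min == max == 1 case gets the "exactly one" wording so callers such as
+    # validate_private_key() below can report a precise error.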
+ key_count_dict = dict(min_keys=min_keys, max_keys=max_keys, key_count=key_count) + if key_count < min_keys: + if min_keys == 1: + if max_keys == min_keys: + raise ValidationError(_('Exactly one private key is required.')) + else: + raise ValidationError(_('At least one private key is required.')) + else: + raise ValidationError(_('At least %(min_keys)d private keys are required, only %(key_count)d provided.') % key_count_dict) + elif max_keys is not None and key_count > max_keys: + if max_keys == 1: + raise ValidationError(_('Only one private key is allowed, %(key_count)d provided.') % key_count_dict) + else: + raise ValidationError(_('No more than %(max_keys)d private keys are allowed, %(key_count)d provided.') % key_count_dict) + cert_count_dict = dict(min_certs=min_certs, max_certs=max_certs, cert_count=cert_count) + if cert_count < min_certs: + if min_certs == 1: + if max_certs == min_certs: + raise ValidationError(_('Exactly one certificate is required.')) + else: + raise ValidationError(_('At least one certificate is required.')) + else: + raise ValidationError(_('At least %(min_certs)d certificates are required, only %(cert_count)d provided.') % cert_count_dict) + elif max_certs is not None and cert_count > max_certs: + if max_certs == 1: + raise ValidationError(_('Only one certificate is allowed, %(cert_count)d provided.') % cert_count_dict) + else: + raise ValidationError(_('No more than %(max_certs)d certificates are allowed, %(cert_count)d provided.') % cert_count_dict) + + return pem_objects + + +def validate_private_key(data): + """ + Validate that data contains exactly one private key. + """ + return validate_pem(data, min_keys=1, max_keys=1, max_certs=0) + + +def validate_certificate(data): + """ + Validate that data contains one or more certificates. Adds BEGIN/END lines + if necessary. + """ + if 'BEGIN CERTIFICATE' not in data: + data = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----\n'.format(data) + return validate_pem(data, max_keys=0, min_certs=1) + + +def validate_ssh_private_key(data): + """ + Validate that data contains at least one private key and optionally + certificates; should handle any valid options for ssh_private_key on a + credential. + """ + return validate_pem(data, min_keys=1) diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 31c8b3b8f3..33f2868bbb 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -116,6 +116,7 @@ LOG_ROOT = os.path.join(BASE_DIR) SCHEDULE_METADATA_LOCATION = os.path.join(BASE_DIR, '.tower_cycle') # Maximum number of the same job that can be waiting to run when launching from scheduler +# Note: This setting may be overridden by database settings. SCHEDULE_MAX_JOBS = 10 SITE_ID = 1 @@ -132,6 +133,7 @@ ALLOWED_HOSTS = [] # reverse proxy. REMOTE_HOST_HEADERS = ['REMOTE_ADDR', 'REMOTE_HOST'] +# Note: This setting may be overridden by database settings. STDOUT_MAX_BYTES_DISPLAY = 1048576 TEMPLATE_CONTEXT_PROCESSORS = ( # NOQA @@ -190,6 +192,7 @@ INSTALLED_APPS = ( 'polymorphic', 'taggit', 'social.apps.django_app.default', + 'awx.conf', 'awx.main', 'awx.api', 'awx.ui', @@ -242,28 +245,34 @@ AUTHENTICATION_BACKENDS = ( ) # LDAP server (default to None to skip using LDAP authentication). +# Note: This setting may be overridden by database settings. AUTH_LDAP_SERVER_URI = None # Disable LDAP referrals by default (to prevent certain LDAP queries from # hanging with AD). +# Note: This setting may be overridden by database settings. 
AUTH_LDAP_CONNECTION_OPTIONS = { ldap.OPT_REFERRALS: 0, } # Radius server settings (default to empty string to skip using Radius auth). +# Note: These settings may be overridden by database settings. RADIUS_SERVER = '' RADIUS_PORT = 1812 RADIUS_SECRET = '' # Seconds before auth tokens expire. +# Note: This setting may be overridden by database settings. AUTH_TOKEN_EXPIRATION = 1800 # Maximum number of per-user valid, concurrent tokens. # -1 is unlimited +# Note: This setting may be overridden by database settings. AUTH_TOKEN_PER_USER = -1 # Enable / Disable HTTP Basic Authentication used in the API browser # Note: Session limits are not enforced when using HTTP Basic Authentication. +# Note: This setting may be overridden by database settings. AUTH_BASIC_ENABLED = True # If set, serve only minified JS for UI. @@ -407,6 +416,20 @@ SOCIAL_AUTH_PIPELINE = ( 'awx.sso.pipeline.update_user_teams', ) +SOCIAL_AUTH_LOGIN_URL = '/' +SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/' +SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/' +SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/' + +SOCIAL_AUTH_RAISE_EXCEPTIONS = False +SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False +SOCIAL_AUTH_SLUGIFY_USERNAMES = True +SOCIAL_AUTH_CLEAN_USERNAMES = True + +SOCIAL_AUTH_SANITIZE_REDIRECTS = True +SOCIAL_AUTH_REDIRECT_IS_HTTPS = False + +# Note: These settings may be overridden by database settings. SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] @@ -433,19 +456,6 @@ SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {} SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {} SOCIAL_AUTH_SAML_ENABLED_IDPS = {} -SOCIAL_AUTH_LOGIN_URL = '/' -SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/sso/complete/' -SOCIAL_AUTH_LOGIN_ERROR_URL = '/sso/error/' -SOCIAL_AUTH_INACTIVE_USER_URL = '/sso/inactive/' - -SOCIAL_AUTH_RAISE_EXCEPTIONS = False -SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = False -SOCIAL_AUTH_SLUGIFY_USERNAMES = True -SOCIAL_AUTH_CLEAN_USERNAMES = True - -SOCIAL_AUTH_SANITIZE_REDIRECTS = True -SOCIAL_AUTH_REDIRECT_IS_HTTPS = False - SOCIAL_AUTH_ORGANIZATION_MAP = {} SOCIAL_AUTH_TEAM_MAP = {} @@ -480,30 +490,37 @@ JOB_EVENT_MAX_QUEUE_SIZE = 100 CAPTURE_JOB_EVENT_HOSTS = False # Enable proot support for running jobs (playbook runs only). +# Note: This setting may be overridden by database settings. AWX_PROOT_ENABLED = False # Command/path to proot. AWX_PROOT_CMD = 'proot' # Additional paths to hide from jobs using proot. +# Note: This setting may be overridden by database settings. AWX_PROOT_HIDE_PATHS = [] # Additional paths to show for jobs using proot. +# Note: This setting may be overridden by database settings. AWX_PROOT_SHOW_PATHS = [] # Number of jobs to show as part of the job template history AWX_JOB_TEMPLATE_HISTORY = 10 # The directory in which proot will create new temporary directories for its root +# Note: This setting may be overridden by database settings. AWX_PROOT_BASE_PATH = "/tmp" # User definable ansible callback plugins +# Note: This setting may be overridden by database settings. AWX_ANSIBLE_CALLBACK_PLUGINS = "" # Enable Pendo on the UI, possible values are 'off', 'anonymous', and 'detailed' +# Note: This setting may be overridden by database settings. PENDO_TRACKING_STATE = "off" # Default list of modules allowed for ad hoc commands. +# Note: This setting may be overridden by database settings. 
AD_HOC_COMMANDS = [ 'command', 'shell', @@ -730,6 +747,7 @@ CLOUDFORMS_INSTANCE_ID_VAR = 'id' # -- Activity Stream -- # --------------------- # Defaults for enabling/disabling activity stream. +# Note: These settings may be overridden by database settings. ACTIVITY_STREAM_ENABLED = True ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False @@ -745,161 +763,17 @@ SOCKETIO_LISTEN_PORT = 8080 FACT_CACHE_PORT = 6564 +# Note: This setting may be overridden by database settings. ORG_ADMINS_CAN_SEE_ALL_USERS = True +# Note: This setting may be overridden by database settings. TOWER_ADMIN_ALERTS = True +# Note: This setting may be overridden by database settings. TOWER_URL_BASE = "https://towerhost" -TOWER_SETTINGS_MANIFEST = { - "SCHEDULE_MAX_JOBS": { - "name": "Maximum Scheduled Jobs", - "description": "Maximum number of the same job template that can be waiting to run when launching from a schedule before no more are created", - "default": SCHEDULE_MAX_JOBS, - "type": "int", - "category": "jobs", - }, - "STDOUT_MAX_BYTES_DISPLAY": { - "name": "Standard Output Maximum Display Size", - "description": "Maximum Size of Standard Output in bytes to display before requiring the output be downloaded", - "default": STDOUT_MAX_BYTES_DISPLAY, - "type": "int", - "category": "jobs", - }, - "AUTH_TOKEN_EXPIRATION": { - "name": "Idle Time Force Log Out", - "description": "Number of seconds that a user is inactive before they will need to login again", - "type": "int", - "default": AUTH_TOKEN_EXPIRATION, - "category": "authentication", - }, - "AUTH_TOKEN_PER_USER": { - "name": "Maximum number of simultaneous logins", - "description": "Maximum number of simultaneous logins a user may have. To disable enter -1", - "type": "int", - "default": AUTH_TOKEN_PER_USER, - "category": "authentication", - }, - # "AUTH_BASIC_ENABLED": { - # "name": "Enable HTTP Basic Auth", - # "description": "Enable HTTP Basic Auth for the API Browser", - # "default": AUTH_BASIC_ENABLED, - # "type": "bool", - # "category": "authentication", - # }, - # "AUTH_LDAP_SERVER_URI": { - # "name": "LDAP Server URI", - # "description": "URI Location of the LDAP Server", - # "default": AUTH_LDAP_SERVER_URI, - # "type": "string", - # "category": "authentication", - # }, - # "RADIUS_SERVER": { - # "name": "Radius Server Host", - # "description": "Host to communicate with for Radius Authentication", - # "default": RADIUS_SERVER, - # "type": "string", - # "category": "authentication", - # }, - # "RADIUS_PORT": { - # "name": "Radius Server Port", - # "description": "Port on the Radius host for Radius Authentication", - # "default": RADIUS_PORT, - # "type": "string", - # "category": "authentication", - # }, - # "RADIUS_SECRET": { - # "name": "Radius Server Secret", - # "description": "Secret used when negotiating with the Radius server", - # "default": RADIUS_SECRET, - # "type": "string", - # "category": "authentication", - # }, - "AWX_PROOT_ENABLED": { - "name": "Enable PRoot for Job Execution", - "description": "Isolates an Ansible job from protected parts of the Tower system to prevent exposing sensitive information", - "default": AWX_PROOT_ENABLED, - "type": "bool", - "category": "jobs", - }, - "AWX_PROOT_HIDE_PATHS": { - "name": "Paths to hide from PRoot jobs", - "description": "Extra paths to hide from PRoot isolated processes", - "default": AWX_PROOT_HIDE_PATHS, - "type": "list", - "category": "jobs", - }, - "AWX_PROOT_SHOW_PATHS": { - "name": "Paths to expose to PRoot jobs", - "description": "Explicit whitelist of paths to expose to PRoot jobs", - 
"default": AWX_PROOT_SHOW_PATHS, - "type": "list", - "category": "jobs", - }, - "AWX_PROOT_BASE_PATH": { - "name": "Base PRoot execution path", - "description": "The location that PRoot will create its temporary working directory", - "default": AWX_PROOT_BASE_PATH, - "type": "string", - "category": "jobs", - }, - "AWX_ANSIBLE_CALLBACK_PLUGINS": { - "name": "Ansible Callback Plugins", - "description": "Colon Seperated Paths for extra callback plugins to be used when running jobs", - "default": AWX_ANSIBLE_CALLBACK_PLUGINS, - "type": "string", - "category": "jobs", - }, - "PENDO_TRACKING_STATE": { - "name": "Analytics Tracking State", - "description": "Enable or Disable Analytics Tracking", - "default": PENDO_TRACKING_STATE, - "type": "string", - "category": "ui", - }, - "AD_HOC_COMMANDS": { - "name": "Ansible Modules Allowed for Ad Hoc Jobs", - "description": "A colon-seperated whitelist of modules allowed to be used by ad-hoc jobs", - "default": AD_HOC_COMMANDS, - "type": "list", - "category": "jobs", - }, - "ACTIVITY_STREAM_ENABLED": { - "name": "Enable Activity Stream", - "description": "Enable capturing activity for the Tower activity stream", - "default": ACTIVITY_STREAM_ENABLED, - "type": "bool", - "category": "system", - }, - "ORG_ADMINS_CAN_SEE_ALL_USERS": { - "name": "All Users Visible to Organization Admins", - "description": "Controls whether any Organization Admin can view all users, even those not associated with their Organization", - "default": ORG_ADMINS_CAN_SEE_ALL_USERS, - "type": "bool", - "category": "system", - }, - "TOWER_ADMIN_ALERTS": { - "name": "Enable Tower Administrator Alerts", - "description": "Allow Tower to email Admin users for system events that may require attention", - "default": TOWER_ADMIN_ALERTS, - "type": "bool", - "category": "system", - }, - "TOWER_URL_BASE": { - "name": "Base URL of the Tower host", - "description": "This is used by services like Notifications to render a valid url to the Tower host", - "default": TOWER_URL_BASE, - "type": "string", - "category": "system", - }, - "LICENSE": { - "name": "Tower License", - "description": "Controls what features and functionality is enabled in Tower.", - "default": "{}", - "type": "string", - "category": "system", - }, -} +TOWER_SETTINGS_MANIFEST = {} + # Logging configuration. LOGGING = { 'version': 1, @@ -1030,6 +904,11 @@ LOGGING = { 'handlers': ['console', 'file', 'tower_warnings'], 'level': 'DEBUG', }, + 'awx.conf': { + 'handlers': ['console', 'file', 'tower_warnings'], + 'level': 'WARNING', + 'propagate': False, + }, 'awx.main.commands.run_callback_receiver': { 'handlers': ['console', 'file', 'callback_receiver'], 'propagate': False diff --git a/awx/settings/development.py b/awx/settings/development.py index 18072e6bc6..c19afa34e8 100644 --- a/awx/settings/development.py +++ b/awx/settings/development.py @@ -4,6 +4,7 @@ # Development settings for AWX project. # Python +import copy import sys import traceback @@ -38,9 +39,12 @@ if 'celeryd' in sys.argv: CALLBACK_QUEUE = "callback_tasks" -# Enable PROOT for tower-qa integration tests +# Enable PROOT for tower-qa integration tests. +# Note: This setting may be overridden by database settings. AWX_PROOT_ENABLED = True +# Disable Pendo on the UI for development/test. +# Note: This setting may be overridden by database settings. PENDO_TRACKING_STATE = "off" # Use Django-Jenkins if installed. Only run tests for awx.main app. @@ -75,6 +79,15 @@ PASSWORD_HASHERS = ( # Configure a default UUID for development only. 
SYSTEM_UUID = '00000000-0000-0000-0000-000000000000' +# Store a snapshot of default settings at this point (only for migrating from +# file to database settings). +if 'migrate_to_database_settings' in sys.argv: + DEFAULTS_SNAPSHOT = {} + this_module = sys.modules[__name__] + for setting in dir(this_module): + if setting == setting.upper(): + DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting)) + # If there is an `/etc/tower/settings.py`, include it. # If there is a `/etc/tower/conf.d/*.py`, include them. include(optional('/etc/tower/settings.py'), scope=locals()) @@ -90,8 +103,6 @@ TOWER_VENV_PATH = "/venv/tower" # only the defaults. try: include(optional('local_*.py'), scope=locals()) - if not is_testing(sys.argv): - include('postprocess.py', scope=locals()) except ImportError: traceback.print_exc() sys.exit(1) diff --git a/awx/settings/local_settings.py.docker_compose b/awx/settings/local_settings.py.docker_compose index 4c20102746..c87a209501 100644 --- a/awx/settings/local_settings.py.docker_compose +++ b/awx/settings/local_settings.py.docker_compose @@ -192,141 +192,13 @@ LOGGING['handlers']['syslog'] = { #LOGGING['loggers']['awx.main.signals']['propagate'] = True #LOGGING['loggers']['awx.main.permissions']['propagate'] = True +# Enable the following line to turn on database settings logging. +#LOGGING['loggers']['awx.conf']['level'] = 'DEBUG' + # Enable the following lines to turn on LDAP auth logging. #LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console'] #LOGGING['loggers']['django_auth_ldap']['level'] = 'DEBUG' -############################################################################### -# LDAP AUTHENTICATION SETTINGS -############################################################################### - -# Refer to django-auth-ldap docs for more details: -# http://pythonhosted.org/django-auth-ldap/authentication.html - -# Imports needed for LDAP configuration. -import ldap -from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion -from django_auth_ldap.config import ActiveDirectoryGroupType - -# LDAP server URI, such as "ldap://ldap.example.com:389" (non-SSL) or -# "ldaps://ldap.example.com:636" (SSL). LDAP authentication is disable if this -# parameter is empty. -AUTH_LDAP_SERVER_URI = '' - -# DN of user to bind for all search queries. Normally in the format -# "CN=Some User,OU=Users,DC=example,DC=com" but may also be specified as -# "DOMAIN\username" for Active Directory. -AUTH_LDAP_BIND_DN = '' - -# Password using to bind above user account. -AUTH_LDAP_BIND_PASSWORD = '' - -# Enable TLS when the connection is not using SSL. -AUTH_LDAP_START_TLS = False - -# Additional options to set for the LDAP connection. LDAP referrals are -# disabled by default (to prevent certain LDAP queries from hanging with AD). -AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP search query to find users. -AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'OU=Users,DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(sAMAccountName=%(user)s)', # Query -) - -# Alternative to user search, if user DNs are all of the same format. -#AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP to user atrributes (key is user attribute name, value is LDAP -# attribute name). -AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query to find groups. Does not support LDAPSearchUnion. 
-AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(objectClass=group)', # Query -) -# Type of group returned by the search above. Should be one of the types -# listed at: http://pythonhosted.org/django-auth-ldap/groups.html#types-of-groups -AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Group DN required to login. If specified, user must be a member of this -# group to login via LDAP. -#AUTH_LDAP_REQUIRE_GROUP = '' - -# Group DN denied from login. If specified, user will not be allowed to login -# if a member of this group. -#AUTH_LDAP_DENY_GROUP = '' - -# User profile flags updated from group membership (key is user attribute name, -# value is group DN). -AUTH_LDAP_USER_FLAGS_BY_GROUP = { - #'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Mapping between organization admins/users and LDAP groups. Keys are -# organization names (will be created if not present). Values are dictionaries -# of options for each organization's membership, where each can contain the -# following parameters: -# - remove: True/False. Defaults to False. Specifies the default for -# remove_admins or remove_users if those parameters aren't explicitly set. -# - admins: None, True/False, string or list/tuple of strings. -# If None, organization admins will not be updated. -# If True/False, all LDAP users will be added/removed as admins. -# If a string or list of strings, specifies the group DN(s). User will be -# added as an org admin if the user is a member of ANY of these groups. -# - remove_admins: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's admins. -# - users: None, True/False, string or list/tuple of strings. Same rules apply -# as for admins. -# - remove_users: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's users. -AUTH_LDAP_ORGANIZATION_MAP = { - #'Test Org': { - # 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - # 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - #}, - #'Test Org 2': { - # 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - # 'users': True, - #}, -} - -# Mapping between team members (users) and LDAP groups. Keys are team names -# (will be created if not present). Values are dictionaries of options for -# each team's membership, where each can contain the following parameters: -# - organization: string. The name of the organization to which the team -# belongs. The team will be created if the combination of organization and -# team name does not exist. The organization will first be created if it -# does not exist. -# - users: None, True/False, string or list/tuple of strings. -# If None, team members will not be updated. -# If True/False, all LDAP users will be added/removed as team members. -# If a string or list of strings, specifies the group DN(s). User will be -# added as a team member if the user is a member of ANY of these groups. -# - remove: True/False. Defaults to False. If True, a user who is not a member -# of the given groups will be removed from the team. 
-AUTH_LDAP_TEAM_MAP = { - 'My Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': True, - }, - 'Other Team': { - 'organization': 'Test Org 2', - 'users': 'CN=Other Users,CN=Users,DC=example,DC=com', - 'remove': False, - }, -} - ############################################################################### # SCM TEST SETTINGS ############################################################################### @@ -365,280 +237,6 @@ import getpass TEST_SSH_LOOPBACK_USERNAME = getpass.getuser() TEST_SSH_LOOPBACK_PASSWORD = '' -############################################################################### -# LDAP TEST SETTINGS -############################################################################### - -# LDAP connection and authentication settings for unit tests only. LDAP tests -# will be skipped if TEST_AUTH_LDAP_SERVER_URI is not configured. - -TEST_AUTH_LDAP_SERVER_URI = '' -TEST_AUTH_LDAP_BIND_DN = '' -TEST_AUTH_LDAP_BIND_PASSWORD = '' -TEST_AUTH_LDAP_START_TLS = False -TEST_AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP username/password for testing authentication. -TEST_AUTH_LDAP_USERNAME = '' -TEST_AUTH_LDAP_PASSWORD = '' - -# LDAP search query to find users. -TEST_AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'CN=Users,DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(sAMAccountName=%(user)s)', -) - -# Alternative to user search. -#TEST_AUTH_LDAP_USER_DN_TEMPLATE = 'sAMAccountName=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP attributes to user attributes. -TEST_AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query for finding groups. -TEST_AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(objectClass=group)', -) -# Type of group returned by the search above. -TEST_AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Test DNs for a group required to login. User should be a member of the first -# group, but not a member of the second. -TEST_AUTH_LDAP_REQUIRE_GROUP = 'CN=Domain Admins,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_REQUIRE_GROUP_FAIL = 'CN=Guest,CN=Users,DC=example,DC=com' - -# Test DNs for a group denied from login. User should not be a member of the -# first group, but should be a member of the second. -TEST_AUTH_LDAP_DENY_GROUP = 'CN=Guest,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_DENY_GROUP_FAIL = 'CN=Domain Admins,CN=Users,DC=example,DC=com' - -# User profile flags updated from group membership. Test user should be a -# member of the group. -TEST_AUTH_LDAP_USER_FLAGS_BY_GROUP = { - 'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Test mapping between organization admins/users and LDAP groups. -TEST_AUTH_LDAP_ORGANIZATION_MAP = { - 'Test Org': { - 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - }, - 'Test Org 2': { - 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': True, - }, -} -# Expected results from organization mapping. After login, should user be an -# admin/user in the given organization? -TEST_AUTH_LDAP_ORGANIZATION_MAP_RESULT = { - 'Test Org': {'admins': True, 'users': False}, - 'Test Org 2': {'admins': False, 'users': True}, -} - -# Second test mapping to test remove parameters. 
-TEST_AUTH_LDAP_ORGANIZATION_MAP_2 = { - 'Test Org': { - 'admins': 'CN=Domain Users,CN=Users,DC=example,DC=com', - 'users': True, - 'remove_admins': True, - 'remove_users': False, - }, - 'Test Org 2': { - 'admins': ['CN=Domain Admins,CN=Users,DC=example,DC=com', - 'CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': False, - 'remove': True, - }, -} - -# Expected results from second organization mapping. -TEST_AUTH_LDAP_ORGANIZATION_MAP_2_RESULT = { - 'Test Org': {'admins': False, 'users': True}, - 'Test Org 2': {'admins': True, 'users': False}, -} - -# Test mapping between team users and LDAP groups. -TEST_AUTH_LDAP_TEAM_MAP = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': True, - }, -} -# Expected results from team mapping. After login, should user be a member of -# the given team? -TEST_AUTH_LDAP_TEAM_MAP_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': True}, - 'Everyone Team': {'users': True}, -} - -# Second test mapping for teams to remove user. -TEST_AUTH_LDAP_TEAM_MAP_2 = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Administrators,CN=Builtin,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': False, - 'remove': False, - }, -} -# Expected results from second team mapping. After login, should user be a -# member of the given team? 
-TEST_AUTH_LDAP_TEAM_MAP_2_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': False}, - 'Everyone Team': {'users': True}, -} - -############################################################################### -# RADIUS AUTH SETTINGS -############################################################################### - -RADIUS_SERVER = '' -RADIUS_PORT = 1812 -RADIUS_SECRET = '' - -############################################################################### -# SOCIAL AUTH SETTINGS -############################################################################### - -SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' -SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' -#SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ['example.com'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS = {'hd': 'example.com'} - -SOCIAL_AUTH_GITHUB_KEY = '' -SOCIAL_AUTH_GITHUB_SECRET = '' - -SOCIAL_AUTH_GITHUB_ORG_KEY = '' -SOCIAL_AUTH_GITHUB_ORG_SECRET = '' -SOCIAL_AUTH_GITHUB_ORG_NAME = '' - -SOCIAL_AUTH_GITHUB_TEAM_KEY = '' -SOCIAL_AUTH_GITHUB_TEAM_SECRET = '' -SOCIAL_AUTH_GITHUB_TEAM_ID = '' - -SOCIAL_AUTH_SAML_SP_ENTITY_ID = '' -SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = '' -SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = '' -SOCIAL_AUTH_SAML_ORG_INFO = { - 'en-US': { - 'name': 'example', - 'displayname': 'Example', - 'url': 'http://www.example.com', - }, -} -SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_SUPPORT_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_ENABLED_IDPS = { - #'myidp': { - # 'entity_id': 'https://idp.example.com', - # 'url': 'https://myidp.example.com/sso', - # 'x509cert': '', - #}, - #'onelogin': { - # 'entity_id': 'https://app.onelogin.com/saml/metadata/123456', - # 'url': 'https://example.onelogin.com/trust/saml2/http-post/sso/123456', - # 'x509cert': '', - # 'attr_user_permanent_id': 'name_id', - # 'attr_first_name': 'User.FirstName', - # 'attr_last_name': 'User.LastName', - # 'attr_username': 'User.email', - # 'attr_email': 'User.email', - #}, -} - -SOCIAL_AUTH_ORGANIZATION_MAP = { - # Add all users to the default organization. - 'Default': { - 'users': True, - }, - #'Test Org': { - # 'admins': ['admin@example.com'], - # 'users': True, - #}, - #'Test Org 2': { - # 'admins': ['admin@example.com', re.compile(r'^tower-[^@]+*?@.*$], - # 'users': re.compile(r'^[^@].*?@example\.com$'), - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_SAML_ORGANIZATION_MAP = {} - -SOCIAL_AUTH_TEAM_MAP = { - #'My Team': { - # 'organization': 'Test Org', - # 'users': ['re.compile(r'^[^@]+?@test\.example\.com$')'], - # 'remove': True, - #}, - #'Other Team': { - # 'organization': 'Test Org 2', - # 'users': re.compile(r'^[^@]+?@test2\.example\.com$'), - # 'remove': False, - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP = {} -#SOCIAL_AUTH_SAML_TEAM_MAP = {} - -# Uncomment the line below (i.e. set SOCIAL_AUTH_USER_FIELDS to an empty list) -# to prevent new user accounts from being created. Only users who have -# previously logged in using social auth or have a user account with a matching -# email address will be able to login. 
- -#SOCIAL_AUTH_USER_FIELDS = [] - -# It is also possible to add custom functions to the social auth pipeline for -# more advanced organization and team mapping. Use at your own risk. - -#def custom_social_auth_pipeline_function(backend, details, user=None, *args, **kwargs): -# print 'custom:', backend, details, user, args, kwargs - -#SOCIAL_AUTH_PIPELINE += ( -# 'awx.settings.development.custom_social_auth_pipeline_function', -#) - ############################################################################### # INVENTORY IMPORT TEST SETTINGS ############################################################################### diff --git a/awx/settings/local_settings.py.example b/awx/settings/local_settings.py.example index cb85724366..20217fa538 100644 --- a/awx/settings/local_settings.py.example +++ b/awx/settings/local_settings.py.example @@ -156,141 +156,13 @@ LOGGING['handlers']['syslog'] = { #LOGGING['loggers']['awx.main.signals']['propagate'] = True #LOGGING['loggers']['awx.main.permissions']['propagate'] = True +# Enable the following line to turn on database settings logging. +#LOGGING['loggers']['awx.conf']['level'] = 'DEBUG' + # Enable the following lines to turn on LDAP auth logging. #LOGGING['loggers']['django_auth_ldap']['handlers'] = ['console'] #LOGGING['loggers']['django_auth_ldap']['level'] = 'DEBUG' -############################################################################### -# LDAP AUTHENTICATION SETTINGS -############################################################################### - -# Refer to django-auth-ldap docs for more details: -# http://pythonhosted.org/django-auth-ldap/authentication.html - -# Imports needed for LDAP configuration. -import ldap -from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion -from django_auth_ldap.config import ActiveDirectoryGroupType - -# LDAP server URI, such as "ldap://ldap.example.com:389" (non-SSL) or -# "ldaps://ldap.example.com:636" (SSL). LDAP authentication is disable if this -# parameter is empty. -AUTH_LDAP_SERVER_URI = '' - -# DN of user to bind for all search queries. Normally in the format -# "CN=Some User,OU=Users,DC=example,DC=com" but may also be specified as -# "DOMAIN\username" for Active Directory. -AUTH_LDAP_BIND_DN = '' - -# Password using to bind above user account. -AUTH_LDAP_BIND_PASSWORD = '' - -# Enable TLS when the connection is not using SSL. -AUTH_LDAP_START_TLS = False - -# Additional options to set for the LDAP connection. LDAP referrals are -# disabled by default (to prevent certain LDAP queries from hanging with AD). -AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP search query to find users. -AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'OU=Users,DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(sAMAccountName=%(user)s)', # Query -) - -# Alternative to user search, if user DNs are all of the same format. -#AUTH_LDAP_USER_DN_TEMPLATE = 'uid=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP to user atrributes (key is user attribute name, value is LDAP -# attribute name). -AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query to find groups. Does not support LDAPSearchUnion. -AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', # Base DN - ldap.SCOPE_SUBTREE, # SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE - '(objectClass=group)', # Query -) -# Type of group returned by the search above. 
Should be one of the types -# listed at: http://pythonhosted.org/django-auth-ldap/groups.html#types-of-groups -AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Group DN required to login. If specified, user must be a member of this -# group to login via LDAP. -#AUTH_LDAP_REQUIRE_GROUP = '' - -# Group DN denied from login. If specified, user will not be allowed to login -# if a member of this group. -#AUTH_LDAP_DENY_GROUP = '' - -# User profile flags updated from group membership (key is user attribute name, -# value is group DN). -AUTH_LDAP_USER_FLAGS_BY_GROUP = { - #'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Mapping between organization admins/users and LDAP groups. Keys are -# organization names (will be created if not present). Values are dictionaries -# of options for each organization's membership, where each can contain the -# following parameters: -# - remove: True/False. Defaults to False. Specifies the default for -# remove_admins or remove_users if those parameters aren't explicitly set. -# - admins: None, True/False, string or list/tuple of strings. -# If None, organization admins will not be updated. -# If True/False, all LDAP users will be added/removed as admins. -# If a string or list of strings, specifies the group DN(s). User will be -# added as an org admin if the user is a member of ANY of these groups. -# - remove_admins: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's admins. -# - users: None, True/False, string or list/tuple of strings. Same rules apply -# as for admins. -# - remove_users: True/False. Defaults to False. If True, a user who is not a -# member of the given groups will be removed from the organization's users. -AUTH_LDAP_ORGANIZATION_MAP = { - #'Test Org': { - # 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - # 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - #}, - #'Test Org 2': { - # 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - # 'users': True, - #}, -} - -# Mapping between team members (users) and LDAP groups. Keys are team names -# (will be created if not present). Values are dictionaries of options for -# each team's membership, where each can contain the following parameters: -# - organization: string. The name of the organization to which the team -# belongs. The team will be created if the combination of organization and -# team name does not exist. The organization will first be created if it -# does not exist. -# - users: None, True/False, string or list/tuple of strings. -# If None, team members will not be updated. -# If True/False, all LDAP users will be added/removed as team members. -# If a string or list of strings, specifies the group DN(s). User will be -# added as a team member if the user is a member of ANY of these groups. -# - remove: True/False. Defaults to False. If True, a user who is not a member -# of the given groups will be removed from the team. 
-AUTH_LDAP_TEAM_MAP = { - 'My Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': True, - }, - 'Other Team': { - 'organization': 'Test Org 2', - 'users': 'CN=Other Users,CN=Users,DC=example,DC=com', - 'remove': False, - }, -} - ############################################################################### # SCM TEST SETTINGS ############################################################################### @@ -329,280 +201,6 @@ import getpass TEST_SSH_LOOPBACK_USERNAME = getpass.getuser() TEST_SSH_LOOPBACK_PASSWORD = '' -############################################################################### -# LDAP TEST SETTINGS -############################################################################### - -# LDAP connection and authentication settings for unit tests only. LDAP tests -# will be skipped if TEST_AUTH_LDAP_SERVER_URI is not configured. - -TEST_AUTH_LDAP_SERVER_URI = '' -TEST_AUTH_LDAP_BIND_DN = '' -TEST_AUTH_LDAP_BIND_PASSWORD = '' -TEST_AUTH_LDAP_START_TLS = False -TEST_AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0, -} - -# LDAP username/password for testing authentication. -TEST_AUTH_LDAP_USERNAME = '' -TEST_AUTH_LDAP_PASSWORD = '' - -# LDAP search query to find users. -TEST_AUTH_LDAP_USER_SEARCH = LDAPSearch( - 'CN=Users,DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(sAMAccountName=%(user)s)', -) - -# Alternative to user search. -#TEST_AUTH_LDAP_USER_DN_TEMPLATE = 'sAMAccountName=%(user)s,OU=Users,DC=example,DC=com' - -# Mapping of LDAP attributes to user attributes. -TEST_AUTH_LDAP_USER_ATTR_MAP = { - 'first_name': 'givenName', - 'last_name': 'sn', - 'email': 'mail', -} - -# LDAP search query for finding groups. -TEST_AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - 'DC=example,DC=com', - ldap.SCOPE_SUBTREE, - '(objectClass=group)', -) -# Type of group returned by the search above. -TEST_AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType() - -# Test DNs for a group required to login. User should be a member of the first -# group, but not a member of the second. -TEST_AUTH_LDAP_REQUIRE_GROUP = 'CN=Domain Admins,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_REQUIRE_GROUP_FAIL = 'CN=Guest,CN=Users,DC=example,DC=com' - -# Test DNs for a group denied from login. User should not be a member of the -# first group, but should be a member of the second. -TEST_AUTH_LDAP_DENY_GROUP = 'CN=Guest,CN=Users,DC=example,DC=com' -TEST_AUTH_LDAP_DENY_GROUP_FAIL = 'CN=Domain Admins,CN=Users,DC=example,DC=com' - -# User profile flags updated from group membership. Test user should be a -# member of the group. -TEST_AUTH_LDAP_USER_FLAGS_BY_GROUP = { - 'is_superuser': 'CN=Domain Admins,CN=Users,DC=example,DC=com', -} - -# Test mapping between organization admins/users and LDAP groups. -TEST_AUTH_LDAP_ORGANIZATION_MAP = { - 'Test Org': { - 'admins': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - }, - 'Test Org 2': { - 'admins': ['CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': True, - }, -} -# Expected results from organization mapping. After login, should user be an -# admin/user in the given organization? -TEST_AUTH_LDAP_ORGANIZATION_MAP_RESULT = { - 'Test Org': {'admins': True, 'users': False}, - 'Test Org 2': {'admins': False, 'users': True}, -} - -# Second test mapping to test remove parameters. 
-TEST_AUTH_LDAP_ORGANIZATION_MAP_2 = { - 'Test Org': { - 'admins': 'CN=Domain Users,CN=Users,DC=example,DC=com', - 'users': True, - 'remove_admins': True, - 'remove_users': False, - }, - 'Test Org 2': { - 'admins': ['CN=Domain Admins,CN=Users,DC=example,DC=com', - 'CN=Administrators,CN=Builtin,DC=example,DC=com'], - 'users': False, - 'remove': True, - }, -} - -# Expected results from second organization mapping. -TEST_AUTH_LDAP_ORGANIZATION_MAP_2_RESULT = { - 'Test Org': {'admins': False, 'users': True}, - 'Test Org 2': {'admins': True, 'users': False}, -} - -# Test mapping between team users and LDAP groups. -TEST_AUTH_LDAP_TEAM_MAP = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Domain Admins,CN=Users,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': True, - }, -} -# Expected results from team mapping. After login, should user be a member of -# the given team? -TEST_AUTH_LDAP_TEAM_MAP_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': True}, - 'Everyone Team': {'users': True}, -} - -# Second test mapping for teams to remove user. -TEST_AUTH_LDAP_TEAM_MAP_2 = { - 'Domain Users Team': { - 'organization': 'Test Org', - 'users': ['CN=Domain Users,CN=Users,DC=example,DC=com'], - 'remove': False, - }, - 'Admins Team': { - 'organization': 'Admins Org', - 'users': 'CN=Administrators,CN=Builtin,DC=example,DC=com', - 'remove': True, - }, - 'Everyone Team': { - 'organization': 'Test Org 2', - 'users': False, - 'remove': False, - }, -} -# Expected results from second team mapping. After login, should user be a -# member of the given team? 
-TEST_AUTH_LDAP_TEAM_MAP_2_RESULT = { - 'Domain Users Team': {'users': False}, - 'Admins Team': {'users': False}, - 'Everyone Team': {'users': True}, -} - -############################################################################### -# RADIUS AUTH SETTINGS -############################################################################### - -RADIUS_SERVER = '' -RADIUS_PORT = 1812 -RADIUS_SECRET = '' - -############################################################################### -# SOCIAL AUTH SETTINGS -############################################################################### - -SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '' -SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = '' -#SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['profile'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ['example.com'] -#SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS = {'hd': 'example.com'} - -SOCIAL_AUTH_GITHUB_KEY = '' -SOCIAL_AUTH_GITHUB_SECRET = '' - -SOCIAL_AUTH_GITHUB_ORG_KEY = '' -SOCIAL_AUTH_GITHUB_ORG_SECRET = '' -SOCIAL_AUTH_GITHUB_ORG_NAME = '' - -SOCIAL_AUTH_GITHUB_TEAM_KEY = '' -SOCIAL_AUTH_GITHUB_TEAM_SECRET = '' -SOCIAL_AUTH_GITHUB_TEAM_ID = '' - -SOCIAL_AUTH_SAML_SP_ENTITY_ID = '' -SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = '' -SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = '' -SOCIAL_AUTH_SAML_ORG_INFO = { - 'en-US': { - 'name': 'example', - 'displayname': 'Example', - 'url': 'http://www.example.com', - }, -} -SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_SUPPORT_CONTACT = { - 'givenName': 'Some User', - 'emailAddress': 'suser@example.com', -} -SOCIAL_AUTH_SAML_ENABLED_IDPS = { - #'myidp': { - # 'entity_id': 'https://idp.example.com', - # 'url': 'https://myidp.example.com/sso', - # 'x509cert': '', - #}, - #'onelogin': { - # 'entity_id': 'https://app.onelogin.com/saml/metadata/123456', - # 'url': 'https://example.onelogin.com/trust/saml2/http-post/sso/123456', - # 'x509cert': '', - # 'attr_user_permanent_id': 'name_id', - # 'attr_first_name': 'User.FirstName', - # 'attr_last_name': 'User.LastName', - # 'attr_username': 'User.email', - # 'attr_email': 'User.email', - #}, -} - -SOCIAL_AUTH_ORGANIZATION_MAP = { - # Add all users to the default organization. - 'Default': { - 'users': True, - }, - #'Test Org': { - # 'admins': ['admin@example.com'], - # 'users': True, - #}, - #'Test Org 2': { - # 'admins': ['admin@example.com', re.compile(r'^tower-[^@]+*?@.*$], - # 'users': re.compile(r'^[^@].*?@example\.com$'), - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP = {} -#SOCIAL_AUTH_SAML_ORGANIZATION_MAP = {} - -SOCIAL_AUTH_TEAM_MAP = { - #'My Team': { - # 'organization': 'Test Org', - # 'users': ['re.compile(r'^[^@]+?@test\.example\.com$')'], - # 'remove': True, - #}, - #'Other Team': { - # 'organization': 'Test Org 2', - # 'users': re.compile(r'^[^@]+?@test2\.example\.com$'), - # 'remove': False, - #}, -} - -#SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP = {} -#SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP = {} -#SOCIAL_AUTH_SAML_TEAM_MAP = {} - -# Uncomment the line below (i.e. set SOCIAL_AUTH_USER_FIELDS to an empty list) -# to prevent new user accounts from being created. Only users who have -# previously logged in using social auth or have a user account with a matching -# email address will be able to login. 
- -#SOCIAL_AUTH_USER_FIELDS = [] - -# It is also possible to add custom functions to the social auth pipeline for -# more advanced organization and team mapping. Use at your own risk. - -#def custom_social_auth_pipeline_function(backend, details, user=None, *args, **kwargs): -# print 'custom:', backend, details, user, args, kwargs - -#SOCIAL_AUTH_PIPELINE += ( -# 'awx.settings.development.custom_social_auth_pipeline_function', -#) - ############################################################################### # INVENTORY IMPORT TEST SETTINGS ############################################################################### diff --git a/awx/settings/postprocess.py b/awx/settings/postprocess.py deleted file mode 100644 index d63833aac8..0000000000 --- a/awx/settings/postprocess.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2015 Ansible, Inc. -# All Rights Reserved. - -# flake8: noqa - -# Runs after all configuration files have been loaded to fix/check/update -# settings as needed. - -if not AUTH_LDAP_SERVER_URI: - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'awx.sso.backends.LDAPBackend'] - -if not RADIUS_SERVER: - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'awx.sso.backends.RADIUSBackend'] - -if not all([SOCIAL_AUTH_GOOGLE_OAUTH2_KEY, SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.google.GoogleOAuth2'] - -if not all([SOCIAL_AUTH_GITHUB_KEY, SOCIAL_AUTH_GITHUB_SECRET]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.github.GithubOAuth2'] - -if not all([SOCIAL_AUTH_GITHUB_ORG_KEY, SOCIAL_AUTH_GITHUB_ORG_SECRET, SOCIAL_AUTH_GITHUB_ORG_NAME]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.github.GithubOrganizationOAuth2'] - -if not all([SOCIAL_AUTH_GITHUB_TEAM_KEY, SOCIAL_AUTH_GITHUB_TEAM_SECRET, SOCIAL_AUTH_GITHUB_TEAM_ID]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'social.backends.github.GithubTeamOAuth2'] - -if not all([SOCIAL_AUTH_SAML_SP_ENTITY_ID, SOCIAL_AUTH_SAML_SP_PUBLIC_CERT, - SOCIAL_AUTH_SAML_SP_PRIVATE_KEY, SOCIAL_AUTH_SAML_ORG_INFO, - SOCIAL_AUTH_SAML_TECHNICAL_CONTACT, SOCIAL_AUTH_SAML_SUPPORT_CONTACT, - SOCIAL_AUTH_SAML_ENABLED_IDPS]): - AUTHENTICATION_BACKENDS = [x for x in AUTHENTICATION_BACKENDS if x != 'awx.sso.backends.SAMLAuth'] - -if not AUTH_BASIC_ENABLED: - REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = [x for x in REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] if x != 'rest_framework.authentication.BasicAuthentication'] - -# Update cache to use celery broker URL defined in configuration files. -if CACHES['default']['BACKEND'] == 'redis_cache.RedisCache': - CACHES['default']['LOCATION'] = BROKER_URL diff --git a/awx/settings/production.py b/awx/settings/production.py index 6efe6c397d..7df7953e25 100644 --- a/awx/settings/production.py +++ b/awx/settings/production.py @@ -4,6 +4,7 @@ # Production settings for AWX project. # Python +import copy import errno import sys import traceback @@ -57,6 +58,15 @@ LOGGING['handlers']['fact_receiver']['filename'] = '/var/log/tower/fact_receiver LOGGING['handlers']['system_tracking_migrations']['filename'] = '/var/log/tower/tower_system_tracking_migrations.log' LOGGING['handlers']['rbac_migrations']['filename'] = '/var/log/tower/tower_rbac_migrations.log' +# Store a snapshot of default settings at this point (only for migrating from +# file to database settings). 
+if 'migrate_to_database_settings' in sys.argv: + DEFAULTS_SNAPSHOT = {} + this_module = sys.modules[__name__] + for setting in dir(this_module): + if setting == setting.upper(): + DEFAULTS_SNAPSHOT[setting] = copy.deepcopy(getattr(this_module, setting)) + # Load settings from any .py files in the global conf.d directory specified in # the environment, defaulting to /etc/tower/conf.d/. settings_dir = os.environ.get('AWX_SETTINGS_DIR', '/etc/tower/conf.d/') @@ -71,7 +81,6 @@ settings_file = os.environ.get('AWX_SETTINGS_FILE', # /etc/tower/conf.d/*.py. try: include(settings_file, optional(settings_files), scope=locals()) - include('postprocess.py', scope=locals()) except ImportError: traceback.print_exc() sys.exit(1) diff --git a/awx/sso/__init__.py b/awx/sso/__init__.py index 6596e4bf78..dd505d105d 100644 --- a/awx/sso/__init__.py +++ b/awx/sso/__init__.py @@ -19,3 +19,6 @@ def xmlsec_initialize(*args, **kwargs): xmlsec_initialized = True dm.xmlsec.binding.initialize = xmlsec_initialize + + +default_app_config = 'awx.sso.apps.SSOConfig' diff --git a/awx/sso/apps.py b/awx/sso/apps.py new file mode 100644 index 0000000000..45c00e871b --- /dev/null +++ b/awx/sso/apps.py @@ -0,0 +1,9 @@ +# Django +from django.apps import AppConfig +from django.utils.translation import ugettext_lazy as _ + + +class SSOConfig(AppConfig): + + name = 'awx.sso' + verbose_name = _('Single Sign-On') diff --git a/awx/sso/backends.py b/awx/sso/backends.py index 91999034d5..591b3ab988 100644 --- a/awx/sso/backends.py +++ b/awx/sso/backends.py @@ -3,11 +3,13 @@ # Python import logging +import uuid # Django from django.dispatch import receiver from django.contrib.auth.models import User from django.conf import settings as django_settings +from django.core.signals import setting_changed # django-auth-ldap from django_auth_ldap.backend import LDAPSettings as BaseLDAPSettings @@ -23,7 +25,7 @@ from social.backends.saml import SAMLAuth as BaseSAMLAuth from social.backends.saml import SAMLIdentityProvider as BaseSAMLIdentityProvider # Ansible Tower -from awx.api.license import feature_enabled +from awx.conf.license import feature_enabled logger = logging.getLogger('awx.sso.backends') @@ -43,6 +45,20 @@ class LDAPBackend(BaseLDAPBackend): settings_prefix = 'AUTH_LDAP_' + def __init__(self, *args, **kwargs): + self._dispatch_uid = uuid.uuid4() + super(LDAPBackend, self).__init__(*args, **kwargs) + setting_changed.connect(self._on_setting_changed, dispatch_uid=self._dispatch_uid) + + def __del__(self): + setting_changed.disconnect(dispatch_uid=self._dispatch_uid) + + def _on_setting_changed(self, sender, **kwargs): + # If any AUTH_LDAP_* setting changes, force settings to be reloaded for + # this backend instance. 
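The two lines that follow implement that comment. A hedged, test-style sketch of the resulting behavior, assuming Django's override_settings (which fires the setting_changed signal on both enter and exit):

from django.test import override_settings

from awx.sso.backends import LDAPBackend

backend = LDAPBackend()
backend._get_settings()  # caches an LDAPSettings instance on the backend
with override_settings(AUTH_LDAP_SERVER_URI='ldap://ldap.example.com:389'):
    # setting_changed fired with setting='AUTH_LDAP_SERVER_URI', so the
    # receiver dropped the cached settings; the next access rebuilds them
    # with the overridden value.
    assert backend._settings is None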
+        if kwargs.get('setting', '').startswith(self.settings_prefix):
+            self._settings = None
+
     def _get_settings(self):
         if self._settings is None:
             self._settings = LDAPSettings(self.settings_prefix)
diff --git a/awx/sso/conf.py b/awx/sso/conf.py
new file mode 100644
index 0000000000..264b609367
--- /dev/null
+++ b/awx/sso/conf.py
@@ -0,0 +1,967 @@
+# Python
+import collections
+import urlparse
+
+# Django
+from django.conf import settings
+from django.core.urlresolvers import reverse
+from django.utils.translation import ugettext_lazy as _
+
+# Tower
+from awx.conf import register
+from awx.sso import fields
+from awx.main.validators import validate_private_key, validate_certificate
+from awx.sso.validators import *  # noqa
+
+
+class SocialAuthCallbackURL(object):
+
+    def __init__(self, provider):
+        self.provider = provider
+
+    def __call__(self):
+        path = reverse('social:complete', args=(self.provider,))
+        return urlparse.urljoin(settings.TOWER_URL_BASE, path)
+
+SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT = _('''\
+Mapping to organization admins/users from social auth accounts. This setting
+controls which users are placed into which Tower organizations based on
+their username and email address. Dictionary keys are organization names.
+Organizations will be created if not present, provided the license allows
+for multiple organizations; otherwise the single default organization is
+used regardless of the key. Values are dictionaries defining the options
+for each organization's membership. For each organization it is possible to
+specify which users are automatically users of the organization and also
+which users can administer the organization.
+
+- admins: None, True/False, string or list/tuple of strings.
+  If None, organization admins will not be updated.
+  If True, all users using social auth will automatically be added as admins
+  of the organization.
+  If False, no social auth users will be automatically added as admins of
+  the organization.
+  If a string or list of strings, specifies the usernames and emails for
+  users who will be added to the organization. Strings in the format
+  "//" will be interpreted as regular expressions and may also
+  be used instead of string literals; only "i" and "m" are supported for flags.
+- remove_admins: True/False. Defaults to False.
+  If True, a user who does not match will be removed from the organization's
+  administrative list.
+- users: None, True/False, string or list/tuple of strings. Same rules apply
+  as for admins.
+- remove_users: True/False. Defaults to False. Same rules apply as for
+  remove_admins.\
+''')
+
+# FIXME: /regex/gim (flags)
+
+SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER = collections.OrderedDict([
+    ('Default', collections.OrderedDict([
+        ('users', True),
+    ])),
+    ('Test Org', collections.OrderedDict([
+        ('admins', ['admin@example.com']),
+        ('users', True),
+    ])),
+    ('Test Org 2', collections.OrderedDict([
+        ('admins', ['admin@example.com', r'/^tower-[^@]+?@.*$/']),
+        ('remove_admins', True),
+        ('users', r'/^[^@].*?@example\.com$/i'),
+        ('remove_users', True),
+    ])),
+])
+
+SOCIAL_AUTH_TEAM_MAP_HELP_TEXT = _('''\
+Mapping of team members (users) from social auth accounts. Keys are team
+names (will be created if not present). Values are dictionaries of options
+for each team's membership, where each can contain the following parameters:
+
+- organization: string. The name of the organization to which the team
+  belongs. The team will be created if the combination of organization and
+  team name does not exist. 
The organization will first be created if it + does not exist. If the license does not allow for multiple organizations, + the team will always be assigned to the single default organization. +- users: None, True/False, string or list/tuple of strings. + If None, team members will not be updated. + If True/False, all social auth users will be added/removed as team + members. + If a string or list of strings, specifies expressions used to match users. + User will be added as a team member if the username or email matches. + Strings in the format "//" will be interpreted as regular + expressions and may also be used instead of string literals; only "i" and "m" + are supported for flags. +- remove: True/False. Defaults to False. If True, a user who does not match + the rules above will be removed from the team.\ +''') + +SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER = collections.OrderedDict([ + ('My Team', collections.OrderedDict([ + ('organization', 'Test Org'), + ('users', [r'/^[^@]+?@test\.example\.com$/']), + ('remove', True), + ])), + ('Other Team', collections.OrderedDict([ + ('organization', 'Test Org 2'), + ('users', r'/^[^@]+?@test2\.example\.com$/i'), + ('remove', False), + ])), +]) + +############################################################################### +# AUTHENTICATION BACKENDS DYNAMIC SETTING +############################################################################### + +register( + 'AUTHENTICATION_BACKENDS', + field_class=fields.AuthenticationBackendsField, + label=_('Authentication Backends'), + help_text=_('List of authentication backends that are enabled based on ' + 'license features and other authentication settings.'), + read_only=True, + depends_on=fields.AuthenticationBackendsField.get_all_required_settings(), + category=_('Authentication'), + category_slug='authentication', +) + +register( + 'SOCIAL_AUTH_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + default={}, + label=_('Social Auth Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('Authentication'), + category_slug='authentication', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_TEAM_MAP', + field_class=fields.SocialTeamMapField, + default={}, + label=_('Social Auth Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('Authentication'), + category_slug='authentication', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_USER_FIELDS', + field_class=fields.StringListField, + allow_null=True, + default=None, + label=_('Social Auth User Fields'), + help_text=_('When set to an empty list `[]`, this setting prevents new user ' + 'accounts from being created. Only users who have previously ' + 'logged in using social auth or have a user account with a ' + 'matching email address will be able to login.'), + category=_('Authentication'), + category_slug='authentication', + placeholder=['username', 'email'], +) + +############################################################################### +# LDAP AUTHENTICATION SETTINGS +############################################################################### + +register( + 'AUTH_LDAP_SERVER_URI', + field_class=fields.URLField, + schemes=('ldap', 'ldaps'), + allow_blank=True, + label=_('LDAP Server URI'), + help_text=_('URI to connect to LDAP server, such as "ldap://ldap.example.com:389" ' + '(non-SSL) or "ldaps://ldap.example.com:636" (SSL). 
LDAP authentication '
+                'is disabled if this parameter is empty or your license does not '
+                'enable LDAP support.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder='ldaps://ldap.example.com:636',
+)
+
+register(
+    'AUTH_LDAP_BIND_DN',
+    field_class=fields.CharField,
+    allow_blank=True,
+    default='',
+    validators=[validate_ldap_bind_dn],
+    label=_('LDAP Bind DN'),
+    help_text=_('DN (Distinguished Name) of user to bind for all search queries. '
+                'Normally in the format "CN=Some User,OU=Users,DC=example,DC=com" '
+                'but may also be specified as "DOMAIN\username" for Active Directory. '
+                'This is the system user account we will use to login to query LDAP '
+                'for other user information.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+)
+
+register(
+    'AUTH_LDAP_BIND_PASSWORD',
+    field_class=fields.CharField,
+    allow_blank=True,
+    default='',
+    label=_('LDAP Bind Password'),
+    help_text=_('Password used to bind LDAP user account.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+)
+
+register(
+    'AUTH_LDAP_START_TLS',
+    field_class=fields.BooleanField,
+    default=False,
+    label=_('LDAP Start TLS'),
+    help_text=_('Whether to enable TLS when the LDAP connection is not using SSL.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+)
+
+register(
+    'AUTH_LDAP_CONNECTION_OPTIONS',
+    field_class=fields.LDAPConnectionOptionsField,
+    default={'OPT_REFERRALS': 0},
+    label=_('LDAP Connection Options'),
+    help_text=_('Additional options to set for the LDAP connection. LDAP '
+                'referrals are disabled by default (to prevent certain LDAP '
+                'queries from hanging with AD). Option names should be strings '
+                '(e.g. "OPT_REFERRALS"). Refer to '
+                'https://www.python-ldap.org/doc/html/ldap.html#options for '
+                'possible options and values that can be set.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder=collections.OrderedDict([
+        ('OPT_REFERRALS', 0),
+    ]),
+)
+
+register(
+    'AUTH_LDAP_USER_SEARCH',
+    field_class=fields.LDAPSearchUnionField,
+    default=[],
+    label=_('LDAP User Search'),
+    help_text=_('LDAP search query to find users. Any user that matches the given '
+                'pattern will be able to login to Tower. The user should also be '
+                'mapped into a Tower organization (as defined in the '
+                'AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries '
+                'need to be supported use of "LDAPSearchUnion" is possible. See '
+                'python-ldap documentation as linked at the top of this section.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder=(
+        'OU=Users,DC=example,DC=com',
+        'SCOPE_SUBTREE',
+        '(sAMAccountName=%(user)s)',
+    ),
+)
+
+register(
+    'AUTH_LDAP_USER_DN_TEMPLATE',
+    field_class=fields.LDAPDNWithUserField,
+    allow_blank=True,
+    default='',
+    label=_('LDAP User DN Template'),
+    help_text=_('Alternative to user search, if user DNs are all of the same '
+                'format. This approach will be more efficient for user lookups than '
+                'searching if it is usable in your organizational environment. If '
+                'this setting has a value it will be used instead of '
+                'AUTH_LDAP_USER_SEARCH.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder='uid=%(user)s,OU=Users,DC=example,DC=com',
+)
+
+register(
+    'AUTH_LDAP_USER_ATTR_MAP',
+    field_class=fields.LDAPUserAttrMapField,
+    default={},
+    label=_('LDAP User Attribute Map'),
+    help_text=_('Mapping of LDAP user schema to Tower API user attributes (key is '
+                'user attribute name, value is LDAP attribute name). 
+
+register(
+    'AUTH_LDAP_USER_ATTR_MAP',
+    field_class=fields.LDAPUserAttrMapField,
+    default={},
+    label=_('LDAP User Attribute Map'),
+    help_text=_('Mapping of LDAP user schema to Tower API user attributes (key is '
+                'user attribute name, value is LDAP attribute name). The default '
+                'setting is valid for Active Directory, but users with other LDAP '
+                'configurations may need to change the values (not the keys) of '
+                'the dictionary/hash-table.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder=collections.OrderedDict([
+        ('first_name', 'givenName'),
+        ('last_name', 'sn'),
+        ('email', 'mail'),
+    ]),
+)
+
+register(
+    'AUTH_LDAP_GROUP_SEARCH',
+    field_class=fields.LDAPSearchField,
+    default=[],
+    label=_('LDAP Group Search'),
+    help_text=_('Users in Tower are mapped to organizations based on their '
+                'membership in LDAP groups. This setting defines the LDAP search '
+                'query to find groups. Note that this, unlike the user search '
+                'above, does not support LDAPSearchUnion.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder=(
+        'DC=example,DC=com',
+        'SCOPE_SUBTREE',
+        '(objectClass=group)',
+    ),
+)
+
+register(
+    'AUTH_LDAP_GROUP_TYPE',
+    field_class=fields.LDAPGroupTypeField,
+    label=_('LDAP Group Type'),
+    help_text=_('The group type may need to be changed based on the type of the '
+                'LDAP server. Values are listed at: '
+                'http://pythonhosted.org/django-auth-ldap/groups.html#types-of-groups'),
+    category=_('LDAP'),
+    category_slug='ldap',
+)
+
+register(
+    'AUTH_LDAP_REQUIRE_GROUP',
+    field_class=fields.LDAPDNField,
+    allow_blank=True,
+    default='',
+    label=_('LDAP Require Group'),
+    help_text=_('Group DN required to login. If specified, user must be a member '
+                'of this group to login via LDAP. If not set, everyone in LDAP '
+                'that matches the user search will be able to login to Tower. '
+                'Only one require group is supported.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder='CN=Tower Users,OU=Users,DC=example,DC=com',
+)
+
+register(
+    'AUTH_LDAP_DENY_GROUP',
+    field_class=fields.LDAPDNField,
+    allow_blank=True,
+    default='',
+    label=_('LDAP Deny Group'),
+    help_text=_('Group DN denied from login. If specified, user will not be '
+                'allowed to login if a member of this group. Only one deny group '
+                'is supported.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder='CN=Disabled Users,OU=Users,DC=example,DC=com',
+)
+
+register(
+    'AUTH_LDAP_USER_FLAGS_BY_GROUP',
+    field_class=fields.LDAPUserFlagsField,
+    default={},
+    label=_('LDAP User Flags By Group'),
+    help_text=_('User profile flags updated from group membership (key is user '
+                'attribute name, value is group DN). These are boolean fields '
+                'that are matched based on whether the user is a member of the '
+                'given group. So far only is_superuser is settable via this '
+                'method. This flag is set both true and false at login time '
+                'based on current LDAP settings.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder=collections.OrderedDict([
+        ('is_superuser', 'CN=Domain Admins,CN=Users,DC=example,DC=com'),
+    ]),
+)
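+
+# Illustrative example (group DN taken from the placeholder above): a user who
+# is a member of "CN=Domain Admins,CN=Users,DC=example,DC=com" logs in with
+# is_superuser set to True, and a user who is not a member has the flag set
+# back to False on the next login.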
+
+register(
+    'AUTH_LDAP_ORGANIZATION_MAP',
+    field_class=fields.LDAPOrganizationMapField,
+    default={},
+    label=_('LDAP Organization Map'),
+    help_text=_('Mapping between organization admins/users and LDAP groups. This '
+                'controls what users are placed into what Tower organizations '
+                'relative to their LDAP group memberships. Keys are organization '
+                'names. Organizations will be created if not present. Values are '
+                'dictionaries defining the options for each organization\'s '
+                'membership. For each organization it is possible to specify '
+                'what groups are automatically users of the organization and also '
+                'what groups can administer the organization.\n\n'
+                ' - admins: None, True/False, string or list of strings.\n'
+                ' If None, organization admins will not be updated based on '
+                'LDAP values.\n'
+                ' If True, all users in LDAP will automatically be added as '
+                'admins of the organization.\n'
+                ' If False, no LDAP users will be automatically added as admins '
+                'of the organization.\n'
+                ' If a string or list of strings, specifies the group DN(s); '
+                'users will be added as admins of the organization if they are '
+                'members of any of the specified groups.\n'
+                ' - remove_admins: True/False. Defaults to True.\n'
+                ' If True, a user who is not a member of the given groups will '
+                'be removed from the organization\'s administrative list.\n'
+                ' - users: None, True/False, string or list/tuple of strings. '
+                'Same rules apply as for admins.\n'
+                ' - remove_users: True/False. Defaults to True. Same rules apply '
+                'as for remove_admins.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder=collections.OrderedDict([
+        ('Test Org', collections.OrderedDict([
+            ('admins', 'CN=Domain Admins,CN=Users,DC=example,DC=com'),
+            ('users', ['CN=Domain Users,CN=Users,DC=example,DC=com']),
+            ('remove_users', True),
+            ('remove_admins', True),
+        ])),
+        ('Test Org 2', collections.OrderedDict([
+            ('admins', 'CN=Administrators,CN=Builtin,DC=example,DC=com'),
+            ('users', True),
+            ('remove_users', True),
+            ('remove_admins', True),
+        ])),
+    ]),
+)
+
+register(
+    'AUTH_LDAP_TEAM_MAP',
+    field_class=fields.LDAPTeamMapField,
+    default={},
+    label=_('LDAP Team Map'),
+    help_text=_('Mapping between team members (users) and LDAP groups. Keys are '
+                'team names (will be created if not present). Values are '
+                'dictionaries of options for each team\'s membership, where each '
+                'can contain the following parameters:\n\n'
+                ' - organization: string. The name of the organization to which '
+                'the team belongs. The team will be created if the combination of '
+                'organization and team name does not exist. The organization will '
+                'first be created if it does not exist.\n'
+                ' - users: None, True/False, string or list/tuple of strings.\n'
+                ' If None, team members will not be updated.\n'
+                ' If True/False, all LDAP users will be added/removed as team '
+                'members.\n'
+                ' If a string or list of strings, specifies the group DN(s). '
+                'User will be added as a team member if the user is a member of '
+                'ANY of these groups.\n'
+                ' - remove: True/False. Defaults to False. If True, a user who is '
+                'not a member of the given groups will be removed from the team.'),
+    category=_('LDAP'),
+    category_slug='ldap',
+    placeholder=collections.OrderedDict([
+        ('My Team', collections.OrderedDict([
+            ('organization', 'Test Org'),
+            ('users', ['CN=Domain Users,CN=Users,DC=example,DC=com']),
+            ('remove', True),
+        ])),
+        ('Other Team', collections.OrderedDict([
+            ('organization', 'Test Org 2'),
+            ('users', 'CN=Other Users,CN=Users,DC=example,DC=com'),
+            ('remove', False),
+        ])),
+    ]),
+)
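+
+# Illustrative example (DNs taken from the placeholder above): an LDAP user
+# who is a member of "CN=Domain Users,CN=Users,DC=example,DC=com" is added to
+# "My Team" in "Test Org"; because that entry sets remove=True, leaving the
+# group also removes the user from the team on the next login.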
+
+###############################################################################
+# RADIUS AUTHENTICATION SETTINGS
+###############################################################################
+
+register(
+    'RADIUS_SERVER',
+    field_class=fields.CharField,
+    allow_blank=True,
+    default='',
+    label=_('RADIUS Server'),
+    help_text=_('Hostname/IP of RADIUS server. RADIUS authentication will be '
+                'disabled if this setting is empty.'),
+    category=_('RADIUS'),
+    category_slug='radius',
+    placeholder='radius.example.com',
+)
+
+register(
+    'RADIUS_PORT',
+    field_class=fields.IntegerField,
+    min_value=1,
+    max_value=65535,
+    default=1812,
+    label=_('RADIUS Port'),
+    help_text=_('Port of RADIUS server.'),
+    category=_('RADIUS'),
+    category_slug='radius',
+)
+
+register(
+    'RADIUS_SECRET',
+    field_class=fields.RADIUSSecretField,
+    allow_blank=True,
+    default='',
+    label=_('RADIUS Secret'),
+    help_text=_('Shared secret for authenticating to RADIUS server.'),
+    category=_('RADIUS'),
+    category_slug='radius',
+)
+
+###############################################################################
+# GOOGLE OAUTH2 AUTHENTICATION SETTINGS
+###############################################################################
+
+register(
+    'SOCIAL_AUTH_GOOGLE_OAUTH2_CALLBACK_URL',
+    field_class=fields.CharField,
+    read_only=True,
+    default=SocialAuthCallbackURL('google-oauth2'),
+    label=_('Google OAuth2 Callback URL'),
+    help_text=_('Create a project at https://console.developers.google.com/ to '
+                'obtain an OAuth2 key and secret for a web application. Ensure '
+                'that the Google+ API is enabled. Provide this URL as the '
+                'callback URL for your application.'),
+    category=_('Google OAuth2'),
+    category_slug='google-oauth2',
+)
+
+register(
+    'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY',
+    field_class=fields.CharField,
+    allow_blank=True,
+    label=_('Google OAuth2 Key'),
+    help_text=_('The OAuth2 key from your web application at https://console.developers.google.com/.'),
+    category=_('Google OAuth2'),
+    category_slug='google-oauth2',
+    placeholder='528620852399-gm2dt4hrl2tsj67fqamk09k1e0ad6gd8.apps.googleusercontent.com',
+)
+
+register(
+    'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET',
+    field_class=fields.CharField,
+    allow_blank=True,
+    label=_('Google OAuth2 Secret'),
+    help_text=_('The OAuth2 secret from your web application at https://console.developers.google.com/.'),
+    category=_('Google OAuth2'),
+    category_slug='google-oauth2',
+    placeholder='q2fMVCmEregbg-drvebPp8OW',
+)
+
+register(
+    'SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS',
+    field_class=fields.StringListField,
+    default=[],
+    label=_('Google OAuth2 Whitelisted Domains'),
+    help_text=_('Update this setting to restrict the domains that are allowed to '
+                'login using Google OAuth2.'),
+    category=_('Google OAuth2'),
+    category_slug='google-oauth2',
+    placeholder=['example.com'],
+)
+
+register(
+    'SOCIAL_AUTH_GOOGLE_OAUTH2_AUTH_EXTRA_ARGUMENTS',
+    field_class=fields.DictField,
+    default={},
+    label=_('Google OAuth2 Extra Arguments'),
+    help_text=_('Extra arguments for Google OAuth2 login. 
When only allowing a ' + 'single domain to authenticate, set to `{"hd": "yourdomain.com"}` ' + 'and Google will not display any other accounts even if the user ' + 'is logged in with multiple Google accounts.'), + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder={'hd': 'example.com'}, +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('Google OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_GOOGLE_OAUTH2_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('Google OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('Google OAuth2'), + category_slug='google-oauth2', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# GITHUB OAUTH2 AUTHENTICATION SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_GITHUB_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('github'), + label=_('GitHub OAuth2 Callback URL'), + help_text=_('Create a developer application at ' + 'https://github.com/settings/developers to obtain an OAuth2 ' + 'key (Client ID) and secret (Client Secret). Provide this URL ' + 'as the callback URL for your application.'), + category=_('GitHub OAuth2'), + category_slug='github', +) + +register( + 'SOCIAL_AUTH_GITHUB_KEY', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub OAuth2 Key'), + help_text=_('The OAuth2 key (Client ID) from your GitHub developer application.'), + category=_('GitHub OAuth2'), + category_slug='github', +) + +register( + 'SOCIAL_AUTH_GITHUB_SECRET', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub OAuth2 Secret'), + help_text=_('The OAuth2 secret (Client Secret) from your GitHub developer application.'), + category=_('GitHub OAuth2'), + category_slug='github', +) + +register( + 'SOCIAL_AUTH_GITHUB_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('GitHub OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('GitHub OAuth2'), + category_slug='github', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('GitHub OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('GitHub OAuth2'), + category_slug='github', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# GITHUB ORG OAUTH2 AUTHENTICATION SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_GITHUB_ORG_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('github-org'), + label=_('GitHub Organization OAuth2 Callback URL'), + help_text=_('Create an organization-owned application at ' + 'https://github.com/organizations//settings/applications ' + 'and obtain an OAuth2 key (Client ID) and secret (Client Secret). 
' + 'Provide this URL as the callback URL for your application.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_KEY', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Organization OAuth2 Key'), + help_text=_('The OAuth2 key (Client ID) from your GitHub organization application.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_SECRET', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Organization OAuth2 Secret'), + help_text=_('The OAuth2 secret (Client Secret) from your GitHub organization application.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_NAME', + field_class=fields.CharField, + allow_blank=True, + label=_('GitHub Organization Name'), + help_text=_('The name of your GitHub organization, as used in your ' + 'organization\'s URL: https://github.com//.'), + category=_('GitHub Organization OAuth2'), + category_slug='github-org', +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_ORGANIZATION_MAP', + field_class=fields.SocialOrganizationMapField, + allow_null=True, + default=None, + label=_('GitHub Organization OAuth2 Organization Map'), + help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT, + category=_('GitHub Organization OAuth2'), + category_slug='github-org', + placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER, +) + +register( + 'SOCIAL_AUTH_GITHUB_ORG_TEAM_MAP', + field_class=fields.SocialTeamMapField, + allow_null=True, + default=None, + label=_('GitHub Organization OAuth2 Team Map'), + help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT, + category=_('GitHub Organization OAuth2'), + category_slug='github-org', + placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER, +) + +############################################################################### +# GITHUB TEAM OAUTH2 AUTHENTICATION SETTINGS +############################################################################### + +register( + 'SOCIAL_AUTH_GITHUB_TEAM_CALLBACK_URL', + field_class=fields.CharField, + read_only=True, + default=SocialAuthCallbackURL('github-team'), + label=_('GitHub Team OAuth2 Callback URL'), + help_text=_('Create an organization-owned application at ' + 'https://github.com/organizations//settings/applications ' + 'and obtain an OAuth2 key (Client ID) and secret (Client Secret). 
'
+                'Provide this URL as the callback URL for your application.'),
+    category=_('GitHub Team OAuth2'),
+    category_slug='github-team',
+)
+
+register(
+    'SOCIAL_AUTH_GITHUB_TEAM_KEY',
+    field_class=fields.CharField,
+    allow_blank=True,
+    label=_('GitHub Team OAuth2 Key'),
+    help_text=_('The OAuth2 key (Client ID) from your GitHub organization application.'),
+    category=_('GitHub Team OAuth2'),
+    category_slug='github-team',
+)
+
+register(
+    'SOCIAL_AUTH_GITHUB_TEAM_SECRET',
+    field_class=fields.CharField,
+    allow_blank=True,
+    label=_('GitHub Team OAuth2 Secret'),
+    help_text=_('The OAuth2 secret (Client Secret) from your GitHub organization application.'),
+    category=_('GitHub Team OAuth2'),
+    category_slug='github-team',
+)
+
+register(
+    'SOCIAL_AUTH_GITHUB_TEAM_ID',
+    field_class=fields.CharField,
+    allow_blank=True,
+    label=_('GitHub Team ID'),
+    help_text=_('Find the numeric team ID using the GitHub API: '
+                'http://fabian-kostadinov.github.io/2015/01/16/how-to-find-a-github-team-id/.'),
+    category=_('GitHub Team OAuth2'),
+    category_slug='github-team',
+)
+
+register(
+    'SOCIAL_AUTH_GITHUB_TEAM_ORGANIZATION_MAP',
+    field_class=fields.SocialOrganizationMapField,
+    allow_null=True,
+    default=None,
+    label=_('GitHub Team OAuth2 Organization Map'),
+    help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT,
+    category=_('GitHub Team OAuth2'),
+    category_slug='github-team',
+    placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER,
+)
+
+register(
+    'SOCIAL_AUTH_GITHUB_TEAM_TEAM_MAP',
+    field_class=fields.SocialTeamMapField,
+    allow_null=True,
+    default=None,
+    label=_('GitHub Team OAuth2 Team Map'),
+    help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT,
+    category=_('GitHub Team OAuth2'),
+    category_slug='github-team',
+    placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER,
+)
+
+###############################################################################
+# SAML AUTHENTICATION SETTINGS
+###############################################################################
+
+def get_saml_metadata_url():
+    return urlparse.urljoin(settings.TOWER_URL_BASE, reverse('sso:saml_metadata'))
+
+register(
+    'SOCIAL_AUTH_SAML_CALLBACK_URL',
+    field_class=fields.CharField,
+    read_only=True,
+    default=SocialAuthCallbackURL('saml'),
+    label=_('SAML Service Provider Callback URL'),
+    help_text=_('Register Tower as a service provider (SP) with each identity '
+                'provider (IdP) you have configured. 
Provide your SP Entity ID ' + 'and this callback URL for your application.'), + category=_('SAML'), + category_slug='saml', +) + +register( + 'SOCIAL_AUTH_SAML_METADATA_URL', + field_class=fields.CharField, + read_only=True, + default=get_saml_metadata_url, + label=_('SAML Service Provider Metadata URL'), + help_text=_('If your identity provider (IdP) allows uploading an XML ' + 'metadata file, you can download one from this URL.'), + category=_('SAML'), + category_slug='saml', +) + +register( + 'SOCIAL_AUTH_SAML_SP_ENTITY_ID', + field_class=fields.URLField, + schemes=('http', 'https'), + allow_blank=True, + default='', + label=_('SAML Service Provider Entity ID'), + help_text=_('Set to a URL for a domain name you own (does not need to be a ' + 'valid URL; only used as a unique ID).'), + category=_('SAML'), + category_slug='saml', +) + +register( + 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', + field_class=fields.CharField, + allow_blank=True, + default='', + validators=[validate_certificate], + label=_('SAML Service Provider Public Certificate'), + help_text=_('Create a keypair for Tower to use as a service provider (SP) ' + 'and include the certificate content here.'), + category=_('SAML'), + category_slug='saml', +) + +register( + 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', + field_class=fields.CharField, + allow_blank=True, + default='', + validators=[validate_private_key], + label=_('SAML Service Provider Private Key'), + help_text=_('Create a keypair for Tower to use as a service provider (SP) ' + 'and include the private key content here.'), + category=_('SAML'), + category_slug='saml', +) + +register( + 'SOCIAL_AUTH_SAML_ORG_INFO', + field_class=fields.SAMLOrgInfoField, + default={}, + label=_('SAML Service Provider Organization Info'), + help_text=_('Configure this setting with information about your app.'), + category=_('SAML'), + category_slug='saml', + placeholder=collections.OrderedDict([ + ('en-US', collections.OrderedDict([ + ('name', 'example'), + ('displayname', 'Example'), + ('url', 'http://www.example.com'), + ])), + ]), +) + +register( + 'SOCIAL_AUTH_SAML_TECHNICAL_CONTACT', + field_class=fields.SAMLContactField, + default={}, + label=_('SAML Service Provider Technical Contact'), + help_text=_('Configure this setting with your contact information.'), + category=_('SAML'), + category_slug='saml', + placeholder=collections.OrderedDict([ + ('givenName', 'Technical Contact'), + ('emailAddress', 'techsup@example.com'), + ]), +) + +register( + 'SOCIAL_AUTH_SAML_SUPPORT_CONTACT', + field_class=fields.SAMLContactField, + default={}, + label=_('SAML Service Provider Support Contact'), + help_text=_('Configure this setting with your contact information.'), + category=_('SAML'), + category_slug='saml', + placeholder=collections.OrderedDict([ + ('givenName', 'Support Contact'), + ('emailAddress', 'support@example.com'), + ]), +) + +register( + 'SOCIAL_AUTH_SAML_ENABLED_IDPS', + field_class=fields.SAMLEnabledIdPsField, + default={}, + label=_('SAML Enabled Identity Providers'), + help_text=_('Configure the Entity ID, SSO URL and certificate for each ' + 'identity provider (IdP) in use. Multiple SAML IdPs are supported. ' + 'Some IdPs may provide user data using attribute names that differ ' + 'from the default OIDs ' + '(https://github.com/omab/python-social-auth/blob/master/social/backends/saml.py#L16). 
'
+                'Attribute names may be overridden for each IdP.'),
+    category=_('SAML'),
+    category_slug='saml',
+    placeholder=collections.OrderedDict([
+        ('Okta', collections.OrderedDict([
+            ('entity_id', 'http://www.okta.com/HHniyLkaxk9e76wD0Thh'),
+            ('url', 'https://dev-123456.oktapreview.com/app/ansibletower/HHniyLkaxk9e76wD0Thh/sso/saml'),
+            ('x509cert', 'MIIDpDCCAoygAwIBAgIGAVVZ4rPzMA0GCSqGSIb3...'),
+            ('attr_user_permanent_id', 'username'),
+            ('attr_first_name', 'first_name'),
+            ('attr_last_name', 'last_name'),
+            ('attr_username', 'username'),
+            ('attr_email', 'email'),
+        ])),
+        ('OneLogin', collections.OrderedDict([
+            ('entity_id', 'https://app.onelogin.com/saml/metadata/123456'),
+            ('url', 'https://example.onelogin.com/trust/saml2/http-post/sso/123456'),
+            ('x509cert', 'MIIEJjCCAw6gAwIBAgIUfuSD54OPSBhndDHh3gZo...'),
+            ('attr_user_permanent_id', 'name_id'),
+            ('attr_first_name', 'User.FirstName'),
+            ('attr_last_name', 'User.LastName'),
+            ('attr_username', 'User.email'),
+            ('attr_email', 'User.email'),
+        ])),
+    ]),
+)
+
+register(
+    'SOCIAL_AUTH_SAML_ORGANIZATION_MAP',
+    field_class=fields.SocialOrganizationMapField,
+    allow_null=True,
+    default=None,
+    label=_('SAML Organization Map'),
+    help_text=SOCIAL_AUTH_ORGANIZATION_MAP_HELP_TEXT,
+    category=_('SAML'),
+    category_slug='saml',
+    placeholder=SOCIAL_AUTH_ORGANIZATION_MAP_PLACEHOLDER,
+)
+
+register(
+    'SOCIAL_AUTH_SAML_TEAM_MAP',
+    field_class=fields.SocialTeamMapField,
+    allow_null=True,
+    default=None,
+    label=_('SAML Team Map'),
+    help_text=SOCIAL_AUTH_TEAM_MAP_HELP_TEXT,
+    category=_('SAML'),
+    category_slug='saml',
+    placeholder=SOCIAL_AUTH_TEAM_MAP_PLACEHOLDER,
+)
diff --git a/awx/sso/fields.py b/awx/sso/fields.py
new file mode 100644
index 0000000000..ce8316e265
--- /dev/null
+++ b/awx/sso/fields.py
@@ -0,0 +1,598 @@
+# Python
+import collections
+import json
+import re
+
+# Python LDAP
+import ldap
+
+# Django
+from django.core.exceptions import ValidationError
+from django.utils.translation import ugettext_lazy as _
+
+# Django Auth LDAP
+import django_auth_ldap.config
+from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion
+
+# Tower
+from awx.conf import fields
+from awx.conf.fields import *  # noqa
+from awx.conf.license import feature_enabled
+from awx.main.validators import validate_certificate
+from awx.sso.validators import *  # noqa
+
+
+def get_subclasses(cls):
+    for subclass in cls.__subclasses__():
+        for subsubclass in get_subclasses(subclass):
+            yield subsubclass
+        yield subclass
+
+
+class AuthenticationBackendsField(fields.StringListField):
+
+    # Mapping of settings that must be set in order to enable each
+    # authentication backend.
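+    # For example (illustrative): LDAPBackend is reported as enabled only
+    # when AUTH_LDAP_SERVER_URI is set to a non-empty value and the license
+    # includes the "ldap" feature (see REQUIRED_BACKEND_FEATURE below).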
+ REQUIRED_BACKEND_SETTINGS = collections.OrderedDict([ + ('awx.sso.backends.LDAPBackend', [ + 'AUTH_LDAP_SERVER_URI', + ]), + ('awx.sso.backends.RADIUSBackend', [ + 'RADIUS_SERVER', + ]), + ('social.backends.google.GoogleOAuth2', [ + 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', + 'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET', + ]), + ('social.backends.github.GithubOAuth2', [ + 'SOCIAL_AUTH_GITHUB_KEY', + 'SOCIAL_AUTH_GITHUB_SECRET', + ]), + ('social.backends.github.GithubOrganizationOAuth2', [ + 'SOCIAL_AUTH_GITHUB_ORG_KEY', + 'SOCIAL_AUTH_GITHUB_ORG_SECRET', + 'SOCIAL_AUTH_GITHUB_ORG_NAME', + ]), + ('social.backends.github.GithubTeamOAuth2', [ + 'SOCIAL_AUTH_GITHUB_TEAM_KEY', + 'SOCIAL_AUTH_GITHUB_TEAM_SECRET', + 'SOCIAL_AUTH_GITHUB_TEAM_ID', + ]), + ('awx.sso.backends.SAMLAuth', [ + 'SOCIAL_AUTH_SAML_SP_ENTITY_ID', + 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', + 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', + 'SOCIAL_AUTH_SAML_ORG_INFO', + 'SOCIAL_AUTH_SAML_TECHNICAL_CONTACT', + 'SOCIAL_AUTH_SAML_SUPPORT_CONTACT', + 'SOCIAL_AUTH_SAML_ENABLED_IDPS', + ]), + ('django.contrib.auth.backends.ModelBackend', []), + ]) + + REQUIRED_BACKEND_FEATURE = { + 'awx.sso.backends.LDAPBackend': 'ldap', + 'awx.sso.backends.RADIUSBackend': 'enterprise_auth', + 'awx.sso.backends.SAMLAuth': 'enterprise_auth', + } + + @classmethod + def get_all_required_settings(cls): + all_required_settings = set(['LICENSE']) + for required_settings in cls.REQUIRED_BACKEND_SETTINGS.values(): + all_required_settings.update(required_settings) + return all_required_settings + + def __init__(self, *args, **kwargs): + kwargs.setdefault('default', self._default_from_required_settings) + super(AuthenticationBackendsField, self).__init__(*args, **kwargs) + + def _default_from_required_settings(self): + from django.conf import settings + try: + backends = settings._awx_conf_settings._get_default('AUTHENTICATION_BACKENDS') + except AttributeError: + backends = self.REQUIRED_BACKEND_SETTINGS.keys() + # Filter which authentication backends are enabled based on their + # required settings being defined and non-empty. Also filter available + # backends based on license features. + for backend, required_settings in self.REQUIRED_BACKEND_SETTINGS.items(): + if backend not in backends: + continue + required_feature = self.REQUIRED_BACKEND_FEATURE.get(backend, '') + if not required_feature or feature_enabled(required_feature): + if all([getattr(settings, rs, None) for rs in required_settings]): + continue + backends = filter(lambda x: x != backend, backends) + return backends + + +class LDAPConnectionOptionsField(fields.DictField): + + default_error_messages = { + 'invalid_options': _('Invalid connection option(s): {invalid_options}.'), + } + + def to_representation(self, value): + value = value or {} + opt_names = ldap.OPT_NAMES_DICT + # Convert integer options to their named constants. + repr_value = {} + for opt, opt_value in value.items(): + if opt in opt_names: + repr_value[opt_names[opt]] = opt_value + return repr_value + + def to_internal_value(self, data): + data = super(LDAPConnectionOptionsField, self).to_internal_value(data) + valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()]) + invalid_options = set(data.keys()) - set(valid_options.keys()) + if invalid_options: + options_display = json.dumps(list(invalid_options)).lstrip('[').rstrip(']') + self.fail('invalid_options', invalid_options=options_display) + # Convert named options to their integer constants. 
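+        # e.g. (illustrative) {'OPT_REFERRALS': 0} becomes {ldap.OPT_REFERRALS: 0},
+        # the inverse of the name mapping applied in to_representation() above.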
+ internal_data = {} + for opt_name, opt_value in data.items(): + internal_data[valid_options[opt_name]] = opt_value + return internal_data + + +class LDAPDNField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPDNField, self).__init__(**kwargs) + self.validators.append(validate_ldap_dn) + + +class LDAPDNWithUserField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPDNWithUserField, self).__init__(**kwargs) + self.validators.append(validate_ldap_dn_with_user) + + +class LDAPFilterField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPFilterField, self).__init__(**kwargs) + self.validators.append(validate_ldap_filter) + + +class LDAPFilterWithUserField(fields.CharField): + + def __init__(self, **kwargs): + super(LDAPFilterWithUserField, self).__init__(**kwargs) + self.validators.append(validate_ldap_filter_with_user) + + +class LDAPScopeField(fields.ChoiceField): + + def __init__(self, choices=None, **kwargs): + choices = choices or [ + ('SCOPE_BASE', _('Base')), + ('SCOPE_ONELEVEL', _('One Level')), + ('SCOPE_SUBTREE', _('Subtree')), + ] + super(LDAPScopeField, self).__init__(choices, **kwargs) + + def to_representation(self, value): + for choice in self.choices.keys(): + if value == getattr(ldap, choice): + return choice + return super(LDAPScopeField, self).to_representation(value) + + def to_internal_value(self, data): + value = super(LDAPScopeField, self).to_internal_value(data) + return getattr(ldap, value) + + +class LDAPSearchField(fields.ListField): + + default_error_messages = { + 'invalid_length': _('Expected a list of three items but got {length} instead.'), + 'type_error': _('Expected an instance of LDAPSearch but got {input_type} instead.'), + } + ldap_filter_field_class = LDAPFilterField + + def to_representation(self, value): + if not value: + return [] + if not isinstance(value, LDAPSearch): + self.fail('type_error', input_type=type(value)) + return [ + LDAPDNField().to_representation(value.base_dn), + LDAPScopeField().to_representation(value.scope), + self.ldap_filter_field_class().to_representation(value.filterstr), + ] + + def to_internal_value(self, data): + data = super(LDAPSearchField, self).to_internal_value(data) + if len(data) == 0: + return None + if len(data) != 3: + self.fail('invalid_length', length=len(data)) + return LDAPSearch( + LDAPDNField().run_validation(data[0]), + LDAPScopeField().run_validation(data[1]), + self.ldap_filter_field_class().run_validation(data[2]), + ) + + +class LDAPSearchWithUserField(LDAPSearchField): + + ldap_filter_field_class = LDAPFilterWithUserField + + +class LDAPSearchUnionField(fields.ListField): + + default_error_messages = { + 'type_error': _('Expected an instance of LDAPSearch or LDAPSearchUnion but got {input_type} instead.'), + } + ldap_search_field_class = LDAPSearchWithUserField + + def to_representation(self, value): + if not value: + return [] + elif isinstance(value, LDAPSearchUnion): + return [self.ldap_search_field_class().to_representation(s) for s in value.searches] + elif isinstance(value, LDAPSearch): + return self.ldap_search_field_class().to_representation(value) + else: + self.fail('type_error', input_type=type(value)) + + def to_internal_value(self, data): + data = super(LDAPSearchUnionField, self).to_internal_value(data) + if len(data) == 0: + return None + if len(data) == 3 and isinstance(data[0], basestring): + return self.ldap_search_field_class().run_validation(data) + else: + return LDAPSearchUnion(*[self.ldap_search_field_class().run_validation(x) for x 
in data]) + + +class LDAPUserAttrMapField(fields.DictField): + + default_error_messages = { + 'invalid_attrs': _('Invalid user attribute(s): {invalid_attrs}.'), + } + valid_user_attrs = {'first_name', 'last_name', 'email'} + child = fields.CharField() + + def to_internal_value(self, data): + data = super(LDAPUserAttrMapField, self).to_internal_value(data) + invalid_attrs = (set(data.keys()) - self.valid_user_attrs) + if invalid_attrs: + attrs_display = json.dumps(list(invalid_attrs)).lstrip('[').rstrip(']') + self.fail('invalid_attrs', invalid_attrs=attrs_display) + return data + + +class LDAPGroupTypeField(fields.ChoiceField): + + default_error_messages = { + 'type_error': _('Expected an instance of LDAPGroupType but got {input_type} instead.'), + } + + def __init__(self, choices=None, **kwargs): + group_types = get_subclasses(django_auth_ldap.config.LDAPGroupType) + choices = choices or [(x.__name__, x.__name__) for x in group_types] + super(LDAPGroupTypeField, self).__init__(choices, **kwargs) + + def to_representation(self, value): + if not value: + return '' + if not isinstance(value, django_auth_ldap.config.LDAPGroupType): + self.fail('type_error', input_type=type(value)) + return value.__class__.__name__ + + def to_internal_value(self, data): + data = super(LDAPGroupTypeField, self).to_internal_value(data) + if not data: + return None + return getattr(django_auth_ldap.config, data)() + + +class LDAPUserFlagsField(fields.DictField): + + default_error_messages = { + 'invalid_flag': _('Invalid user flag: "{invalid_flag}".'), + } + valid_user_flags = {'is_superuser'} + child = LDAPDNField() + + def to_internal_value(self, data): + data = super(LDAPUserFlagsField, self).to_internal_value(data) + invalid_flags = (set(data.keys()) - self.valid_user_flags) + if invalid_flags: + self.fail('invalid_flag', invalid_flag=list(invalid_flags)[0]) + return data + + +class LDAPDNMapField(fields.ListField): + + default_error_messages = { + 'type_error': _('Expected None, True, False, a string or list of strings but got {input_type} instead.'), + } + child = LDAPDNField() + + def to_representation(self, value): + if isinstance(value, (list, tuple)): + return super(LDAPDNMapField, self).to_representation(value) + elif value in fields.NullBooleanField.TRUE_VALUES: + return True + elif value in fields.NullBooleanField.FALSE_VALUES: + return False + elif value in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(value, basestring): + return self.child.to_representation(value) + else: + self.fail('type_error', input_type=type(value)) + + def to_internal_value(self, data): + if isinstance(data, (list, tuple)): + return super(LDAPDNMapField, self).to_internal_value(data) + elif data in fields.NullBooleanField.TRUE_VALUES: + return True + elif data in fields.NullBooleanField.FALSE_VALUES: + return False + elif data in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(data, basestring): + return self.child.run_validation(data) + else: + self.fail('type_error', input_type=type(data)) + + +class BaseDictWithChildField(fields.DictField): + + default_error_messages = { + 'missing_keys': _('Missing key(s): {missing_keys}.'), + 'invalid_keys': _('Invalid key(s): {invalid_keys}.'), + } + child_fields = { + # 'key': fields.ChildField(), + } + allow_unknown_keys = False + + def to_representation(self, value): + value = super(BaseDictWithChildField, self).to_representation(value) + for k, v in value.items(): + child_field = self.child_fields.get(k, None) + if child_field: + value[k] 
= child_field.to_representation(v)
+            elif self.allow_unknown_keys:
+                value[k] = v
+        return value
+
+    def to_internal_value(self, data):
+        data = super(BaseDictWithChildField, self).to_internal_value(data)
+        missing_keys = set()
+        for key, child_field in self.child_fields.items():
+            if not child_field.required:
+                continue
+            elif key not in data:
+                missing_keys.add(key)
+        if missing_keys:
+            keys_display = json.dumps(list(missing_keys)).lstrip('[').rstrip(']')
+            self.fail('missing_keys', missing_keys=keys_display)
+        if not self.allow_unknown_keys:
+            invalid_keys = set(data.keys()) - set(self.child_fields.keys())
+            if invalid_keys:
+                keys_display = json.dumps(list(invalid_keys)).lstrip('[').rstrip(']')
+                self.fail('invalid_keys', invalid_keys=keys_display)
+        for k, v in data.items():
+            child_field = self.child_fields.get(k, None)
+            if child_field:
+                data[k] = child_field.run_validation(v)
+            elif self.allow_unknown_keys:
+                data[k] = v
+        return data
+
+
+class LDAPSingleOrganizationMapField(BaseDictWithChildField):
+
+    default_error_messages = {
+        'invalid_keys': _('Invalid key(s) for organization map: {invalid_keys}.'),
+    }
+    child_fields = {
+        'admins': LDAPDNMapField(allow_null=True, required=False),
+        'users': LDAPDNMapField(allow_null=True, required=False),
+        'remove_admins': fields.BooleanField(required=False),
+        'remove_users': fields.BooleanField(required=False),
+    }
+
+
+class LDAPOrganizationMapField(fields.DictField):
+
+    child = LDAPSingleOrganizationMapField()
+
+
+class LDAPSingleTeamMapField(BaseDictWithChildField):
+
+    default_error_messages = {
+        'missing_keys': _('Missing required key for team map: {missing_keys}.'),
+        'invalid_keys': _('Invalid key(s) for team map: {invalid_keys}.'),
+    }
+    child_fields = {
+        'organization': fields.CharField(),
+        'users': LDAPDNMapField(allow_null=True, required=False),
+        'remove': fields.BooleanField(required=False),
+    }
+
+
+class LDAPTeamMapField(fields.DictField):
+
+    child = LDAPSingleTeamMapField()
+
+
+class RADIUSSecretField(fields.CharField):
+
+    def to_internal_value(self, value):
+        value = super(RADIUSSecretField, self).to_internal_value(value)
+        if isinstance(value, unicode):
+            value = value.encode('utf-8')
+        return value
+
+
+class SocialMapStringRegexField(fields.CharField):
+
+    def to_representation(self, value):
+        if isinstance(value, type(re.compile(''))):
+            flags = []
+            if value.flags & re.I:
+                flags.append('i')
+            if value.flags & re.M:
+                flags.append('m')
+            return '/{}/{}'.format(value.pattern, ''.join(flags))
+        else:
+            return super(SocialMapStringRegexField, self).to_representation(value)
+
+    def to_internal_value(self, data):
+        data = super(SocialMapStringRegexField, self).to_internal_value(data)
+        # Strings of the form "/pattern/" or "/pattern/im" are compiled into
+        # regular expression objects; anything else is returned unchanged.
+        match = re.match(r'^/(?P<pattern>.*)/(?P<flags>[im]+)?$', data)
+        if match:
+            flags = 0
+            if match.group('flags'):
+                if 'i' in match.group('flags'):
+                    flags |= re.I
+                if 'm' in match.group('flags'):
+                    flags |= re.M
+            try:
+                return re.compile(match.group('pattern'), flags)
+            except re.error as e:
+                raise ValidationError('{}: {}'.format(e, data))
+        return data
+
+
+class SocialMapField(fields.ListField):
+
+    default_error_messages = {
+        'type_error': _('Expected None, True, False, a string or list of strings but got {input_type} instead.'),
+    }
+    child = SocialMapStringRegexField()
+
+    def to_representation(self, value):
+        if isinstance(value, (list, tuple)):
+            return super(SocialMapField, self).to_representation(value)
+        elif value in fields.NullBooleanField.TRUE_VALUES:
+            return True
+        elif value in fields.NullBooleanField.FALSE_VALUES:
+            return 
False + elif value in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(value, (basestring, type(re.compile('')))): + return self.child.to_representation(value) + else: + self.fail('type_error', input_type=type(value)) + + def to_internal_value(self, data): + if isinstance(data, (list, tuple)): + return super(SocialMapField, self).to_internal_value(data) + elif data in fields.NullBooleanField.TRUE_VALUES: + return True + elif data in fields.NullBooleanField.FALSE_VALUES: + return False + elif data in fields.NullBooleanField.NULL_VALUES: + return None + elif isinstance(data, basestring): + return self.child.run_validation(data) + else: + self.fail('type_error', input_type=type(data)) + + +class SocialSingleOrganizationMapField(BaseDictWithChildField): + + default_error_messages = { + 'invalid_keys': _('Invalid key(s) for organization map: {invalid_keys}.'), + } + child_fields = { + 'admins': SocialMapField(allow_null=True, required=False), + 'users': SocialMapField(allow_null=True, required=False), + 'remove_admins': fields.BooleanField(required=False), + 'remove_users': fields.BooleanField(required=False), + } + + +class SocialOrganizationMapField(fields.DictField): + + child = SocialSingleOrganizationMapField() + + +class SocialSingleTeamMapField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key for team map: {missing_keys}.'), + 'invalid_keys': _('Invalid key(s) for team map: {invalid_keys}.'), + } + child_fields = { + 'organization': fields.CharField(), + 'users': SocialMapField(allow_null=True, required=False), + 'remove': fields.BooleanField(required=False), + } + + +class SocialTeamMapField(fields.DictField): + + child = SocialSingleTeamMapField() + + +class SAMLOrgInfoValueField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key(s) for org info record: {missing_keys}.'), + } + child_fields = { + 'name': fields.CharField(), + 'displayname': fields.CharField(), + 'url': fields.URLField(), + } + allow_unknown_keys = True + + +class SAMLOrgInfoField(fields.DictField): + + default_error_messages = { + 'invalid_lang_code': _('Invalid language code(s) for org info: {invalid_lang_codes}.'), + } + child = SAMLOrgInfoValueField() + + def to_internal_value(self, data): + data = super(SAMLOrgInfoField, self).to_internal_value(data) + invalid_keys = set() + for key in data.keys(): + if not re.match(r'^[a-z]{2}(?:-[a-z]{2})??$', key, re.I): + invalid_keys.add(key) + if invalid_keys: + keys_display = json.dumps(list(invalid_keys)).lstrip('[').rstrip(']') + self.fail('invalid_lang_code', invalid_lang_codes=keys_display) + return data + + +class SAMLContactField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key(s) for contact: {missing_keys}.'), + } + child_fields = { + 'givenName': fields.CharField(), + 'emailAddress': fields.EmailField(), + } + allow_unknown_keys = True + + +class SAMLIdPField(BaseDictWithChildField): + + default_error_messages = { + 'missing_keys': _('Missing required key(s) for IdP: {missing_keys}.'), + } + child_fields = { + 'entity_id': fields.URLField(), + 'url': fields.URLField(), + 'x509cert': fields.CharField(validators=[validate_certificate]), + 'attr_user_permanent_id': fields.CharField(required=False), + 'attr_first_name': fields.CharField(required=False), + 'attr_last_name': fields.CharField(required=False), + 'attr_username': fields.CharField(required=False), + 'attr_email': fields.CharField(required=False), 
+    }
+    allow_unknown_keys = True
+
+
+class SAMLEnabledIdPsField(fields.DictField):
+
+    child = SAMLIdPField()
diff --git a/awx/sso/pipeline.py b/awx/sso/pipeline.py
index 756e64279d..738a9b3b0c 100644
--- a/awx/sso/pipeline.py
+++ b/awx/sso/pipeline.py
@@ -8,7 +8,7 @@ import re
 from social.exceptions import AuthException
 
 # Tower
-from awx.api.license import feature_enabled
+from awx.conf.license import feature_enabled
 
 
 class AuthNotFound(AuthException):
diff --git a/awx/sso/validators.py b/awx/sso/validators.py
new file mode 100644
index 0000000000..dd201f3e67
--- /dev/null
+++ b/awx/sso/validators.py
@@ -0,0 +1,60 @@
+# Python
+import re
+
+# Python-LDAP
+import ldap
+
+# Django
+from django.core.exceptions import ValidationError
+from django.utils.translation import ugettext_lazy as _
+
+__all__ = ['validate_ldap_dn', 'validate_ldap_dn_with_user',
+           'validate_ldap_bind_dn', 'validate_ldap_filter',
+           'validate_ldap_filter_with_user']
+
+
+def validate_ldap_dn(value, with_user=False):
+    if with_user:
+        if '%(user)s' not in value:
+            raise ValidationError(_('DN must include "%%(user)s" placeholder for username: %s') % value)
+        dn_value = value.replace('%(user)s', 'USER')
+    else:
+        dn_value = value
+    try:
+        ldap.dn.str2dn(dn_value)
+    except ldap.DECODING_ERROR:
+        raise ValidationError(_('Invalid DN: %s') % value)
+
+
+def validate_ldap_dn_with_user(value):
+    validate_ldap_dn(value, with_user=True)
+
+
+def validate_ldap_bind_dn(value):
+    if not re.match(r'^[A-Za-z][A-Za-z0-9._-]*?\\[A-Za-z0-9 ._-]+?$', value.strip()):
+        validate_ldap_dn(value)
+
+
+def validate_ldap_filter(value, with_user=False):
+    value = value.strip()
+    if not value:
+        return
+    if with_user:
+        if '%(user)s' not in value:
+            raise ValidationError(_('Filter must include "%%(user)s" placeholder for username: %s') % value)
+        filter_value = value.replace('%(user)s', 'USER')
+    else:
+        filter_value = value
+    if re.match(r'^\([A-Za-z0-9]+?=[^()]+?\)$', filter_value):
+        return
+    elif re.match(r'^\([&|!]\(.*?\)\)$', filter_value):
+        try:
+            map(validate_ldap_filter, ['(%s)' % x for x in filter_value[3:-2].split(')(')])
+            return
+        except ValidationError:
+            pass
+    raise ValidationError(_('Invalid filter: %s') % value)
+
+
+def validate_ldap_filter_with_user(value):
+    validate_ldap_filter(value, with_user=True)
diff --git a/awx/ui/__init__.py b/awx/ui/__init__.py
index e484e62be1..ac6a554356 100644
--- a/awx/ui/__init__.py
+++ b/awx/ui/__init__.py
@@ -1,2 +1,4 @@
 # Copyright (c) 2015 Ansible, Inc.
 # All Rights Reserved.
+
+default_app_config = 'awx.ui.apps.UIConfig'
diff --git a/awx/ui/apps.py b/awx/ui/apps.py
new file mode 100644
index 0000000000..40943c6f53
--- /dev/null
+++ b/awx/ui/apps.py
@@ -0,0 +1,9 @@
+# Django
+from django.apps import AppConfig
+from django.utils.translation import ugettext_lazy as _
+
+
+class UIConfig(AppConfig):
+
+    name = 'awx.ui'
+    verbose_name = _('UI')
diff --git a/awx/ui/conf.py b/awx/ui/conf.py
new file mode 100644
index 0000000000..46fd4288c4
--- /dev/null
+++ b/awx/ui/conf.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2016 Ansible, Inc.
+# All Rights Reserved. 
+ +# Django +from django.utils.translation import ugettext_lazy as _ + +# Tower +from awx.conf import fields, register + + +register( + 'PENDO_TRACKING_STATE', + field_class=fields.ChoiceField, + choices=[ + ('off', _('Off')), + ('anonymous', _('Anonymous')), + ('detailed', _('Detailed')), + ], + label=_('Analytics Tracking State'), + help_text=_('Enable or Disable Analytics Tracking.'), + category=_('UI'), + category_slug='ui', +) diff --git a/docs/licenses/baron.txt b/docs/licenses/baron.txt new file mode 100644 index 0000000000..65c5ca88a6 --- /dev/null +++ b/docs/licenses/baron.txt @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. 
You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. 
Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/docs/licenses/redbaron.txt b/docs/licenses/redbaron.txt new file mode 100644 index 0000000000..65c5ca88a6 --- /dev/null +++ b/docs/licenses/redbaron.txt @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/docs/licenses/rply.txt b/docs/licenses/rply.txt new file mode 100644 index 0000000000..ef355c032a --- /dev/null +++ b/docs/licenses/rply.txt @@ -0,0 +1,27 @@ +Copyright (c) Alex Gaynor and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of rply nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 433ae22e00..3e82d6bc81 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -4,6 +4,7 @@ anyjson==0.3.3 appdirs==1.4.0 azure==2.0.0rc2 Babel==2.2.0 +baron==0.6.2 billiard==3.3.0.16 boto==2.40.0 celery==3.1.23 @@ -112,9 +113,11 @@ rackspace-auth-openstack==1.3 rackspace-novaclient==1.5 rax-default-network-flags-python-novaclient-ext==0.3.2 rax-scheduled-images-python-novaclient-ext==0.3.1 +redbaron==0.6.1 requests-oauthlib==0.5.0 requests==2.9.1 requestsexceptions==1.1.1 +rply==0.7.4 shade==1.4.0 simplejson==3.8.1 six==1.9.0 From 16a7c1e49328302b3512b374e7aeecb604aa64cc Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 26 Sep 2016 22:15:12 -0400 Subject: [PATCH 07/37] Based on customer config example, entity ID doesn't have to be a URL. --- awx/sso/fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/sso/fields.py b/awx/sso/fields.py index ce8316e265..6655ad3523 100644 --- a/awx/sso/fields.py +++ b/awx/sso/fields.py @@ -581,7 +581,7 @@ class SAMLIdPField(BaseDictWithChildField): 'missing_keys': _('Missing required key(s) for IdP: {missing_keys}.'), } child_fields = { - 'entity_id': fields.URLField(), + 'entity_id': fields.CharField(), 'url': fields.URLField(), 'x509cert': fields.CharField(validators=[validate_certificate]), 'attr_user_permanent_id': fields.CharField(required=False), From 5ed59a4c52356c449cfeb32405b309ff771c9a1b Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 26 Sep 2016 22:15:20 -0400 Subject: [PATCH 08/37] For #3529, show trial in /api/v1/config/ even when false. From 41d05b0ae81e90778ec51ef0a7f8514eee7cad79 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 26 Sep 2016 22:15:26 -0400 Subject: [PATCH 09/37] Flake8 fixes. 
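A note on PATCH 07 above: SAML entity IDs are opaque URIs and in practice are often URN-style identifiers rather than resolvable URLs, which is why entity_id moves from URLField to CharField. A minimal sketch of the behavioral difference, exercising DRF field validation directly (the URN value below is an illustrative assumption, not taken from the customer config):

    from rest_framework import fields
    from rest_framework.exceptions import ValidationError

    entity_id = 'urn:mace:example.org:idp'  # hypothetical URN-style entity ID

    try:
        fields.URLField().run_validation(entity_id)
    except ValidationError:
        print('URLField rejects URN-style entity IDs')

    # CharField accepts any non-blank string, matching real-world IdP metadata.
    print(fields.CharField().run_validation(entity_id))
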
--- awx/conf/fields.py | 2 -- awx/conf/management/commands/migrate_to_database_settings.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/awx/conf/fields.py b/awx/conf/fields.py index b117bdf809..a560d3a637 100644 --- a/awx/conf/fields.py +++ b/awx/conf/fields.py @@ -1,7 +1,5 @@ # Python -import json import logging -import os import urlparse # Django diff --git a/awx/conf/management/commands/migrate_to_database_settings.py b/awx/conf/management/commands/migrate_to_database_settings.py index d369f362a0..36fd783475 100644 --- a/awx/conf/management/commands/migrate_to_database_settings.py +++ b/awx/conf/management/commands/migrate_to_database_settings.py @@ -112,7 +112,7 @@ class Command(BaseCommand): if os.path.exists(license_file): try: raw_license_data = open(license_file).read() - license_data = json.loads(raw_license_data) + json.loads(raw_license_data) except Exception as e: raise CommandError('Error reading license from {0}: {1!r}'.format(license_file, e)) if self.backup_suffix: From cc90204b0f1fbfea16d0f76516e5be61d28ebd35 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 20 Sep 2016 10:14:38 -0400 Subject: [PATCH 10/37] task manager using messages * First pass, adapt singleton task manager to process messages and run jobs based on events instead of a busy loop. * Still need to make message handing run in celery, not in a consumption loop --- awx/api/views.py | 1 + .../management/commands/run_task_system.py | 391 +++++++----------- awx/main/models/unified_jobs.py | 11 + awx/main/scheduler/__init__.py | 0 awx/main/scheduler/dag_simple.py | 133 ++++++ awx/main/scheduler/dag_workflow.py | 74 ++++ awx/main/tasks.py | 20 + awx/settings/defaults.py | 6 + 8 files changed, 386 insertions(+), 250 deletions(-) create mode 100644 awx/main/scheduler/__init__.py create mode 100644 awx/main/scheduler/dag_simple.py create mode 100644 awx/main/scheduler/dag_workflow.py diff --git a/awx/api/views.py b/awx/api/views.py index 551bb814e9..19e3fd425f 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -2290,6 +2290,7 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView): new_job = obj.create_unified_job(**kv) result = new_job.signal_start(**kv) + if not result: data = dict(passwords_needed_to_start=new_job.passwords_needed_to_start) new_job.delete() diff --git a/awx/main/management/commands/run_task_system.py b/awx/main/management/commands/run_task_system.py index 855491f08c..b29b2e4d88 100644 --- a/awx/main/management/commands/run_task_system.py +++ b/awx/main/management/commands/run_task_system.py @@ -7,6 +7,10 @@ import datetime import logging import signal import time +import traceback + +from kombu import Connection, Exchange, Queue, Producer +from kombu.mixins import ConsumerMixin # Django from django.conf import settings @@ -17,6 +21,8 @@ from awx.main.models import * # noqa from awx.main.queue import FifoQueue from awx.main.tasks import handle_work_error, handle_work_success from awx.main.utils import get_system_task_capacity +from awx.main.scheduler.dag_simple import SimpleDAG +from awx.main.scheduler.dag_workflow import WorkflowDAG # Celery from celery.task.control import inspect @@ -25,208 +31,6 @@ logger = logging.getLogger('awx.main.commands.run_task_system') queue = FifoQueue('tower_task_manager') -class SimpleDAG(object): - ''' A simple implementation of a directed acyclic graph ''' - - def __init__(self): - self.nodes = [] - self.edges = [] - - def __contains__(self, obj): - for node in self.nodes: - if node['node_object'] == obj: - return True - return 
False - - def __len__(self): - return len(self.nodes) - - def __iter__(self): - return self.nodes.__iter__() - - def generate_graphviz_plot(self): - def short_string_obj(obj): - if type(obj) == Job: - type_str = "Job" - if type(obj) == AdHocCommand: - type_str = "AdHocCommand" - elif type(obj) == InventoryUpdate: - type_str = "Inventory" - elif type(obj) == ProjectUpdate: - type_str = "Project" - elif type(obj) == WorkflowJob: - type_str = "Workflow" - else: - type_str = "Unknown" - type_str += "%s" % str(obj.id) - return type_str - - doc = """ - digraph g { - rankdir = LR - """ - for n in self.nodes: - doc += "%s [color = %s]\n" % ( - short_string_obj(n['node_object']), - "red" if n['node_object'].status == 'running' else "black", - ) - for from_node, to_node, label in self.edges: - doc += "%s -> %s [ label=\"%s\" ];\n" % ( - short_string_obj(self.nodes[from_node]['node_object']), - short_string_obj(self.nodes[to_node]['node_object']), - label, - ) - doc += "}\n" - gv_file = open('/tmp/graph.gv', 'w') - gv_file.write(doc) - gv_file.close() - - def add_node(self, obj, metadata=None): - if self.find_ord(obj) is None: - self.nodes.append(dict(node_object=obj, metadata=metadata)) - - def add_edge(self, from_obj, to_obj, label=None): - from_obj_ord = self.find_ord(from_obj) - to_obj_ord = self.find_ord(to_obj) - if from_obj_ord is None or to_obj_ord is None: - raise LookupError("Object not found") - self.edges.append((from_obj_ord, to_obj_ord, label)) - - def add_edges(self, edgelist): - for edge_pair in edgelist: - self.add_edge(edge_pair[0], edge_pair[1], edge_pair[2]) - - def find_ord(self, obj): - for idx in range(len(self.nodes)): - if obj == self.nodes[idx]['node_object']: - return idx - return None - - def get_node_type(self, obj): - if type(obj) == Job: - return "job" - elif type(obj) == AdHocCommand: - return "ad_hoc_command" - elif type(obj) == InventoryUpdate: - return "inventory_update" - elif type(obj) == ProjectUpdate: - return "project_update" - elif type(obj) == SystemJob: - return "system_job" - elif type(obj) == WorkflowJob: - return "workflow_job" - return "unknown" - - def get_dependencies(self, obj, label=None): - antecedents = [] - this_ord = self.find_ord(obj) - for node, dep, lbl in self.edges: - if label: - if node == this_ord and lbl == label: - antecedents.append(self.nodes[dep]) - else: - if node == this_ord: - antecedents.append(self.nodes[dep]) - return antecedents - - def get_dependents(self, obj, label=None): - decendents = [] - this_ord = self.find_ord(obj) - for node, dep, lbl in self.edges: - if label: - if dep == this_ord and lbl == label: - decendents.append(self.nodes[node]) - else: - if dep == this_ord: - decendents.append(self.nodes[node]) - return decendents - - def get_leaf_nodes(self): - leafs = [] - for n in self.nodes: - if len(self.get_dependencies(n['node_object'])) < 1: - leafs.append(n) - return leafs - - def get_root_nodes(self): - roots = [] - for n in self.nodes: - if len(self.get_dependents(n['node_object'])) < 1: - roots.append(n) - return roots - -class WorkflowDAG(SimpleDAG): - def __init__(self, workflow_job=None): - super(WorkflowDAG, self).__init__() - if workflow_job: - self._init_graph(workflow_job) - - def _init_graph(self, workflow_job): - workflow_nodes = workflow_job.workflow_job_nodes.all() - for workflow_node in workflow_nodes: - self.add_node(workflow_node) - - for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']: - for workflow_node in workflow_nodes: - related_nodes = getattr(workflow_node, node_type).all() - 
for related_node in related_nodes: - self.add_edge(workflow_node, related_node, node_type) - - def bfs_nodes_to_run(self): - root_nodes = self.get_root_nodes() - nodes = root_nodes - nodes_found = [] - - for index, n in enumerate(nodes): - obj = n['node_object'] - job = obj.job - - if not job: - nodes_found.append(n) - # Job is about to run or is running. Hold our horses and wait for - # the job to finish. We can't proceed down the graph path until we - # have the job result. - elif job.status not in ['failed', 'error', 'successful']: - continue - elif job.status in ['failed', 'error']: - children_failed = self.get_dependencies(obj, 'failure_nodes') - children_always = self.get_dependencies(obj, 'always_nodes') - children_all = children_failed + children_always - nodes.extend(children_all) - elif job.status in ['successful']: - children_success = self.get_dependencies(obj, 'success_nodes') - nodes.extend(children_success) - else: - logger.warn("Incorrect graph structure") - return [n['node_object'] for n in nodes_found] - - def is_workflow_done(self): - root_nodes = self.get_root_nodes() - nodes = root_nodes - - for index, n in enumerate(nodes): - obj = n['node_object'] - job = obj.job - - if not job: - return False - # Job is about to run or is running. Hold our horses and wait for - # the job to finish. We can't proceed down the graph path until we - # have the job result. - elif job.status not in ['failed', 'error', 'successful']: - return False - elif job.status in ['failed', 'error']: - children_failed = self.get_dependencies(obj, 'failure_nodes') - children_always = self.get_dependencies(obj, 'always_nodes') - children_all = children_failed + children_always - nodes.extend(children_all) - elif job.status in ['successful']: - children_success = self.get_dependencies(obj, 'success_nodes') - nodes.extend(children_success) - else: - logger.warn("Incorrect graph structure") - return True - def get_tasks(): """Fetch all Tower tasks that are relevant to the task management system. @@ -247,6 +51,7 @@ def get_tasks(): graph_project_updates + graph_system_jobs + graph_workflow_jobs, key=lambda task: task.created) + print("Returning all_actions %s" % len(all_actions)) return all_actions def get_running_workflow_jobs(): @@ -277,14 +82,16 @@ def do_spawn_workflow_jobs(): #emit_websocket_notification('/socket.io/jobs', '', dict(id=)) -def rebuild_graph(message): +def rebuild_graph(): """Regenerate the task graph by refreshing known tasks from Tower, purging orphaned running tasks, and creating dependencies for new tasks before generating directed edge relationships between those tasks. """ + ''' # Sanity check: Only do this on the primary node. if Instance.objects.my_role() == 'secondary': return None + ''' inspector = inspect() if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'): @@ -297,6 +104,7 @@ def rebuild_graph(message): all_sorted_tasks = get_tasks() if not len(all_sorted_tasks): + print("All sorted task len is not? <%s, %s>" % (len(all_sorted_tasks), all_sorted_tasks)) return None active_tasks = [] @@ -417,53 +225,132 @@ def process_graph(graph, task_capacity): 'Remaining Capacity: %s' % (str(node_obj), str(impact), str(remaining_volume))) -def run_taskmanager(): - """Receive task start and finish signals to rebuild a dependency graph - and manage the actual running of tasks. 
- """ - def shutdown_handler(): - def _handler(signum, frame): - signal.signal(signum, signal.SIG_DFL) - os.kill(os.getpid(), signum) - return _handler - signal.signal(signal.SIGINT, shutdown_handler()) - signal.signal(signal.SIGTERM, shutdown_handler()) - paused = False - task_capacity = get_system_task_capacity() - last_rebuild = datetime.datetime.fromtimestamp(0) - # Attempt to pull messages off of the task system queue into perpetuity. - # - # A quick explanation of what is happening here: - # The popping messages off the queue bit is something of a sham. We remove - # the messages from the queue and then immediately throw them away. The - # `rebuild_graph` function, while it takes the message as an argument, - # ignores it. - # - # What actually happens is that we just check the database every 10 seconds - # to see what the task dependency graph looks like, and go do that. This - # is the job of the `rebuild_graph` function. - # - # There is some placeholder here: we may choose to actually use the message - # in the future. - while True: - # Pop a message off the queue. - # (If the queue is empty, None will be returned.) - message = queue.pop() +#logger = logging.getLogger('awx.main.scheduler') - # Parse out the message appropriately, rebuilding our graph if - # appropriate. - if (datetime.datetime.now() - last_rebuild).seconds > 10: - if message is not None and 'pause' in message: - logger.info("Pause command received: %s" % str(message)) - paused = message['pause'] - graph = rebuild_graph(message) - if not paused and graph is not None: - process_graph(graph, task_capacity) - last_rebuild = datetime.datetime.now() - time.sleep(0.1) +class CallbackBrokerWorker(ConsumerMixin): + def __init__(self, connection): + self.connection = connection + def get_consumers(self, Consumer, channel): + print("get_consumers() OK") + return [Consumer(queues=[Queue(settings.SCHEDULER_QUEUE, + Exchange(settings.SCHEDULER_QUEUE, type='topic'), + routing_key='scheduler.job.launch'),], + accept=['json'], + callbacks=[self.process_job_launch,]), + Consumer(queues=[Queue(settings.SCHEDULER_QUEUE, + Exchange(settings.SCHEDULER_QUEUE, type='topic'), + routing_key='scheduler.job.complete'),], + accept=['json'], + callbacks=[self.process_job_complete,] + )] + + def schedule(self): + task_capacity = get_system_task_capacity() + graph = rebuild_graph() + if graph: + process_graph(graph, task_capacity) + + def process_job_msg(self, body, message): + try: + if settings.DEBUG: + logger.info("Body: {}".format(body)) + logger.info("Message: {}".format(message)) + + if "msg_type" not in body: + raise Exception("Payload does not have a msg_type") + if "job_id" not in body: + raise Exception("Payload does not have a job_id") + + func = getattr(self, "process_%s" % body['msg_type'], None) + if not func: + raise AttributeError("No processor for message type %s" % body['msg_type']) + func(body) + + # Raised by processors when msg isn't in the expected form. + except LookupError as e: + logger.error(e) + except AttributeError as e: + logger.error(e) + except Exception as exc: + import traceback + traceback.print_exc() + logger.error('Callback Task Processor Raised Exception: %r', exc) + finally: + message.ack() + self.schedule() + + def process_job_launch(self, body, message): + print("process_job_launch()") + if "job_id" not in body: + raise KeyError("Payload does not contain job_id") + + ''' + Wait for job to exist. 
+ The job is created in a transaction then the message is created, but + the transaction may not have completed. + + FIXME: We could generate the message in a Django signal handler. + OR, we could call an explicit commit in the view and then send the + message. + + ''' + retries = 10 + retry = 0 + while not UnifiedJob.objects.filter(id=body['job_id']).exists(): + time.sleep(0.3) + + if retry >= retries: + logger.error("Failed to process 'job_launch' message for job %d" % body['job_id']) + # ack the message so we don't build up the queue. + # + # The job can still be chosen to run during tower startup or + # when another job is started or completes + message.ack() + return + retry += 1 + + job = UnifiedJob.objects.get(id=body['job_id']) + + self.schedule() + message.ack() + + def process_job_complete(self, body, message): + print("process_job_complete()") + if "job_id" not in body: + raise KeyError("Payload does not contain job_id") + + # TODO: use list of finished status from jobs.py or unified_jobs.py + finished_status = ['successful', 'error', 'failed', 'completed'] + q = UnifiedJob.objects.filter(id=body['job_id']) + + # Ensure that the job is updated in the database before we call to + # schedule the next job. + retries = 10 + retry = 0 + while True: + # Job not found, most likely deleted. That's fine + if not q.exists(): + logger.warn("Failed to find job '%d' while processing 'job_complete' message. Presume that it was deleted." % body['job_id']) + break + + job = q[0] + if job.status in finished_status: + break + + time.sleep(0.3) + + if retry >= retries: + logger.error("Expected job status '%s' to be one of '%s' while processing 'job_complete' message." % (job.status, finished_status)) + message.ack() + return + retry += 1 + + message.ack() + self.schedule() + class Command(NoArgsCommand): """Tower Task Management System This daemon is designed to reside between our tasks and celery and @@ -477,7 +364,11 @@ class Command(NoArgsCommand): help = 'Launch the Tower task management system' def handle_noargs(self, **options): - try: - run_taskmanager() - except KeyboardInterrupt: - pass + with Connection(settings.BROKER_URL) as conn: + try: + worker = CallbackBrokerWorker(conn) + worker.run() + except KeyboardInterrupt: + print('Terminating Task Management System') + + diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index 950b6fc99b..6806ff7d16 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -852,6 +852,17 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique self.update_fields(start_args=json.dumps(kwargs), status='pending') self.socketio_emit_status("pending") + from kombu import Connection, Exchange, Producer + connection = Connection(settings.BROKER_URL) + exchange = Exchange(settings.SCHEDULER_QUEUE, type='topic') + producer = Producer(connection) + producer.publish({ 'msg_type': 'job_launch', 'job_id': self.id }, + serializer='json', + compression='bzip2', + exchange=exchange, + declare=[exchange], + routing_key='scheduler.job.launch') + # Each type of unified job has a different Task class; get the # appropirate one. 
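        # The publish above pairs with the consumer side in
        # run_task_system.py: CallbackBrokerWorker binds the
        # settings.SCHEDULER_QUEUE queue to a topic exchange and routes
        # 'scheduler.job.launch' messages to process_job_launch() and
        # 'scheduler.job.complete' messages to process_job_complete(),
        # each of which acks the message and then calls schedule().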
# task_type = get_type_for_model(self) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/main/scheduler/dag_simple.py b/awx/main/scheduler/dag_simple.py new file mode 100644 index 0000000000..f04c60159a --- /dev/null +++ b/awx/main/scheduler/dag_simple.py @@ -0,0 +1,133 @@ + +from awx.main.models import * # noqa + +class SimpleDAG(object): + ''' A simple implementation of a directed acyclic graph ''' + + def __init__(self): + self.nodes = [] + self.edges = [] + + def __contains__(self, obj): + for node in self.nodes: + if node['node_object'] == obj: + return True + return False + + def __len__(self): + return len(self.nodes) + + def __iter__(self): + return self.nodes.__iter__() + + def generate_graphviz_plot(self): + def short_string_obj(obj): + if type(obj) == Job: + type_str = "Job" + if type(obj) == AdHocCommand: + type_str = "AdHocCommand" + elif type(obj) == InventoryUpdate: + type_str = "Inventory" + elif type(obj) == ProjectUpdate: + type_str = "Project" + elif type(obj) == WorkflowJob: + type_str = "Workflow" + else: + type_str = "Unknown" + type_str += "%s" % str(obj.id) + return type_str + + doc = """ + digraph g { + rankdir = LR + """ + for n in self.nodes: + doc += "%s [color = %s]\n" % ( + short_string_obj(n['node_object']), + "red" if n['node_object'].status == 'running' else "black", + ) + for from_node, to_node, label in self.edges: + doc += "%s -> %s [ label=\"%s\" ];\n" % ( + short_string_obj(self.nodes[from_node]['node_object']), + short_string_obj(self.nodes[to_node]['node_object']), + label, + ) + doc += "}\n" + gv_file = open('/tmp/graph.gv', 'w') + gv_file.write(doc) + gv_file.close() + + def add_node(self, obj, metadata=None): + if self.find_ord(obj) is None: + self.nodes.append(dict(node_object=obj, metadata=metadata)) + + def add_edge(self, from_obj, to_obj, label=None): + from_obj_ord = self.find_ord(from_obj) + to_obj_ord = self.find_ord(to_obj) + if from_obj_ord is None or to_obj_ord is None: + raise LookupError("Object not found") + self.edges.append((from_obj_ord, to_obj_ord, label)) + + def add_edges(self, edgelist): + for edge_pair in edgelist: + self.add_edge(edge_pair[0], edge_pair[1], edge_pair[2]) + + def find_ord(self, obj): + for idx in range(len(self.nodes)): + if obj == self.nodes[idx]['node_object']: + return idx + return None + + def get_node_type(self, obj): + if type(obj) == Job: + return "job" + elif type(obj) == AdHocCommand: + return "ad_hoc_command" + elif type(obj) == InventoryUpdate: + return "inventory_update" + elif type(obj) == ProjectUpdate: + return "project_update" + elif type(obj) == SystemJob: + return "system_job" + elif type(obj) == WorkflowJob: + return "workflow_job" + return "unknown" + + def get_dependencies(self, obj, label=None): + antecedents = [] + this_ord = self.find_ord(obj) + for node, dep, lbl in self.edges: + if label: + if node == this_ord and lbl == label: + antecedents.append(self.nodes[dep]) + else: + if node == this_ord: + antecedents.append(self.nodes[dep]) + return antecedents + + def get_dependents(self, obj, label=None): + decendents = [] + this_ord = self.find_ord(obj) + for node, dep, lbl in self.edges: + if label: + if dep == this_ord and lbl == label: + decendents.append(self.nodes[node]) + else: + if dep == this_ord: + decendents.append(self.nodes[node]) + return decendents + + def get_leaf_nodes(self): + leafs = [] + for n in self.nodes: + if len(self.get_dependencies(n['node_object'])) < 1: + 
leafs.append(n) + return leafs + + def get_root_nodes(self): + roots = [] + for n in self.nodes: + if len(self.get_dependents(n['node_object'])) < 1: + roots.append(n) + return roots + diff --git a/awx/main/scheduler/dag_workflow.py b/awx/main/scheduler/dag_workflow.py new file mode 100644 index 0000000000..1a8269c064 --- /dev/null +++ b/awx/main/scheduler/dag_workflow.py @@ -0,0 +1,74 @@ +from dag_simple import SimpleDAG + +class WorkflowDAG(SimpleDAG): + def __init__(self, workflow_job=None): + super(WorkflowDAG, self).__init__() + if workflow_job: + self._init_graph(workflow_job) + + def _init_graph(self, workflow_job): + workflow_nodes = workflow_job.workflow_job_nodes.all() + for workflow_node in workflow_nodes: + self.add_node(workflow_node) + + for node_type in ['success_nodes', 'failure_nodes', 'always_nodes']: + for workflow_node in workflow_nodes: + related_nodes = getattr(workflow_node, node_type).all() + for related_node in related_nodes: + self.add_edge(workflow_node, related_node, node_type) + + def bfs_nodes_to_run(self): + root_nodes = self.get_root_nodes() + nodes = root_nodes + nodes_found = [] + + for index, n in enumerate(nodes): + obj = n['node_object'] + job = obj.job + + if not job: + nodes_found.append(n) + # Job is about to run or is running. Hold our horses and wait for + # the job to finish. We can't proceed down the graph path until we + # have the job result. + elif job.status not in ['failed', 'error', 'successful']: + continue + elif job.status in ['failed', 'error']: + children_failed = self.get_dependencies(obj, 'failure_nodes') + children_always = self.get_dependencies(obj, 'always_nodes') + children_all = children_failed + children_always + nodes.extend(children_all) + elif job.status in ['successful']: + children_success = self.get_dependencies(obj, 'success_nodes') + nodes.extend(children_success) + else: + logger.warn("Incorrect graph structure") + return [n['node_object'] for n in nodes_found] + + def is_workflow_done(self): + root_nodes = self.get_root_nodes() + nodes = root_nodes + + for index, n in enumerate(nodes): + obj = n['node_object'] + job = obj.job + + if not job: + return False + # Job is about to run or is running. Hold our horses and wait for + # the job to finish. We can't proceed down the graph path until we + # have the job result. 
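+            # (Unlike bfs_nodes_to_run above, which collects runnable
+            # nodes, this walk short-circuits: any reachable node whose
+            # job has not finished means the workflow is not done.)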
+ elif job.status not in ['failed', 'error', 'successful']: + return False + elif job.status in ['failed', 'error']: + children_failed = self.get_dependencies(obj, 'failure_nodes') + children_always = self.get_dependencies(obj, 'always_nodes') + children_all = children_failed + children_always + nodes.extend(children_all) + elif job.status in ['successful']: + children_success = self.get_dependencies(obj, 'success_nodes') + nodes.extend(children_success) + else: + logger.warn("Incorrect graph structure") + return True + diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 097dca517d..86552b6404 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -31,6 +31,9 @@ except: # Pexpect import pexpect +# Kombu +from kombu import Connection, Exchange, Queue, Producer + # Celery from celery import Task, task from celery.signals import celeryd_init @@ -202,6 +205,18 @@ def _send_notification_templates(instance, status_str): for n in all_notification_templates], job_id=instance.id) + +def _send_job_complete_msg(instance): + connection = Connection(settings.BROKER_URL) + exchange = Exchange(settings.SCHEDULER_QUEUE, type='topic') + producer = Producer(connection) + producer.publish({ 'job_id': instance.id, 'msg_type': 'job_complete' }, + serializer='json', + compression='bzip2', + exchange=exchange, + declare=[exchange], + routing_key='scheduler.job.complete') + @task(bind=True, queue='default') def handle_work_success(self, result, task_actual): instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id']) @@ -210,6 +225,8 @@ def handle_work_success(self, result, task_actual): _send_notification_templates(instance, 'succeeded') + _send_job_complete_msg(instance) + @task(bind=True, queue='default') def handle_work_error(self, task_id, subtasks=None): print('Executing error task id %s, subtasks: %s' % @@ -238,6 +255,9 @@ def handle_work_error(self, task_id, subtasks=None): if first_instance: _send_notification_templates(first_instance, 'failed') + + if first_instance: + _send_job_complete_msg(first_instance) @task(queue='default') def update_inventory_computed_fields(inventory_id, should_update_hosts=True): diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 31c8b3b8f3..8a65d7d322 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -342,6 +342,7 @@ CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' CELERY_QUEUES = ( Queue('default', Exchange('default'), routing_key='default'), Queue('jobs', Exchange('jobs'), routing_key='jobs'), + #Queue('scheduler', Exchange('scheduler'), routing_key='scheduler.job.#'), # Projects use a fanout queue, this isn't super well supported Broadcast('projects'), ) @@ -737,6 +738,7 @@ ACTIVITY_STREAM_ENABLED_FOR_INVENTORY_SYNC = False INTERNAL_API_URL = 'http://127.0.0.1:%s' % DEVSERVER_DEFAULT_PORT CALLBACK_QUEUE = "callback_tasks" +SCHEDULER_QUEUE = "scheduler" TASK_COMMAND_PORT = 6559 @@ -1042,6 +1044,10 @@ LOGGING = { 'handlers': ['console', 'file', 'task_system'], 'propagate': False }, + 'awx.main.scheduler': { + 'handlers': ['console', 'file', 'task_system'], + 'propagate': False + }, 'awx.main.commands.run_fact_cache_receiver': { 'handlers': ['console', 'file', 'fact_receiver'], 'propagate': False From cdb65ccac9ead06a081d641ffbe084374cc0dd77 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 22 Sep 2016 11:20:24 -0400 Subject: [PATCH 11/37] replace task manager with event driven scheduler --- Makefile | 13 +- Procfile | 3 +- .../management/commands/run_task_system.py | 374 
------------------ awx/main/models/unified_jobs.py | 12 +- awx/main/scheduler/__init__.py | 222 +++++++++++ awx/main/scheduler/dag_simple.py | 9 +- awx/main/scheduler/dag_workflow.py | 8 +- awx/main/scheduler/tasks.py | 79 ++++ awx/main/tasks.py | 43 +- awx/main/tests/base.py | 14 +- .../test_dag.py} | 12 +- awx/settings/defaults.py | 11 +- 12 files changed, 344 insertions(+), 456 deletions(-) delete mode 100644 awx/main/management/commands/run_task_system.py create mode 100644 awx/main/scheduler/tasks.py rename awx/main/tests/unit/{commands/test_run_task_system.py => scheduler/test_dag.py} (97%) diff --git a/Makefile b/Makefile index fd9d87cd2e..52c30c4bb7 100644 --- a/Makefile +++ b/Makefile @@ -357,7 +357,6 @@ server_noattach: tmux rename-window 'Tower' tmux select-window -t tower:0 tmux split-window -v 'exec make celeryd' - tmux split-window -h 'exec make taskmanager' tmux new-window 'exec make receiver' tmux select-window -t tower:1 tmux rename-window 'Extra Services' @@ -397,7 +396,7 @@ celeryd: @if [ "$(VENV_BASE)" ]; then \ . $(VENV_BASE)/tower/bin/activate; \ fi; \ - $(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default + $(PYTHON) manage.py celeryd -l DEBUG -B --autoscale=20,3 --schedule=$(CELERY_SCHEDULE_FILE) -Q projects,jobs,default,scheduler #$(PYTHON) manage.py celery multi show projects jobs default -l DEBUG -Q:projects projects -Q:jobs jobs -Q:default default -c:projects 1 -c:jobs 3 -c:default 3 -Ofair -B --schedule=$(CELERY_SCHEDULE_FILE) # Run to start the zeromq callback receiver @@ -407,16 +406,6 @@ receiver: fi; \ $(PYTHON) manage.py run_callback_receiver -taskmanager: - @if [ "$(VENV_BASE)" ]; then \ - . $(VENV_BASE)/tower/bin/activate; \ - fi; \ - if [ "$(COMPOSE_HOST)" == "tower_1" ] || [ "$(COMPOSE_HOST)" == "tower" ]; then \ - $(PYTHON) manage.py run_task_system; \ - else \ - while true; do sleep 2; done; \ - fi - socketservice: @if [ "$(VENV_BASE)" ]; then \ . $(VENV_BASE)/tower/bin/activate; \ diff --git a/Procfile b/Procfile index 433417f70b..b8dd37a983 100644 --- a/Procfile +++ b/Procfile @@ -1,7 +1,6 @@ runserver: make runserver celeryd: make celeryd -taskmanager: make taskmanager receiver: make receiver socketservice: make socketservice factcacher: make factcacher -flower: make flower \ No newline at end of file +flower: make flower diff --git a/awx/main/management/commands/run_task_system.py b/awx/main/management/commands/run_task_system.py deleted file mode 100644 index b29b2e4d88..0000000000 --- a/awx/main/management/commands/run_task_system.py +++ /dev/null @@ -1,374 +0,0 @@ -#Copyright (c) 2015 Ansible, Inc. 
-# All Rights Reserved - -# Python -import os -import datetime -import logging -import signal -import time -import traceback - -from kombu import Connection, Exchange, Queue, Producer -from kombu.mixins import ConsumerMixin - -# Django -from django.conf import settings -from django.core.management.base import NoArgsCommand - -# AWX -from awx.main.models import * # noqa -from awx.main.queue import FifoQueue -from awx.main.tasks import handle_work_error, handle_work_success -from awx.main.utils import get_system_task_capacity -from awx.main.scheduler.dag_simple import SimpleDAG -from awx.main.scheduler.dag_workflow import WorkflowDAG - -# Celery -from celery.task.control import inspect - -logger = logging.getLogger('awx.main.commands.run_task_system') - -queue = FifoQueue('tower_task_manager') - -def get_tasks(): - """Fetch all Tower tasks that are relevant to the task management - system. - """ - RELEVANT_JOBS = ('pending', 'waiting', 'running') - # TODO: Replace this when we can grab all objects in a sane way. - graph_jobs = [j for j in Job.objects.filter(status__in=RELEVANT_JOBS)] - graph_ad_hoc_commands = [ahc for ahc in AdHocCommand.objects.filter(status__in=RELEVANT_JOBS)] - graph_inventory_updates = [iu for iu in - InventoryUpdate.objects.filter(status__in=RELEVANT_JOBS)] - graph_project_updates = [pu for pu in - ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS)] - graph_system_jobs = [sj for sj in - SystemJob.objects.filter(status__in=RELEVANT_JOBS)] - graph_workflow_jobs = [wf for wf in - WorkflowJob.objects.filter(status__in=RELEVANT_JOBS)] - all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates + - graph_project_updates + graph_system_jobs + - graph_workflow_jobs, - key=lambda task: task.created) - print("Returning all_actions %s" % len(all_actions)) - return all_actions - -def get_running_workflow_jobs(): - graph_workflow_jobs = [wf for wf in - WorkflowJob.objects.filter(status='running')] - return graph_workflow_jobs - -def do_spawn_workflow_jobs(): - workflow_jobs = get_running_workflow_jobs() - for workflow_job in workflow_jobs: - dag = WorkflowDAG(workflow_job) - spawn_nodes = dag.bfs_nodes_to_run() - for spawn_node in spawn_nodes: - # TODO: Inject job template template params as kwargs. - # Make sure to take into account extra_vars merge logic - kv = {} - job = spawn_node.unified_job_template.create_unified_job(**kv) - spawn_node.job = job - spawn_node.save() - can_start = job.signal_start(**kv) - if not can_start: - job.status = 'failed' - job.job_explanation = "Workflow job could not start because it was not in the right state or required manual credentials" - job.save(update_fields=['status', 'job_explanation']) - job.socketio_emit_status("failed") - - # TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ? - #emit_websocket_notification('/socket.io/jobs', '', dict(id=)) - - -def rebuild_graph(): - """Regenerate the task graph by refreshing known tasks from Tower, purging - orphaned running tasks, and creating dependencies for new tasks before - generating directed edge relationships between those tasks. - """ - ''' - # Sanity check: Only do this on the primary node. 
- if Instance.objects.my_role() == 'secondary': - return None - ''' - - inspector = inspect() - if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'): - active_task_queues = inspector.active() - else: - logger.warn("Ignoring celery task inspector") - active_task_queues = None - - do_spawn_workflow_jobs() - - all_sorted_tasks = get_tasks() - if not len(all_sorted_tasks): - print("All sorted task len is not? <%s, %s>" % (len(all_sorted_tasks), all_sorted_tasks)) - return None - - active_tasks = [] - if active_task_queues is not None: - for queue in active_task_queues: - active_tasks += [at['id'] for at in active_task_queues[queue]] - else: - logger.error("Could not communicate with celery!") - # TODO: Something needs to be done here to signal to the system - # as a whole that celery appears to be down. - if not hasattr(settings, 'CELERY_UNIT_TEST'): - return None - - running_tasks = filter(lambda t: t.status == 'running', all_sorted_tasks) - waiting_tasks = filter(lambda t: t.status != 'running', all_sorted_tasks) - new_tasks = filter(lambda t: t.status == 'pending', all_sorted_tasks) - - # Check running tasks and make sure they are active in celery - logger.debug("Active celery tasks: " + str(active_tasks)) - for task in list(running_tasks): - if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')): - # NOTE: Pull status again and make sure it didn't finish in - # the meantime? - task.status = 'failed' - task.job_explanation += ' '.join(( - 'Task was marked as running in Tower but was not present in', - 'Celery, so it has been marked as failed.', - )) - task.save() - task.socketio_emit_status("failed") - running_tasks.pop(running_tasks.index(task)) - logger.error("Task %s appears orphaned... marking as failed" % task) - - # Create and process dependencies for new tasks - for task in new_tasks: - logger.debug("Checking dependencies for: %s" % str(task)) - try: - task_dependencies = task.generate_dependencies(running_tasks + waiting_tasks) - except Exception, e: - logger.error("Failed processing dependencies for {}: {}".format(task, e)) - task.status = 'failed' - task.job_explanation += 'Task failed to generate dependencies: {}'.format(e) - task.save() - task.socketio_emit_status("failed") - continue - logger.debug("New dependencies: %s" % str(task_dependencies)) - for dep in task_dependencies: - # We recalculate the created time for the moment to ensure the - # dependencies are always sorted in the right order relative to - # the dependent task. - time_delt = len(task_dependencies) - task_dependencies.index(dep) - dep.created = task.created - datetime.timedelta(seconds=1 + time_delt) - dep.status = 'waiting' - dep.save() - waiting_tasks.insert(waiting_tasks.index(task), dep) - if not hasattr(settings, 'UNIT_TEST_IGNORE_TASK_WAIT'): - task.status = 'waiting' - task.save() - - # Rebuild graph - graph = SimpleDAG() - for task in running_tasks: - graph.add_node(task) - for wait_task in waiting_tasks[:50]: - node_dependencies = [] - for node in graph: - if wait_task.is_blocked_by(node['node_object']): - node_dependencies.append(node['node_object']) - graph.add_node(wait_task) - for dependency in node_dependencies: - graph.add_edge(wait_task, dependency) - if settings.DEBUG: - graph.generate_graphviz_plot() - return graph - -def process_graph(graph, task_capacity): - """Given a task dependency graph, start and manage tasks given their - priority and weight. 
- """ - leaf_nodes = graph.get_leaf_nodes() - running_nodes = filter(lambda x: x['node_object'].status == 'running', leaf_nodes) - running_impact = sum([t['node_object'].task_impact for t in running_nodes]) - ready_nodes = filter(lambda x: x['node_object'].status != 'running', leaf_nodes) - remaining_volume = task_capacity - running_impact - logger.info('Running Nodes: %s; Capacity: %s; Running Impact: %s; ' - 'Remaining Capacity: %s' % - (str(running_nodes), str(task_capacity), - str(running_impact), str(remaining_volume))) - logger.info("Ready Nodes: %s" % str(ready_nodes)) - for task_node in ready_nodes: - node_obj = task_node['node_object'] - # NOTE: This could be used to pass metadata through the task system - # node_args = task_node['metadata'] - impact = node_obj.task_impact - if impact <= remaining_volume or running_impact == 0: - node_dependencies = graph.get_dependents(node_obj) - # Allow other tasks to continue if a job fails, even if they are - # other jobs. - if graph.get_node_type(node_obj) == 'job': - node_dependencies = [] - dependent_nodes = [{'type': graph.get_node_type(node_obj), 'id': node_obj.id}] + \ - [{'type': graph.get_node_type(n['node_object']), - 'id': n['node_object'].id} for n in node_dependencies] - error_handler = handle_work_error.s(subtasks=dependent_nodes) - success_handler = handle_work_success.s(task_actual={'type': graph.get_node_type(node_obj), - 'id': node_obj.id}) - start_status = node_obj.start(error_callback=error_handler, success_callback=success_handler) - if not start_status: - node_obj.status = 'failed' - if node_obj.job_explanation: - node_obj.job_explanation += ' ' - node_obj.job_explanation += 'Task failed pre-start check.' - node_obj.save() - continue - remaining_volume -= impact - running_impact += impact - logger.info('Started Node: %s (capacity hit: %s) ' - 'Remaining Capacity: %s' % - (str(node_obj), str(impact), str(remaining_volume))) - - -#logger = logging.getLogger('awx.main.scheduler') - -class CallbackBrokerWorker(ConsumerMixin): - - def __init__(self, connection): - self.connection = connection - - def get_consumers(self, Consumer, channel): - print("get_consumers() OK") - return [Consumer(queues=[Queue(settings.SCHEDULER_QUEUE, - Exchange(settings.SCHEDULER_QUEUE, type='topic'), - routing_key='scheduler.job.launch'),], - accept=['json'], - callbacks=[self.process_job_launch,]), - Consumer(queues=[Queue(settings.SCHEDULER_QUEUE, - Exchange(settings.SCHEDULER_QUEUE, type='topic'), - routing_key='scheduler.job.complete'),], - accept=['json'], - callbacks=[self.process_job_complete,] - )] - - def schedule(self): - task_capacity = get_system_task_capacity() - graph = rebuild_graph() - if graph: - process_graph(graph, task_capacity) - - def process_job_msg(self, body, message): - try: - if settings.DEBUG: - logger.info("Body: {}".format(body)) - logger.info("Message: {}".format(message)) - - if "msg_type" not in body: - raise Exception("Payload does not have a msg_type") - if "job_id" not in body: - raise Exception("Payload does not have a job_id") - - func = getattr(self, "process_%s" % body['msg_type'], None) - if not func: - raise AttributeError("No processor for message type %s" % body['msg_type']) - func(body) - - # Raised by processors when msg isn't in the expected form. 
- except LookupError as e: - logger.error(e) - except AttributeError as e: - logger.error(e) - except Exception as exc: - import traceback - traceback.print_exc() - logger.error('Callback Task Processor Raised Exception: %r', exc) - finally: - message.ack() - self.schedule() - - def process_job_launch(self, body, message): - print("process_job_launch()") - if "job_id" not in body: - raise KeyError("Payload does not contain job_id") - - ''' - Wait for job to exist. - The job is created in a transaction then the message is created, but - the transaction may not have completed. - - FIXME: We could generate the message in a Django signal handler. - OR, we could call an explicit commit in the view and then send the - message. - - ''' - retries = 10 - retry = 0 - while not UnifiedJob.objects.filter(id=body['job_id']).exists(): - time.sleep(0.3) - - if retry >= retries: - logger.error("Failed to process 'job_launch' message for job %d" % body['job_id']) - # ack the message so we don't build up the queue. - # - # The job can still be chosen to run during tower startup or - # when another job is started or completes - message.ack() - return - retry += 1 - - job = UnifiedJob.objects.get(id=body['job_id']) - - self.schedule() - message.ack() - - def process_job_complete(self, body, message): - print("process_job_complete()") - if "job_id" not in body: - raise KeyError("Payload does not contain job_id") - - # TODO: use list of finished status from jobs.py or unified_jobs.py - finished_status = ['successful', 'error', 'failed', 'completed'] - q = UnifiedJob.objects.filter(id=body['job_id']) - - # Ensure that the job is updated in the database before we call to - # schedule the next job. - retries = 10 - retry = 0 - while True: - # Job not found, most likely deleted. That's fine - if not q.exists(): - logger.warn("Failed to find job '%d' while processing 'job_complete' message. Presume that it was deleted." % body['job_id']) - break - - job = q[0] - if job.status in finished_status: - break - - time.sleep(0.3) - - if retry >= retries: - logger.error("Expected job status '%s' to be one of '%s' while processing 'job_complete' message." % (job.status, finished_status)) - message.ack() - return - retry += 1 - - message.ack() - self.schedule() - -class Command(NoArgsCommand): - """Tower Task Management System - This daemon is designed to reside between our tasks and celery and - provide a mechanism for understanding the relationship between those tasks - and their dependencies. - - It also actively prevents situations in which Tower can get blocked - because it doesn't have an understanding of what is progressing through - celery. 
- """ - help = 'Launch the Tower task management system' - - def handle_noargs(self, **options): - with Connection(settings.BROKER_URL) as conn: - try: - worker = CallbackBrokerWorker(conn) - worker.run() - except KeyboardInterrupt: - print('Terminating Task Management System') - - diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index 6806ff7d16..a81bcb6aca 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -852,16 +852,8 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique self.update_fields(start_args=json.dumps(kwargs), status='pending') self.socketio_emit_status("pending") - from kombu import Connection, Exchange, Producer - connection = Connection(settings.BROKER_URL) - exchange = Exchange(settings.SCHEDULER_QUEUE, type='topic') - producer = Producer(connection) - producer.publish({ 'msg_type': 'job_launch', 'job_id': self.id }, - serializer='json', - compression='bzip2', - exchange=exchange, - declare=[exchange], - routing_key='scheduler.job.launch') + from awx.main.scheduler.tasks import run_job_launch + run_job_launch.delay(self.id) # Each type of unified job has a different Task class; get the # appropirate one. diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index e69de29bb2..1c3a1bc515 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -0,0 +1,222 @@ +#Copyright (c) 2015 Ansible, Inc. +# All Rights Reserved + +# Python +import datetime +import logging + +# Django +from django.conf import settings + +# AWX +from awx.main.models import * # noqa +from awx.main.tasks import handle_work_error, handle_work_success +from awx.main.utils import get_system_task_capacity +from awx.main.scheduler.dag_simple import SimpleDAG +from awx.main.scheduler.dag_workflow import WorkflowDAG + +# Celery +from celery.task.control import inspect + +logger = logging.getLogger('awx.main.scheduler') + +def get_tasks(): + """Fetch all Tower tasks that are relevant to the task management + system. + """ + RELEVANT_JOBS = ('pending', 'waiting', 'running') + # TODO: Replace this when we can grab all objects in a sane way. + graph_jobs = [j for j in Job.objects.filter(status__in=RELEVANT_JOBS)] + graph_ad_hoc_commands = [ahc for ahc in AdHocCommand.objects.filter(status__in=RELEVANT_JOBS)] + graph_inventory_updates = [iu for iu in + InventoryUpdate.objects.filter(status__in=RELEVANT_JOBS)] + graph_project_updates = [pu for pu in + ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS)] + graph_system_jobs = [sj for sj in + SystemJob.objects.filter(status__in=RELEVANT_JOBS)] + graph_workflow_jobs = [wf for wf in + WorkflowJob.objects.filter(status__in=RELEVANT_JOBS)] + all_actions = sorted(graph_jobs + graph_ad_hoc_commands + graph_inventory_updates + + graph_project_updates + graph_system_jobs + + graph_workflow_jobs, + key=lambda task: task.created) + return all_actions + +def get_running_workflow_jobs(): + graph_workflow_jobs = [wf for wf in + WorkflowJob.objects.filter(status='running')] + return graph_workflow_jobs + +def do_spawn_workflow_jobs(): + workflow_jobs = get_running_workflow_jobs() + for workflow_job in workflow_jobs: + dag = WorkflowDAG(workflow_job) + spawn_nodes = dag.bfs_nodes_to_run() + for spawn_node in spawn_nodes: + # TODO: Inject job template template params as kwargs. 
+ # Make sure to take into account extra_vars merge logic + kv = {} + job = spawn_node.unified_job_template.create_unified_job(**kv) + spawn_node.job = job + spawn_node.save() + can_start = job.signal_start(**kv) + if not can_start: + job.status = 'failed' + job.job_explanation = "Workflow job could not start because it was not in the right state or required manual credentials" + job.save(update_fields=['status', 'job_explanation']) + job.socketio_emit_status("failed") + + # TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ? + #emit_websocket_notification('/socket.io/jobs', '', dict(id=)) + + +def rebuild_graph(): + """Regenerate the task graph by refreshing known tasks from Tower, purging + orphaned running tasks, and creating dependencies for new tasks before + generating directed edge relationships between those tasks. + """ + ''' + # Sanity check: Only do this on the primary node. + if Instance.objects.my_role() == 'secondary': + return None + ''' + + inspector = inspect() + if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'): + active_task_queues = inspector.active() + else: + logger.warn("Ignoring celery task inspector") + active_task_queues = None + + do_spawn_workflow_jobs() + + all_sorted_tasks = get_tasks() + if not len(all_sorted_tasks): + return None + + active_tasks = [] + if active_task_queues is not None: + for queue in active_task_queues: + active_tasks += [at['id'] for at in active_task_queues[queue]] + else: + logger.error("Could not communicate with celery!") + # TODO: Something needs to be done here to signal to the system + # as a whole that celery appears to be down. + if not hasattr(settings, 'CELERY_UNIT_TEST'): + return None + + running_tasks = filter(lambda t: t.status == 'running', all_sorted_tasks) + waiting_tasks = filter(lambda t: t.status != 'running', all_sorted_tasks) + new_tasks = filter(lambda t: t.status == 'pending', all_sorted_tasks) + + # Check running tasks and make sure they are active in celery + logger.debug("Active celery tasks: " + str(active_tasks)) + for task in list(running_tasks): + if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')): + # NOTE: Pull status again and make sure it didn't finish in + # the meantime? + task.status = 'failed' + task.job_explanation += ' '.join(( + 'Task was marked as running in Tower but was not present in', + 'Celery, so it has been marked as failed.', + )) + task.save() + task.socketio_emit_status("failed") + running_tasks.pop(running_tasks.index(task)) + logger.error("Task %s appears orphaned... marking as failed" % task) + + # Create and process dependencies for new tasks + for task in new_tasks: + logger.debug("Checking dependencies for: %s" % str(task)) + try: + task_dependencies = task.generate_dependencies(running_tasks + waiting_tasks) + except Exception, e: + logger.error("Failed processing dependencies for {}: {}".format(task, e)) + task.status = 'failed' + task.job_explanation += 'Task failed to generate dependencies: {}'.format(e) + task.save() + task.socketio_emit_status("failed") + continue + logger.debug("New dependencies: %s" % str(task_dependencies)) + for dep in task_dependencies: + # We recalculate the created time for the moment to ensure the + # dependencies are always sorted in the right order relative to + # the dependent task. 
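+            # Example: a task created at time T with two dependencies
+            # gets dep[0].created = T - 3s and dep[1].created = T - 2s,
+            # so a sort on 'created' always yields dep[0], dep[1], task.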
+ time_delt = len(task_dependencies) - task_dependencies.index(dep) + dep.created = task.created - datetime.timedelta(seconds=1 + time_delt) + dep.status = 'waiting' + dep.save() + waiting_tasks.insert(waiting_tasks.index(task), dep) + if not hasattr(settings, 'UNIT_TEST_IGNORE_TASK_WAIT'): + task.status = 'waiting' + task.save() + + # Rebuild graph + graph = SimpleDAG() + for task in running_tasks: + graph.add_node(task) + for wait_task in waiting_tasks[:50]: + node_dependencies = [] + for node in graph: + if wait_task.is_blocked_by(node['node_object']): + node_dependencies.append(node['node_object']) + graph.add_node(wait_task) + for dependency in node_dependencies: + graph.add_edge(wait_task, dependency) + if settings.DEBUG: + graph.generate_graphviz_plot() + return graph + +def process_graph(graph, task_capacity): + """Given a task dependency graph, start and manage tasks given their + priority and weight. + """ + leaf_nodes = graph.get_leaf_nodes() + running_nodes = filter(lambda x: x['node_object'].status == 'running', leaf_nodes) + running_impact = sum([t['node_object'].task_impact for t in running_nodes]) + ready_nodes = filter(lambda x: x['node_object'].status != 'running', leaf_nodes) + remaining_volume = task_capacity - running_impact + logger.info('Running Nodes: %s; Capacity: %s; Running Impact: %s; ' + 'Remaining Capacity: %s' % + (str(running_nodes), str(task_capacity), + str(running_impact), str(remaining_volume))) + logger.info("Ready Nodes: %s" % str(ready_nodes)) + for task_node in ready_nodes: + node_obj = task_node['node_object'] + # NOTE: This could be used to pass metadata through the task system + # node_args = task_node['metadata'] + impact = node_obj.task_impact + if impact <= remaining_volume or running_impact == 0: + node_dependencies = graph.get_dependents(node_obj) + # Allow other tasks to continue if a job fails, even if they are + # other jobs. + if graph.get_node_type(node_obj) == 'job': + node_dependencies = [] + dependent_nodes = [{'type': graph.get_node_type(node_obj), 'id': node_obj.id}] + \ + [{'type': graph.get_node_type(n['node_object']), + 'id': n['node_object'].id} for n in node_dependencies] + error_handler = handle_work_error.s(subtasks=dependent_nodes) + success_handler = handle_work_success.s(task_actual={'type': graph.get_node_type(node_obj), + 'id': node_obj.id}) + start_status = node_obj.start(error_callback=error_handler, success_callback=success_handler) + if not start_status: + node_obj.status = 'failed' + if node_obj.job_explanation: + node_obj.job_explanation += ' ' + node_obj.job_explanation += 'Task failed pre-start check.' 
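+            # Persist the failure and continue; one node failing its
+            # pre-start check must not stall the rest of the ready set.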
+ node_obj.save() + continue + remaining_volume -= impact + running_impact += impact + logger.info('Started Node: %s (capacity hit: %s) ' + 'Remaining Capacity: %s' % + (str(node_obj), str(impact), str(remaining_volume))) + + + +def schedule(): + task_capacity = get_system_task_capacity() + graph = rebuild_graph() + if graph: + process_graph(graph, task_capacity) + diff --git a/awx/main/scheduler/dag_simple.py b/awx/main/scheduler/dag_simple.py index f04c60159a..79b20520e2 100644 --- a/awx/main/scheduler/dag_simple.py +++ b/awx/main/scheduler/dag_simple.py @@ -1,5 +1,12 @@ -from awx.main.models import * # noqa +from awx.main.models import ( + Job, + AdHocCommand, + InventoryUpdate, + ProjectUpdate, + WorkflowJob, + SystemJob, +) class SimpleDAG(object): ''' A simple implementation of a directed acyclic graph ''' diff --git a/awx/main/scheduler/dag_workflow.py b/awx/main/scheduler/dag_workflow.py index 1a8269c064..c891b2ec32 100644 --- a/awx/main/scheduler/dag_workflow.py +++ b/awx/main/scheduler/dag_workflow.py @@ -1,4 +1,6 @@ -from dag_simple import SimpleDAG + +# AWX +from awx.main.scheduler.dag_simple import SimpleDAG class WorkflowDAG(SimpleDAG): def __init__(self, workflow_job=None): @@ -41,8 +43,6 @@ class WorkflowDAG(SimpleDAG): elif job.status in ['successful']: children_success = self.get_dependencies(obj, 'success_nodes') nodes.extend(children_success) - else: - logger.warn("Incorrect graph structure") return [n['node_object'] for n in nodes_found] def is_workflow_done(self): @@ -68,7 +68,5 @@ class WorkflowDAG(SimpleDAG): elif job.status in ['successful']: children_success = self.get_dependencies(obj, 'success_nodes') nodes.extend(children_success) - else: - logger.warn("Incorrect graph structure") return True diff --git a/awx/main/scheduler/tasks.py b/awx/main/scheduler/tasks.py new file mode 100644 index 0000000000..343bdd1546 --- /dev/null +++ b/awx/main/scheduler/tasks.py @@ -0,0 +1,79 @@ + +# Python +import logging +import time + +# Celery +from celery import task + +# AWX +from awx.main.models import UnifiedJob +from awx.main.scheduler import schedule + +logger = logging.getLogger('awx.main.scheduler') + +# TODO: move logic to UnifiedJob model and use bind=True feature of celery. +# Would we need the request loop then? I think so. Even if we get the in-memory +# updated model, the call to schedule() may get stale data. + +@task +def run_job_launch(job_id): + # Wait for job to exist. + # The job is created in a transaction then the message is created, but + # the transaction may not have completed. + + # FIXME: We could generate the message in a Django signal handler. + # OR, we could call an explicit commit in the view and then send the + # message. + + retries = 10 + retry = 0 + while not UnifiedJob.objects.filter(id=job_id).exists(): + time.sleep(0.3) + + if retry >= retries: + logger.error("Failed to process 'job_launch' message for job %d" % job_id) + # ack the message so we don't build up the queue. + # + # The job can still be chosen to run during tower startup or + # when another job is started or completes + return + retry += 1 + + # "Safe" to get the job now since it exists. 
+ # Really, there is a race condition from exists to get + + # TODO: while not loop should call get wrapped in a try except + #job = UnifiedJob.objects.get(id=job_id) + + schedule() + +@task +def run_job_complete(job_id): + # TODO: use list of finished status from jobs.py or unified_jobs.py + finished_status = ['successful', 'error', 'failed', 'completed'] + q = UnifiedJob.objects.filter(id=job_id) + + # Ensure that the job is updated in the database before we call to + # schedule the next job. + retries = 10 + retry = 0 + while True: + # Job not found, most likely deleted. That's fine + if not q.exists(): + logger.warn("Failed to find job '%d' while processing 'job_complete' message. Presume that it was deleted." % job_id) + break + + job = q[0] + if job.status in finished_status: + break + + time.sleep(0.3) + + if retry >= retries: + logger.error("Expected job status '%s' to be one of '%s' while processing 'job_complete' message." % (job.status, finished_status)) + return + retry += 1 + + schedule() + diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 86552b6404..31db196a9b 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -31,9 +31,6 @@ except: # Pexpect import pexpect -# Kombu -from kombu import Connection, Exchange, Queue, Producer - # Celery from celery import Task, task from celery.signals import celeryd_init @@ -50,18 +47,18 @@ from django.contrib.auth.models import User from awx.main.constants import CLOUD_PROVIDERS from awx.main.models import * # noqa from awx.main.models import UnifiedJob -from awx.main.queue import FifoQueue from awx.main.conf import tower_settings from awx.main.task_engine import TaskSerializer, TASK_TIMEOUT_INTERVAL from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url, emit_websocket_notification, check_proot_installed, build_proot_temp_dir, wrap_args_with_proot) +from awx.main.scheduler.dag_workflow import WorkflowDAG __all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate', 'RunAdHocCommand', 'RunWorkflowJob', 'handle_work_error', 'handle_work_success', 'update_inventory_computed_fields', 'send_notifications', 'run_administrative_checks', - 'run_workflow_job'] + 'RunJobLaunch'] HIDDEN_PASSWORD = '**********' @@ -182,14 +179,6 @@ def tower_periodic_scheduler(self): new_unified_job.socketio_emit_status("failed") emit_websocket_notification('/socket.io/schedules', 'schedule_changed', dict(id=schedule.id)) -@task(queue='default') -def notify_task_runner(metadata_dict): - """Add the given task into the Tower task manager's queue, to be consumed - by the task system. 
- """ - queue = FifoQueue('tower_task_manager') - queue.push(metadata_dict) - def _send_notification_templates(instance, status_str): if status_str not in ['succeeded', 'failed']: raise ValueError("status_str must be either succeeded or failed") @@ -206,17 +195,6 @@ def _send_notification_templates(instance, status_str): job_id=instance.id) -def _send_job_complete_msg(instance): - connection = Connection(settings.BROKER_URL) - exchange = Exchange(settings.SCHEDULER_QUEUE, type='topic') - producer = Producer(connection) - producer.publish({ 'job_id': instance.id, 'msg_type': 'job_complete' }, - serializer='json', - compression='bzip2', - exchange=exchange, - declare=[exchange], - routing_key='scheduler.job.complete') - @task(bind=True, queue='default') def handle_work_success(self, result, task_actual): instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id']) @@ -225,7 +203,8 @@ def handle_work_success(self, result, task_actual): _send_notification_templates(instance, 'succeeded') - _send_job_complete_msg(instance) + from awx.main.scheduler.tasks import run_job_complete + run_job_complete.delay(instance.id) @task(bind=True, queue='default') def handle_work_error(self, task_id, subtasks=None): @@ -256,8 +235,14 @@ def handle_work_error(self, task_id, subtasks=None): if first_instance: _send_notification_templates(first_instance, 'failed') + # We only send 1 job complete message since all the job completion message + # handling does is trigger the scheduler. If we extend the functionality of + # what the job complete message handler does then we may want to send a + # completion event for each job here. if first_instance: - _send_job_complete_msg(first_instance) + from awx.main.scheduler.tasks import run_job_complete + run_job_complete.delay(first_instance.id) + pass @task(queue='default') def update_inventory_computed_fields(inventory_id, should_update_hosts=True): @@ -323,10 +308,6 @@ class BaseTask(Task): logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt) - def signal_finished(self, pk): - pass - # notify_task_runner(dict(complete=pk)) - def get_path_to(self, *args): ''' Return absolute path relative to this file. @@ -1690,7 +1671,7 @@ class RunWorkflowJob(BaseTask): model = WorkflowJob def run(self, pk, **kwargs): - from awx.main.management.commands.run_task_system import WorkflowDAG + print("I'm a running a workflow job") ''' Run the job/task and capture its output. 
''' diff --git a/awx/main/tests/base.py b/awx/main/tests/base.py index 6b35297a07..34be4081b8 100644 --- a/awx/main/tests/base.py +++ b/awx/main/tests/base.py @@ -30,7 +30,7 @@ from django.utils.encoding import force_text # AWX from awx.main.models import * # noqa -from awx.main.management.commands.run_task_system import run_taskmanager +from awx.main.management.commands.run_callback_receiver import CallbackReceiver from awx.main.utils import get_ansible_version from awx.main.task_engine import TaskEngager as LicenseWriter from awx.sso.backends import LDAPSettings @@ -654,18 +654,6 @@ class BaseTestMixin(MockCommonlySlowTestMixin): u'expected no traceback, got:\n%s' % job.result_traceback) - - def start_taskmanager(self, command_port): - self.start_redis() - self.taskmanager_process = Process(target=run_taskmanager, - args=(command_port,)) - self.taskmanager_process.start() - - def terminate_taskmanager(self): - if hasattr(self, 'taskmanager_process'): - self.taskmanager_process.terminate() - self.stop_redis() - class BaseTest(BaseTestMixin, django.test.TestCase): ''' Base class for unit tests. diff --git a/awx/main/tests/unit/commands/test_run_task_system.py b/awx/main/tests/unit/scheduler/test_dag.py similarity index 97% rename from awx/main/tests/unit/commands/test_run_task_system.py rename to awx/main/tests/unit/scheduler/test_dag.py index bc62394b21..84fb2d37f2 100644 --- a/awx/main/tests/unit/commands/test_run_task_system.py +++ b/awx/main/tests/unit/scheduler/test_dag.py @@ -1,10 +1,12 @@ -from awx.main.management.commands.run_task_system import ( - SimpleDAG, - WorkflowDAG, -) + +# Python +import pytest + +# AWX +from awx.main.scheduler.dag_simple import SimpleDAG +from awx.main.scheduler.dag_workflow import WorkflowDAG from awx.main.models import Job from awx.main.models.workflow import WorkflowJobNode -import pytest @pytest.fixture def dag_root(): diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 8a65d7d322..20a80ecca4 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -339,10 +339,11 @@ CELERYD_TASK_SOFT_TIME_LIMIT = None CELERYBEAT_SCHEDULER = 'celery.beat.PersistentScheduler' CELERYBEAT_MAX_LOOP_INTERVAL = 60 CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' +CELERY_IMPORTS = ('awx.main.scheduler.tasks',) CELERY_QUEUES = ( Queue('default', Exchange('default'), routing_key='default'), Queue('jobs', Exchange('jobs'), routing_key='jobs'), - #Queue('scheduler', Exchange('scheduler'), routing_key='scheduler.job.#'), + Queue('scheduler', Exchange('scheduler', type='topic'), routing_key='scheduler.job.#'), # Projects use a fanout queue, this isn't super well supported Broadcast('projects'), ) @@ -354,7 +355,11 @@ CELERY_ROUTES = ({'awx.main.tasks.run_job': {'queue': 'jobs', 'awx.main.tasks.run_ad_hoc_command': {'queue': 'jobs', 'routing_key': 'jobs'}, 'awx.main.tasks.run_system_job': {'queue': 'jobs', - 'routing_key': 'jobs'}}) + 'routing_key': 'jobs'}, + 'awx.main.scheduler.tasks.run_job_launch': {'queue': 'scheduler', + 'routing_key': 'scheduler.job.launch'}, + 'awx.main.scheduler.tasks.run_job_complete': {'queue': 'scheduler', + 'routing_key': 'scheduler.job.complete'},}) CELERYBEAT_SCHEDULE = { 'tower_scheduler': { @@ -1040,7 +1045,7 @@ LOGGING = { 'handlers': ['console', 'file', 'socketio_service'], 'propagate': False }, - 'awx.main.commands.run_task_system': { + 'awx.main.tasks': { 'handlers': ['console', 'file', 'task_system'], 'propagate': False }, From 04f69727f214b1286904cac2557a852126e3e3c7 Mon Sep 17 00:00:00 
2001 From: Chris Meyers Date: Thu, 22 Sep 2016 15:36:58 -0400 Subject: [PATCH 12/37] fully message driven job execution TODO: * Need a distributed lock (leverage postgres) * Less memory-intensive graph representation * Maybe serializer/deserializer graph to database * Iterative graph building instead of full rebuild. --- awx/main/models/unified_jobs.py | 32 ++++++++++----- awx/main/models/workflow.py | 8 ++++ awx/main/scheduler/__init__.py | 72 ++++++++++++++++++++++++--------- awx/main/tasks.py | 23 +++++++---- awx/settings/defaults.py | 2 +- 5 files changed, 100 insertions(+), 37 deletions(-) diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index a81bcb6aca..bcdde810e9 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -798,34 +798,43 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique status=self.status, traceback=self.result_traceback) - def start(self, error_callback, success_callback, **kwargs): - ''' - Start the task running via Celery. - ''' - task_class = self._get_task_class() + def pre_start(self, **kwargs): if not self.can_start: self.job_explanation = u'%s is not in a startable status: %s, expecting one of %s' % (self._meta.verbose_name, self.status, str(('new', 'waiting'))) self.save(update_fields=['job_explanation']) - return False + return (False, None) + needed = self.get_passwords_needed_to_start() try: start_args = json.loads(decrypt_field(self, 'start_args')) except Exception: start_args = None + if start_args in (None, ''): start_args = kwargs + opts = dict([(field, start_args.get(field, '')) for field in needed]) + if not all(opts.values()): missing_fields = ', '.join([k for k,v in opts.items() if not v]) self.job_explanation = u'Missing needed fields: %s.' % missing_fields self.save(update_fields=['job_explanation']) - return False - #extra_data = dict([(field, kwargs[field]) for field in kwargs - # if field not in needed]) + return (False, None) + if 'extra_vars' in kwargs: self.handle_extra_data(kwargs['extra_vars']) - task_class().apply_async((self.pk,), opts, link_error=error_callback, link=success_callback) - return True + + return (True, opts) + + def start(self, error_callback, success_callback, **kwargs): + ''' + Start the task running via Celery. 
+ ''' + task_class = self._get_task_class() + (res, opts) = self.pre_start(**kwargs) + if res: + task_class().apply_async((self.pk,), opts, link_error=error_callback, link=success_callback) + return res def signal_start(self, **kwargs): """Notify the task runner system to begin work on this task.""" @@ -852,6 +861,7 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique self.update_fields(start_args=json.dumps(kwargs), status='pending') self.socketio_emit_status("pending") + print("Running job launch for job %s" % self.name) from awx.main.scheduler.tasks import run_job_launch run_job_launch.delay(self.id) diff --git a/awx/main/models/workflow.py b/awx/main/models/workflow.py index 3c95fb17e8..68066ee58a 100644 --- a/awx/main/models/workflow.py +++ b/awx/main/models/workflow.py @@ -240,3 +240,11 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, JobNotificationMixin, Workflow def get_notification_friendly_name(self): return "Workflow Job" + def start(self, *args, **kwargs): + (res, opts) = self.pre_start(**kwargs) + if res: + self.status = 'running' + self.save() + self.socketio_emit_status("running") + return res + diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 1c3a1bc515..f10cb2dcd6 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -4,13 +4,14 @@ # Python import datetime import logging +import struct, fcntl, os # Django from django.conf import settings +from django.db import transaction # AWX from awx.main.models import * # noqa -from awx.main.tasks import handle_work_error, handle_work_success from awx.main.utils import get_system_task_capacity from awx.main.scheduler.dag_simple import SimpleDAG from awx.main.scheduler.dag_workflow import WorkflowDAG @@ -47,8 +48,8 @@ def get_running_workflow_jobs(): WorkflowJob.objects.filter(status='running')] return graph_workflow_jobs -def do_spawn_workflow_jobs(): - workflow_jobs = get_running_workflow_jobs() +def spawn_workflow_graph_jobs(workflow_jobs): + # TODO: Consider using transaction.atomic for workflow_job in workflow_jobs: dag = WorkflowDAG(workflow_job) spawn_nodes = dag.bfs_nodes_to_run() @@ -69,6 +70,16 @@ def do_spawn_workflow_jobs(): # TODO: should we emit a status on the socket here similar to tasks.py tower_periodic_scheduler() ? 
#emit_websocket_notification('/socket.io/jobs', '', dict(id=))
 
+# See comment in tasks.py::RunWorkflowJob::run()
+def process_finished_workflow_jobs(workflow_jobs):
+    for workflow_job in workflow_jobs:
+        dag = WorkflowDAG(workflow_job)
+        if dag.is_workflow_done():
+            with transaction.atomic():
+                # TODO: detect if wfj failed
+                workflow_job.status = 'completed'
+                workflow_job.save()
+                workflow_job.socketio_emit_status('completed')
 
 def rebuild_graph():
     """Regenerate the task graph by refreshing known tasks from Tower, purging
@@ -88,8 +99,6 @@ def rebuild_graph():
         logger.warn("Ignoring celery task inspector")
         active_task_queues = None
 
-    do_spawn_workflow_jobs()
-
     all_sorted_tasks = get_tasks()
     if not len(all_sorted_tasks):
         return None
@@ -106,12 +115,13 @@ def rebuild_graph():
         return None
 
     running_tasks = filter(lambda t: t.status == 'running', all_sorted_tasks)
+    running_celery_tasks = filter(lambda t: type(t) != WorkflowJob, running_tasks)
     waiting_tasks = filter(lambda t: t.status != 'running', all_sorted_tasks)
     new_tasks = filter(lambda t: t.status == 'pending', all_sorted_tasks)
 
     # Check running tasks and make sure they are active in celery
     logger.debug("Active celery tasks: " + str(active_tasks))
-    for task in list(running_tasks):
+    for task in list(running_celery_tasks):
         if (task.celery_task_id not in active_tasks and
                 not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')):
             # NOTE: Pull status again and make sure it didn't finish in
             # the meantime?
@@ -122,7 +132,7 @@ def rebuild_graph():
             ))
             task.save()
             task.socketio_emit_status("failed")
-            running_tasks.pop(running_tasks.index(task))
+            running_tasks.remove(task)
             logger.error("Task %s appears orphaned... marking as failed" % task)
 
     # Create and process dependencies for new tasks
@@ -171,6 +181,8 @@ def process_graph(graph, task_capacity):
     """Given a task dependency graph, start and manage tasks given their
     priority and weight.
     """
+    from awx.main.tasks import handle_work_error, handle_work_success
+
     leaf_nodes = graph.get_leaf_nodes()
     running_nodes = filter(lambda x: x['node_object'].status == 'running', leaf_nodes)
     running_impact = sum([t['node_object'].task_impact for t in running_nodes])
@@ -190,33 +202,57 @@ def process_graph(graph, task_capacity):
         node_dependencies = graph.get_dependents(node_obj)
         # Allow other tasks to continue if a job fails, even if they are
         # other jobs.
-        if graph.get_node_type(node_obj) == 'job':
+
+        node_type = graph.get_node_type(node_obj)
+        if node_type == 'job':
+            # clear dependencies because a job can block (not necessarily
+            # depend) on other jobs that share the same job template
            node_dependencies = []
+
+        # Make the workflow_job look like it's started by setting status to
+        # running, but don't make a celery Task for it.
+        # Introduce jobs from the workflow so they are candidates to run.
+        # Call process_graph() again so the newly created candidate jobs
+        # can be chosen to run.
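
An aside for readers following the scheduler rewrite: stripped of the AWX models, the pattern in
this hunk is "expand container nodes in place, then rescan". The sketch below is illustrative only;
Node and schedule_pass are invented names, and the real code tracks task_impact against
remaining_volume rather than mutating a capacity argument:

    # Illustrative sketch: workflow nodes are expanded, not dispatched.
    class Node(object):
        def __init__(self, kind, impact=0, children=None):
            self.kind, self.impact = kind, impact
            self.children = children or []
            self.status = 'pending'

    def schedule_pass(ready, capacity):
        for node in list(ready):
            if node.kind == 'workflow':
                node.status = 'running'      # marked running; no celery task exists
                ready.remove(node)
                ready.extend(node.children)  # children become run candidates
                return schedule_pass(ready, capacity)
            if node.impact <= capacity:
                node.status = 'running'      # a real task would be dispatched here
                capacity -= node.impact
        return capacity

    wf = Node('workflow', children=[Node('job', 10), Node('job', 20)])
    print(schedule_pass([wf], 25))           # -> 15; the 20-impact job must wait
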
+ elif node_type == 'workflow_job': + node_obj.start() + spawn_workflow_graph_jobs([node_obj]) + return process_graph(graph, task_capacity) + dependent_nodes = [{'type': graph.get_node_type(node_obj), 'id': node_obj.id}] + \ [{'type': graph.get_node_type(n['node_object']), 'id': n['node_object'].id} for n in node_dependencies] error_handler = handle_work_error.s(subtasks=dependent_nodes) success_handler = handle_work_success.s(task_actual={'type': graph.get_node_type(node_obj), 'id': node_obj.id}) - start_status = node_obj.start(error_callback=error_handler, success_callback=success_handler) - if not start_status: - node_obj.status = 'failed' - if node_obj.job_explanation: - node_obj.job_explanation += ' ' - node_obj.job_explanation += 'Task failed pre-start check.' - node_obj.save() - continue + with transaction.atomic(): + start_status = node_obj.start(error_callback=error_handler, success_callback=success_handler) + if not start_status: + node_obj.status = 'failed' + if node_obj.job_explanation: + node_obj.job_explanation += ' ' + node_obj.job_explanation += 'Task failed pre-start check.' + node_obj.save() + continue remaining_volume -= impact running_impact += impact logger.info('Started Node: %s (capacity hit: %s) ' 'Remaining Capacity: %s' % (str(node_obj), str(impact), str(remaining_volume))) - - def schedule(): + lockfile = open("/tmp/tower_scheduler.lock", "w") + fcntl.lockf(lockfile, fcntl.LOCK_EX) + task_capacity = get_system_task_capacity() + + workflow_jobs = get_running_workflow_jobs() + process_finished_workflow_jobs(workflow_jobs) + spawn_workflow_graph_jobs(workflow_jobs) + graph = rebuild_graph() if graph: process_graph(graph, task_capacity) + fcntl.lockf(lockfile, fcntl.LOCK_UN) + diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 31db196a9b..bc09c58f60 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -1665,21 +1665,30 @@ class RunSystemJob(BaseTask): def build_cwd(self, instance, **kwargs): return settings.BASE_DIR +''' class RunWorkflowJob(BaseTask): name = 'awx.main.tasks.run_workflow_job' model = WorkflowJob def run(self, pk, **kwargs): - print("I'm a running a workflow job") - ''' - Run the job/task and capture its output. - ''' - pass + #Run the job/task and capture its output. instance = self.update_model(pk, status='running', celery_task_id=self.request.id) instance.socketio_emit_status("running") - # FIXME: Detect workflow run completion + # FIXME: Currently, the workflow job busy waits until the graph run is + # complete. Instead, the workflow job should return or never even run, + # because all of the "launch logic" can be done schedule(). + + # However, other aspects of our system depend on a 1-1 relationship + # between a Job and a Celery Task. + # + # * If we let the workflow job task (RunWorkflowJob.run()) complete + # then how do we trigger the handle_work_error and + # handle_work_success subtasks? + # + # * How do we handle the recovery process? (i.e. there is an entry in + # the database but not in celery). 
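
As context for the FIXME above: the completion hooks it worries about are ordinary Celery callback
signatures, attached at dispatch time via link and link_error (as process_graph does earlier in
this patch). A generic, self-contained illustration using a demo app rather than the AWX tasks:

    # Generic Celery 3.x sketch of the link/link_error pattern.
    from celery import Celery

    app = Celery('demo', broker='amqp://localhost//')

    @app.task
    def run_unit_of_work(pk):
        return pk

    @app.task
    def on_success(result, task_actual=None):
        # invoked with the parent task's return value prepended
        print('finished', result, task_actual)

    @app.task
    def on_error(task_id, subtasks=None):
        # invoked with the failed parent's task id
        print('failed', task_id, subtasks)

    run_unit_of_work.apply_async(
        (42,),
        link=on_success.s(task_actual={'type': 'job', 'id': 42}),
        link_error=on_error.s(subtasks=[{'type': 'job', 'id': 42}]),
    )

Letting RunWorkflowJob return immediately would leave nothing for these callbacks to hang off of,
which is exactly the tension the block being quoted out here describes.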
while True: dag = WorkflowDAG(instance) if dag.is_workflow_done(): @@ -1689,4 +1698,4 @@ class RunWorkflowJob(BaseTask): time.sleep(1) instance.socketio_emit_status(instance.status) # TODO: Handle cancel - +''' diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 20a80ecca4..445ce8924f 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -360,7 +360,7 @@ CELERY_ROUTES = ({'awx.main.tasks.run_job': {'queue': 'jobs', 'routing_key': 'scheduler.job.launch'}, 'awx.main.scheduler.tasks.run_job_complete': {'queue': 'scheduler', 'routing_key': 'scheduler.job.complete'},}) - + CELERYBEAT_SCHEDULE = { 'tower_scheduler': { 'task': 'awx.main.tasks.tower_periodic_scheduler', From 3a8033dec4c9fecaa34b39791a5ddc33bbdccbd7 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 22 Sep 2016 17:54:27 -0400 Subject: [PATCH 13/37] cheesy global lock --- awx/main/scheduler/__init__.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index f10cb2dcd6..e670fa6300 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -241,18 +241,18 @@ def process_graph(graph, task_capacity): (str(node_obj), str(impact), str(remaining_volume))) def schedule(): - lockfile = open("/tmp/tower_scheduler.lock", "w") - fcntl.lockf(lockfile, fcntl.LOCK_EX) + with transaction.atomic(): + # Lock + Instance.objects.select_for_update().all()[0] - task_capacity = get_system_task_capacity() + task_capacity = get_system_task_capacity() - workflow_jobs = get_running_workflow_jobs() - process_finished_workflow_jobs(workflow_jobs) - spawn_workflow_graph_jobs(workflow_jobs) + workflow_jobs = get_running_workflow_jobs() + process_finished_workflow_jobs(workflow_jobs) + spawn_workflow_graph_jobs(workflow_jobs) - graph = rebuild_graph() - if graph: - process_graph(graph, task_capacity) - - fcntl.lockf(lockfile, fcntl.LOCK_UN) + graph = rebuild_graph() + if graph: + process_graph(graph, task_capacity) + # Unlock, due to transaction ending From 89250dcf3686ac06f752b94fb3ac6f070ccaf9cc Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 27 Sep 2016 16:04:00 -0400 Subject: [PATCH 14/37] removed wait_task look restriction --- awx/main/scheduler/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index e670fa6300..24958c0e0d 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -165,7 +165,7 @@ def rebuild_graph(): graph = SimpleDAG() for task in running_tasks: graph.add_node(task) - for wait_task in waiting_tasks[:50]: + for wait_task in waiting_tasks: node_dependencies = [] for node in graph: if wait_task.is_blocked_by(node['node_object']): From d65120538dc6bd75d994f2d1497258c5dcd15c97 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 27 Sep 2016 16:05:30 -0400 Subject: [PATCH 15/37] scheduler messages need not be durable --- awx/settings/defaults.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 445ce8924f..2e3858e331 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -343,7 +343,7 @@ CELERY_IMPORTS = ('awx.main.scheduler.tasks',) CELERY_QUEUES = ( Queue('default', Exchange('default'), routing_key='default'), Queue('jobs', Exchange('jobs'), routing_key='jobs'), - Queue('scheduler', Exchange('scheduler', type='topic'), routing_key='scheduler.job.#'), + 
Queue('scheduler', Exchange('scheduler', type='topic'), routing_key='scheduler.job.#', durable=False), # Projects use a fanout queue, this isn't super well supported Broadcast('projects'), ) From 0007ef2546378ac957a0016099418545b2d6ac91 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Wed, 28 Sep 2016 09:58:19 -0400 Subject: [PATCH 16/37] Add --skip-errors option to migrate_to_database_settings command, allow any false/null value for 'off' in pendo setting. --- .../commands/migrate_to_database_settings.py | 86 ++++++++++++------- awx/ui/conf.py | 11 ++- 2 files changed, 67 insertions(+), 30 deletions(-) diff --git a/awx/conf/management/commands/migrate_to_database_settings.py b/awx/conf/management/commands/migrate_to_database_settings.py index 36fd783475..f708ae4a1d 100644 --- a/awx/conf/management/commands/migrate_to_database_settings.py +++ b/awx/conf/management/commands/migrate_to_database_settings.py @@ -38,6 +38,13 @@ class Command(BaseCommand): default=False, help='Only show which settings would be commented/migrated.', ) + parser.add_argument( + '--skip-errors', + action='store_true', + dest='skip_errors', + default=False, + help='Skip over settings that would raise an error when commenting/migrating.', + ) parser.add_argument( '--no-comment', action='store_true', @@ -56,6 +63,7 @@ class Command(BaseCommand): def handle(self, *args, **options): self.verbosity = int(options.get('verbosity', 1)) self.dry_run = bool(options.get('dry_run', False)) + self.skip_errors = bool(options.get('skip_errors', False)) self.no_comment = bool(options.get('no_comment', False)) self.backup_suffix = options.get('backup_suffix', '') self.categories = options.get('category', None) or ['all'] @@ -134,17 +142,14 @@ class Command(BaseCommand): def _check_if_needs_comment(self, patterns, setting): files_to_comment = [] - try: - # If any diffs are returned, this setting needs to be commented. - diffs = comment_assignments(patterns, setting, dry_run=True) - if setting == 'LICENSE': - diffs.extend(self._comment_license_file(dry_run=True)) - for diff in diffs: - for line in diff.splitlines(): - if line.startswith('+++ '): - files_to_comment.append(line[4:]) - except Exception as e: - raise CommandError('Error commenting {0}: {1!r}'.format(setting, e)) + # If any diffs are returned, this setting needs to be commented. 
+ diffs = comment_assignments(patterns, setting, dry_run=True) + if setting == 'LICENSE': + diffs.extend(self._comment_license_file(dry_run=True)) + for diff in diffs: + for line in diff.splitlines(): + if line.startswith('+++ '): + files_to_comment.append(line[4:]) return files_to_comment def _check_if_needs_migration(self, setting): @@ -163,26 +168,39 @@ class Command(BaseCommand): return current_value return empty - def _display_tbd(self, setting, files_to_comment, migrate_value): + def _display_tbd(self, setting, files_to_comment, migrate_value, comment_error=None, migrate_error=None): if self.verbosity >= 1: if files_to_comment: if migrate_value is not empty: action = 'Migrate + Comment' else: action = 'Comment' + if comment_error or migrate_error: + action = self.style.ERROR('{} (skipped)'.format(action)) + else: + action = self.style.OK(action) self.stdout.write(' {}: {}'.format( self.style.LABEL(setting), - self.style.OK(action), + action, )) if self.verbosity >= 2: - if migrate_value is not empty: + if migrate_error: + self.stdout.write(' - Migrate value: {}'.format( + self.style.ERROR(migrate_error), + )) + elif migrate_value is not empty: self.stdout.write(' - Migrate value: {}'.format( self.style.VALUE(repr(migrate_value)), )) - for file_to_comment in files_to_comment: - self.stdout.write(' - Comment in: {}'.format( - self.style.VALUE(file_to_comment), + if comment_error: + self.stdout.write(' - Comment: {}'.format( + self.style.ERROR(comment_error), )) + elif files_to_comment: + for file_to_comment in files_to_comment: + self.stdout.write(' - Comment in: {}'.format( + self.style.VALUE(file_to_comment), + )) else: if self.verbosity >= 2: self.stdout.write(' {}: {}'.format( @@ -255,15 +273,33 @@ class Command(BaseCommand): to_migrate = collections.OrderedDict() to_comment = collections.OrderedDict() for name in registered_settings: - files_to_comment = self._check_if_needs_comment(patterns, name) + comment_error, migrate_error = None, None + files_to_comment = [] + try: + files_to_comment = self._check_if_needs_comment(patterns, name) + except Exception as e: + comment_error = 'Error commenting {0}: {1!r}'.format(name, e) + if not self.skip_errors: + raise CommandError(comment_error) if files_to_comment: to_comment[name] = files_to_comment migrate_value = empty if files_to_comment: migrate_value = self._check_if_needs_migration(name) if migrate_value is not empty: - to_migrate[name] = migrate_value - self._display_tbd(name, files_to_comment, migrate_value) + field = settings_registry.get_setting_field(name) + assert not field.read_only + try: + data = field.to_representation(migrate_value) + setting_value = field.run_validation(data) + db_value = field.to_representation(setting_value) + to_migrate[name] = db_value + except Exception as e: + to_comment.pop(name) + migrate_error = 'Unable to assign value {0!r} to setting "{1}: {2!s}".'.format(migrate_value, name, e) + if not self.skip_errors: + raise CommandError(migrate_error) + self._display_tbd(name, files_to_comment, migrate_value, comment_error, migrate_error) if self.verbosity == 1 and not to_migrate and not to_comment: self.stdout.write(' No settings found to migrate or comment!') @@ -275,15 +311,7 @@ class Command(BaseCommand): self.stdout.write(self.style.HEADING('Migrating settings to database:')) if not to_migrate: self.stdout.write(' No settings to migrate!') - for name, value in to_migrate.items(): - field = settings_registry.get_setting_field(name) - assert not field.read_only - try: - data = 
field.to_representation(value) - setting_value = field.run_validation(data) - db_value = field.to_representation(setting_value) - except Exception as e: - raise CommandError('Unable to assign value {0!r} to setting "{1}: {2!s}".'.format(value, name, e)) + for name, db_value in to_migrate.items(): display_value = json.dumps(db_value, indent=4) # Always encode "raw" strings as JSON. if isinstance(db_value, basestring): diff --git a/awx/ui/conf.py b/awx/ui/conf.py index 46fd4288c4..a92eeea35c 100644 --- a/awx/ui/conf.py +++ b/awx/ui/conf.py @@ -8,9 +8,18 @@ from django.utils.translation import ugettext_lazy as _ from awx.conf import fields, register +class PendoTrackingStateField(fields.ChoiceField): + + def to_internal_value(self, data): + # Any false/null values get converted to 'off'. + if data in fields.NullBooleanField.FALSE_VALUES or data in fields.NullBooleanField.NULL_VALUES: + return 'off' + return super(PendoTrackingStateField, self).to_internal_value(data) + + register( 'PENDO_TRACKING_STATE', - field_class=fields.ChoiceField, + field_class=PendoTrackingStateField, choices=[ ('off', _('Off')), ('anonymous', _('Anonymous')), From 0d538f8b0f5065bcb31264c7d63ca1cf139a8dc4 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 28 Sep 2016 10:23:45 -0400 Subject: [PATCH 17/37] Revert "removed wait_task look restriction" This reverts commit f159fd45406c6af2aacc48a44e2a3993bfa19ce8. --- awx/main/scheduler/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index 24958c0e0d..e670fa6300 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -165,7 +165,7 @@ def rebuild_graph(): graph = SimpleDAG() for task in running_tasks: graph.add_node(task) - for wait_task in waiting_tasks: + for wait_task in waiting_tasks[:50]: node_dependencies = [] for node in graph: if wait_task.is_blocked_by(node['node_object']): From 0ce7b31502eb23e0ea18998444cf105030274727 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Wed, 28 Sep 2016 11:04:35 -0400 Subject: [PATCH 18/37] Fix default value validation for LDAP/SAML settings to prevent warnings. --- awx/conf/fields.py | 5 +++++ awx/sso/conf.py | 3 +++ awx/sso/fields.py | 6 +++++- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/awx/conf/fields.py b/awx/conf/fields.py index a560d3a637..ae299137e6 100644 --- a/awx/conf/fields.py +++ b/awx/conf/fields.py @@ -34,6 +34,11 @@ class URLField(CharField): validator_kwargs['schemes'] = schemes self.validators.append(URLValidator(**validator_kwargs)) + def to_representation(self, value): + if value is None: + return '' + return super(URLField, self).to_representation(value) + def run_validators(self, value): if self.allow_plain_hostname: try: diff --git a/awx/sso/conf.py b/awx/sso/conf.py index 264b609367..e0842f6031 100644 --- a/awx/sso/conf.py +++ b/awx/sso/conf.py @@ -169,6 +169,7 @@ register( field_class=fields.URLField, schemes=('ldap', 'ldaps'), allow_blank=True, + default='', label=_('LDAP Server URI'), help_text=_('URI to connect to LDAP server, such as "ldap://ldap.example.com:389" ' '(non-SSL) or "ldaps://ldap.example.com:636" (SSL). 
LDAP authentication ' @@ -880,6 +881,7 @@ register( register( 'SOCIAL_AUTH_SAML_TECHNICAL_CONTACT', field_class=fields.SAMLContactField, + allow_blank=True, default={}, label=_('SAML Service Provider Technical Contact'), help_text=_('Configure this setting with your contact information.'), @@ -894,6 +896,7 @@ register( register( 'SOCIAL_AUTH_SAML_SUPPORT_CONTACT', field_class=fields.SAMLContactField, + allow_blank=True, default={}, label=_('SAML Service Provider Support Contact'), help_text=_('Configure this setting with your contact information.'), diff --git a/awx/sso/fields.py b/awx/sso/fields.py index 6655ad3523..a0d472756e 100644 --- a/awx/sso/fields.py +++ b/awx/sso/fields.py @@ -349,6 +349,10 @@ class BaseDictWithChildField(fields.DictField): } allow_unknown_keys = False + def __init__(self, *args, **kwargs): + self.allow_blank = kwargs.pop('allow_blank', False) + super(BaseDictWithChildField, self).__init__(*args, **kwargs) + def to_representation(self, value): value = super(BaseDictWithChildField, self).to_representation(value) for k, v in value.items(): @@ -367,7 +371,7 @@ class BaseDictWithChildField(fields.DictField): continue elif key not in data: missing_keys.add(key) - if missing_keys: + if missing_keys and (data or not self.allow_blank): keys_display = json.dumps(list(missing_keys)).lstrip('[').rstrip(']') self.fail('missing_keys', missing_keys=keys_display) if not self.allow_unknown_keys: From f6c50cc63a8946ae81c3c17effb1a1ff5f5f1308 Mon Sep 17 00:00:00 2001 From: Graham Mainwaring Date: Wed, 28 Sep 2016 12:38:37 -0400 Subject: [PATCH 19/37] Update Makefile so that ui-release does not get rebuilt over and over when troubleshooting build/release processes --- .gitignore | 3 ++- Makefile | 32 +++++++++++++++++++------------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/.gitignore b/.gitignore index 20e90fc35c..afd8aa7187 100644 --- a/.gitignore +++ b/.gitignore @@ -51,8 +51,9 @@ __pycache__ /tmp npm-debug.log -# UI build debugging +# UI build flag files awx/ui/.deps_built +awx/ui/.release_built # Testing .cache diff --git a/Makefile b/Makefile index fd9d87cd2e..b00b8b3536 100644 --- a/Makefile +++ b/Makefile @@ -170,6 +170,10 @@ ifeq ($(DISTRO),ubuntu) SETUP_INSTALL_ARGS += --install-layout=deb endif +# UI flag files +UI_DEPS_FLAG_FILE = awx/ui/.deps_built +UI_RELEASE_FLAG_FILE = awx/ui/.release_built + .DEFAULT_GOAL := build .PHONY: clean clean-tmp clean-venv rebase push requirements requirements_dev \ @@ -213,7 +217,8 @@ clean-bundle: clean-ui: rm -rf awx/ui/static/ rm -rf awx/ui/node_modules/ - rm -f awx/ui/.deps_built + rm -f $(UI_DEPS_FLAG_FILE) + rm -f $(UI_RELEASE_FLAG_FILE) clean-tmp: rm -rf tmp/ @@ -224,7 +229,6 @@ clean-venv: # Remove temporary build files, compiled Python files. 
clean: clean-rpm clean-deb clean-ui clean-tar clean-packer clean-bundle rm -rf awx/lib/site-packages - rm -rf awx/lib/.deps_built rm -rf dist/* rm -rf tmp mkdir tmp @@ -482,32 +486,35 @@ test_jenkins : test_coverage # UI TASKS # -------------------------------------- -ui-deps-built: awx/ui/package.json +$(UI_DEPS_FLAG_FILE): awx/ui/package.json $(NPM_BIN) --unsafe-perm --prefix awx/ui install awx/ui - touch awx/ui/.deps_built + touch $(UI_DEPS_FLAG_FILE) -ui-docker-machine: ui-deps-built +ui-docker-machine: $(UI_DEPS_FLAG_FILE) $(NPM_BIN) --prefix awx/ui run build-docker-machine -ui-docker: ui-deps-built +ui-docker: $(UI_DEPS_FLAG_FILE) $(NPM_BIN) --prefix awx/ui run build-docker-cid -ui-release: ui-deps-built - $(NPM_BIN) --prefix awx/ui run build-release +ui-release: $(UI_RELEASE_FLAG_FILE) -ui-test: ui-deps-built +$(UI_RELEASE_FLAG_FILE): $(UI_DEPS_FLAG_FILE) + $(NPM_BIN) --prefix awx/ui run build-release + touch $(UI_RELEASE_FLAG_FILE) + +ui-test: $(UI_DEPS_FLAG_FILE) $(NPM_BIN) --prefix awx/ui run test -ui-test-ci: ui-deps-built +ui-test-ci: $(UI_DEPS_FLAG_FILE) $(NPM_BIN) --prefix awx/ui run test:ci testjs_ci: echo "Update UI unittests later" #ui-test-ci -jshint: ui-deps-built +jshint: $(UI_DEPS_FLAG_FILE) grunt --gruntfile awx/ui/Gruntfile.js jshint #Depends on node 6.x and npm 3.x installed on Jenkins slave -ui-test-saucelabs: ui-deps-built +ui-test-saucelabs: $(UI_DEPS_FLAG_FILE) $(NPM_BIN) --prefix awx/ui run test:saucelabs # END UI TASKS @@ -773,7 +780,6 @@ docker-compose-build: MACHINE?=default docker-clean: - rm -f awx/lib/.deps_built eval $$(docker-machine env $(MACHINE)) $(foreach container_id,$(shell docker ps -f name=tools_tower -aq),docker stop $(container_id); docker rm -f $(container_id);) -docker images | grep "tower_devel" | awk '{print $3}' | xargs docker rmi From 38f5ae21f8ffb513c1ae78624d3577c844934549 Mon Sep 17 00:00:00 2001 From: Aaron Tan Date: Wed, 28 Sep 2016 14:14:29 -0400 Subject: [PATCH 20/37] Set up post_process flag for JobEvent save in callback receiver. --- awx/main/management/commands/run_callback_receiver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/awx/main/management/commands/run_callback_receiver.py b/awx/main/management/commands/run_callback_receiver.py index dcb3906013..d2b89cd44d 100644 --- a/awx/main/management/commands/run_callback_receiver.py +++ b/awx/main/management/commands/run_callback_receiver.py @@ -117,7 +117,7 @@ class CallbackBrokerWorker(ConsumerMixin): else: print("Cache hit") j.parent_id = parent_id - j.save() + j.save(post_process=True) if event_uuid: cache.set("{}_{}".format(payload['job_id'], event_uuid), j.id, 300) except DatabaseError as e: From eaa6567cf312174f1c7b8d55f2b456545a75d1ed Mon Sep 17 00:00:00 2001 From: Aaron Tan Date: Wed, 28 Sep 2016 15:45:23 -0400 Subject: [PATCH 21/37] Convert notification_subject from byte string to unicode. 
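
What this guards against is Python 2's str.format refusing to interpolate unicode into a
byte-string template once non-ASCII is involved ('%'-formatting promotes the result to unicode,
but .format does not). A minimal illustration with an invented job name:

    name = u'D\xe9ploiement nightly'           # job name containing non-ASCII
    try:
        subject = "{} succeeded".format(name)  # byte-string template
    except UnicodeEncodeError:
        subject = None                         # ascii codec can't encode u'\xe9'
    subject = u"{} succeeded".format(name)     # unicode template interpolates cleanly
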
--- awx/main/models/notifications.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/awx/main/models/notifications.py b/awx/main/models/notifications.py index 442b5dc2c8..a9dbcdebdc 100644 --- a/awx/main/models/notifications.py +++ b/awx/main/models/notifications.py @@ -181,11 +181,11 @@ class JobNotificationMixin(object): def _build_notification_message(self, status_str): notification_body = self.notification_data() - notification_subject = "{} #{} '{}' {} on Ansible Tower: {}".format(self.get_notification_friendly_name(), - self.id, - self.name, - status_str, - notification_body['url']) + notification_subject = u"{} #{} '{}' {} on Ansible Tower: {}".format(self.get_notification_friendly_name(), + self.id, + self.name, + status_str, + notification_body['url']) notification_body['friendly_name'] = self.get_notification_friendly_name() return (notification_subject, notification_body) From 2f205a6862633e2b86d1c45fa57f948fcb62d188 Mon Sep 17 00:00:00 2001 From: AlanCoding Date: Wed, 28 Sep 2016 16:02:00 -0400 Subject: [PATCH 22/37] show capabilities on launch and relaunch, remove unintended capabilities --- awx/api/serializers.py | 1 - awx/api/views.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 753fdad8b6..a170329875 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -1075,7 +1075,6 @@ class InventoryDetailSerializer(InventorySerializer): class InventoryScriptSerializer(InventorySerializer): - show_capabilities = ['copy', 'edit', 'delete'] class Meta: fields = () diff --git a/awx/api/views.py b/awx/api/views.py index 23dca79c80..3fc668e615 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -2282,7 +2282,7 @@ class JobTemplateLaunch(RetrieveAPIView, GenericAPIView): else: data = OrderedDict() data['ignored_fields'] = ignored_fields - data.update(JobSerializer(new_job).to_representation(new_job)) + data.update(JobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job)) data['job'] = new_job.id return Response(data, status=status.HTTP_201_CREATED) @@ -2965,7 +2965,7 @@ class JobRelaunch(RetrieveAPIView, GenericAPIView): data = dict(passwords_needed_to_start=new_job.passwords_needed_to_start) return Response(data, status=status.HTTP_400_BAD_REQUEST) else: - data = JobSerializer(new_job).data + data = JobSerializer(new_job, context=self.get_serializer_context()).data # Add job key to match what old relaunch returned. data['job'] = new_job.id headers = {'Location': new_job.get_absolute_url()} @@ -3423,7 +3423,7 @@ class AdHocCommandRelaunch(GenericAPIView): data = dict(passwords_needed_to_start=new_ad_hoc_command.passwords_needed_to_start) return Response(data, status=status.HTTP_400_BAD_REQUEST) else: - data = AdHocCommandSerializer(new_ad_hoc_command).data + data = AdHocCommandSerializer(new_ad_hoc_command, context=self.get_serializer_context()).data # Add ad_hoc_command key to match what was previously returned. 
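
A note on the context= additions in this patch: DRF serializers resolve request-dependent fields
from self.context, so instantiating one without context does not fail, it just silently computes
degraded values such as an empty user_capabilities. A generic sketch, not the AWX serializers:

    from rest_framework import serializers

    class ThingSerializer(serializers.Serializer):
        user_capabilities = serializers.SerializerMethodField()

        def get_user_capabilities(self, obj):
            request = self.context.get('request')
            if request is None:        # context omitted: silently wrong output
                return {}
            return {'edit': request.user.is_superuser}

    # ThingSerializer(obj).data                               -> capabilities == {}
    # ThingSerializer(obj, context={'request': request}).data -> real capabilities
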
data['ad_hoc_command'] = new_ad_hoc_command.id headers = {'Location': new_ad_hoc_command.get_absolute_url()} From bdd444fb44c120cb2bb808060761c5d01ad3d132 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 29 Sep 2016 09:53:36 -0400 Subject: [PATCH 23/37] removed print --- awx/main/models/unified_jobs.py | 1 - 1 file changed, 1 deletion(-) diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index bcdde810e9..d304f24d79 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -861,7 +861,6 @@ class UnifiedJob(PolymorphicModel, PasswordFieldsModel, CommonModelNameNotUnique self.update_fields(start_args=json.dumps(kwargs), status='pending') self.socketio_emit_status("pending") - print("Running job launch for job %s" % self.name) from awx.main.scheduler.tasks import run_job_launch run_job_launch.delay(self.id) From fbc1dff4de8f59ecfd763231d7c761c466d90b11 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 29 Sep 2016 10:23:00 -0400 Subject: [PATCH 24/37] flake8 fixes --- awx/main/scheduler/__init__.py | 1 - awx/main/scheduler/dag_simple.py | 12 ++++++------ awx/main/tasks.py | 1 - awx/main/tests/base.py | 2 -- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index e670fa6300..03c87d8ddb 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -4,7 +4,6 @@ # Python import datetime import logging -import struct, fcntl, os # Django from django.conf import settings diff --git a/awx/main/scheduler/dag_simple.py b/awx/main/scheduler/dag_simple.py index 79b20520e2..aeb0ff759e 100644 --- a/awx/main/scheduler/dag_simple.py +++ b/awx/main/scheduler/dag_simple.py @@ -1,11 +1,11 @@ from awx.main.models import ( - Job, - AdHocCommand, - InventoryUpdate, - ProjectUpdate, - WorkflowJob, - SystemJob, + Job, + AdHocCommand, + InventoryUpdate, + ProjectUpdate, + WorkflowJob, + SystemJob, ) class SimpleDAG(object): diff --git a/awx/main/tasks.py b/awx/main/tasks.py index bc09c58f60..10fb82c67a 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -52,7 +52,6 @@ from awx.main.task_engine import TaskSerializer, TASK_TIMEOUT_INTERVAL from awx.main.utils import (get_ansible_version, get_ssh_version, decrypt_field, update_scm_url, emit_websocket_notification, check_proot_installed, build_proot_temp_dir, wrap_args_with_proot) -from awx.main.scheduler.dag_workflow import WorkflowDAG __all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate', 'RunAdHocCommand', 'RunWorkflowJob', 'handle_work_error', diff --git a/awx/main/tests/base.py b/awx/main/tests/base.py index 34be4081b8..3950b538c5 100644 --- a/awx/main/tests/base.py +++ b/awx/main/tests/base.py @@ -12,7 +12,6 @@ import sys import tempfile import time import urllib -from multiprocessing import Process import re import mock @@ -30,7 +29,6 @@ from django.utils.encoding import force_text # AWX from awx.main.models import * # noqa -from awx.main.management.commands.run_callback_receiver import CallbackReceiver from awx.main.utils import get_ansible_version from awx.main.task_engine import TaskEngager as LicenseWriter from awx.sso.backends import LDAPSettings From 6b0e3378862b783cc1fb03818f868db0a9cd8cb5 Mon Sep 17 00:00:00 2001 From: Matthew Jones Date: Thu, 29 Sep 2016 10:37:18 -0400 Subject: [PATCH 25/37] Update postgres yum/apt repo locations *Thanks postgres team --- tools/docker-compose/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tools/docker-compose/Dockerfile b/tools/docker-compose/Dockerfile
index c583c4ddfd..ddd54616bb 100644
--- a/tools/docker-compose/Dockerfile
+++ b/tools/docker-compose/Dockerfile
@@ -9,7 +9,7 @@ RUN mkdir /tmp/requirements
 ADD requirements/requirements.txt requirements/requirements_ansible.txt requirements/requirements_dev.txt requirements/requirements_jenkins.txt /tmp/requirements/
 RUN yum -y update && yum -y install curl epel-release
 RUN curl --silent --location https://rpm.nodesource.com/setup_6.x | bash -
-RUN yum -y localinstall http://yum.postgresql.org/9.4/redhat/rhel-6-x86_64/pgdg-centos94-9.4-1.noarch.rpm
+RUN yum -y localinstall http://download.postgresql.org/pub/repos/yum/9.4/redhat/rhel-6-x86_64/pgdg-centos94-9.4-3.noarch.rpm
 ADD tools/docker-compose/proot.repo /etc/yum.repos.d/proot.repo
 RUN yum -y update && yum -y install openssh-server ansible mg vim tmux git mercurial subversion python-devel python-psycopg2 make postgresql postgresql-devel nodejs python-psutil libxml2-devel libxslt-devel libstdc++.so.6 gcc cyrus-sasl-devel cyrus-sasl openldap-devel libffi-devel zeromq-devel proot python-pip xmlsec1-devel swig krb5-devel xmlsec1-openssl xmlsec1 xmlsec1-openssl-devel libtool-ltdl-devel
 RUN pip install flake8 pytest==2.9.2 pytest-pythonpath pytest-django pytest-cov pytest-mock dateutils django-debug-toolbar==1.4 pyflakes==1.0.0 virtualenv

From a648beba9098f1e397f1e5b4d1dee4116aaad172 Mon Sep 17 00:00:00 2001
From: Matthew Jones
Date: Thu, 29 Sep 2016 12:03:06 -0400
Subject: [PATCH 26/37] Rev django security version

---
 requirements/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index 3e82d6bc81..d236b5d1f1 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -12,7 +12,7 @@ cliff==1.15.0
 cmd2==0.6.8
 d2to1==0.2.11 # TODO: Still needed?
 defusedxml==0.4.1
-Django==1.8.10
+Django==1.8.15
 debtcollector==1.2.0
 decorator==4.0.6
 django-auth-ldap==1.2.6

From ec2b0ac90d9d1ddda5e215d804decc8431bc8bfc Mon Sep 17 00:00:00 2001
From: Chris Meyers
Date: Thu, 29 Sep 2016 16:01:15 -0400
Subject: [PATCH 27/37] add back in removed method

---
 awx/main/tasks.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/awx/main/tasks.py b/awx/main/tasks.py
index 07cd7df835..6dbac70108 100644
--- a/awx/main/tasks.py
+++ b/awx/main/tasks.py
@@ -305,6 +305,10 @@ class BaseTask(Task):
                 logger.error('Failed to update %s after %d retries.',
                              self.model._meta.object_name, _attempt)
 
+    def signal_finished(self, pk):
+        pass
+        # notify_task_runner(dict(complete=pk))
+
     def get_path_to(self, *args):
         '''
         Return absolute path relative to this file.
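
PATCH 27 restores signal_finished as an inert stub. The series never names the remaining caller, so
read the following only as a hedged sketch of why such stubs are kept during a refactor: call sites
written against the old notification path keep working until they are migrated.

    # Hedged sketch; the class and hook mirror the hunk above.
    class BaseTask(object):
        def signal_finished(self, pk):
            # Deliberate no-op: the FifoQueue push this once performed
            # (notify_task_runner) was removed earlier in the series, but
            # keeping the method spares legacy callers an AttributeError.
            pass

    BaseTask().signal_finished(42)  # safe wherever it is still invoked
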
From 9cafebd8db85e2330b46cf2ddfff2cd1a661c93d Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 29 Sep 2016 16:17:05 -0400 Subject: [PATCH 29/37] remove job to jt allow_simultaneous dependency * Foreshadowing of what's to come with the task manager. When deciding on what job to run in our task manager, we can't depend on job template fields. Otherwise, this would cost us a query. --- awx/api/serializers.py | 3 ++- .../migrations/0037_job_allow_simultaneous.py | 19 +++++++++++++++++++ awx/main/models/jobs.py | 10 +++++----- 3 files changed, 26 insertions(+), 6 deletions(-) create mode 100644 awx/main/migrations/0037_job_allow_simultaneous.py diff --git a/awx/api/serializers.py b/awx/api/serializers.py index db249bac9a..97191607b7 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -1953,7 +1953,8 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer): model = Job fields = ('*', 'job_template', 'passwords_needed_to_start', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch', - 'ask_job_type_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch') + 'ask_job_type_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', + 'allow_simultaneous',) def get_related(self, obj): res = super(JobSerializer, self).get_related(obj) diff --git a/awx/main/migrations/0037_job_allow_simultaneous.py b/awx/main/migrations/0037_job_allow_simultaneous.py new file mode 100644 index 0000000000..8a2e89df94 --- /dev/null +++ b/awx/main/migrations/0037_job_allow_simultaneous.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0036_v310_remove_tower_settings'), + ] + + operations = [ + migrations.AddField( + model_name='job', + name='allow_simultaneous', + field=models.BooleanField(default=False), + ), + ] diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index 1602872d2b..8fa9c8d176 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -138,6 +138,9 @@ class JobOptions(BaseModel): become_enabled = models.BooleanField( default=False, ) + allow_simultaneous = models.BooleanField( + default=False, + ) extra_vars_dict = VarsDictProperty('extra_vars', True) @@ -236,9 +239,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): read_role = ImplicitRoleField( parent_role=['project.organization.auditor_role', 'inventory.organization.auditor_role', 'execute_role', 'admin_role'], ) - allow_simultaneous = models.BooleanField( - default=False, - ) @classmethod @@ -251,7 +251,7 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, ResourceMixin): 'playbook', 'credential', 'cloud_credential', 'network_credential', 'forks', 'schedule', 'limit', 'verbosity', 'job_tags', 'extra_vars', 'launch_type', 'force_handlers', 'skip_tags', 'start_at_task', 'become_enabled', - 'labels', 'survey_passwords'] + 'labels', 'survey_passwords', 'allow_simultaneous',] def resource_validation_data(self): ''' @@ -616,7 +616,7 @@ class Job(UnifiedJob, JobOptions, JobNotificationMixin): if obj.job_template is not None and obj.inventory is not None: if obj.job_template == self.job_template and \ obj.inventory == self.inventory: - if self.job_template.allow_simultaneous: + if self.allow_simultaneous: return False if obj.launch_type == 'callback' and self.launch_type == 'callback' and \ obj.limit != self.limit: From cc4025977f3fadfe206d6ccb056dafc16f29a575 Mon Sep 17 
00:00:00 2001 From: Bill Nottingham Date: Thu, 29 Sep 2016 17:01:11 -0400 Subject: [PATCH 30/37] Update help text to reflect 3.0 remove: default change --- awx/sso/conf.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/awx/sso/conf.py b/awx/sso/conf.py index e0842f6031..5bcff670cf 100644 --- a/awx/sso/conf.py +++ b/awx/sso/conf.py @@ -44,12 +44,12 @@ which users can administer the organization. users who will be added to the organization. Strings in the format "//" will be interpreted as regular expressions and may also be used instead of string literals; only "i" and "m" are supported for flags. -- remove_admins: True/False. Defaults to False. +- remove_admins: True/False. Defaults to True. If True, a user who does not match will be removed from the organization's administrative list. - users: None, True/False, string or list/tuple of strings. Same rules apply as for admins. -- remove_users: True/False. Defaults to False. Same rules as apply for +- remove_users: True/False. Defaults to True. Same rules as apply for remove_admins.\ ''') @@ -90,7 +90,7 @@ for each team's membership, where each can contain the following parameters: Strings in the format "//" will be interpreted as regular expressions and may also be used instead of string literals; only "i" and "m" are supported for flags. -- remove: True/False. Defaults to False. If True, a user who does not match +- remove: True/False. Defaults to True. If True, a user who does not match the rules above will be removed from the team.\ ''') @@ -434,7 +434,7 @@ register( ' If a string or list of strings, specifies the group DN(s). ' 'User will be added as a team member if the user is a member of ' 'ANY of these groups.\n' - '- remove: True/False. Defaults to False. If True, a user who is ' + '- remove: True/False. Defaults to True. If True, a user who is ' 'not a member of the given groups will be removed from the team.'), category=_('LDAP'), category_slug='ldap', From 70e489d62f7d9f3b60b2c878ff7fde360b9b4f18 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 30 Sep 2016 15:30:34 -0400 Subject: [PATCH 31/37] Save settings.LICENSE as dict instead of str. --- awx/api/views.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/awx/api/views.py b/awx/api/views.py index b91f963b4e..61e4c56a0c 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -246,16 +246,16 @@ class ApiV1ConfigView(APIView): try: from awx.main.task_engine import TaskEnhancer license_data = json.loads(data_actual) - license_data = TaskEnhancer(**license_data).validate_enhancements() + license_data_validated = TaskEnhancer(**license_data).validate_enhancements() except Exception: # FIX: Log return Response({"error": "Invalid License"}, status=status.HTTP_400_BAD_REQUEST) # If the license is valid, write it to the database. 
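
Three distinct objects are in play in the hunk above: the raw JSON string from the request
(data_actual), the parsed dict (license_data), and the validated result (license_data_validated).
The old code persisted the raw string, which forced every reader of settings.LICENSE to re-parse
it. A compressed illustration with invented license fields:

    import json

    data_actual = '{"valid_key": true, "instance_count": 10}'  # raw request body
    license_data = json.loads(data_actual)                     # what now gets stored

    # string-valued setting: every consumer must know to re-parse
    assert json.loads(data_actual)['instance_count'] == 10
    # dict-valued setting: directly usable
    assert license_data['instance_count'] == 10
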
- if license_data['valid_key']: - settings.LICENSE = data_actual + if license_data_validated['valid_key']: + settings.LICENSE = license_data settings.TOWER_URL_BASE = "{}://{}".format(request.scheme, request.get_host()) - return Response(license_data) + return Response(license_data_validated) return Response({"error": "Invalid license"}, status=status.HTTP_400_BAD_REQUEST) From 649bf08edf6885439c0dccdcf5030fdc91d7fa86 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 3 Oct 2016 08:15:39 -0400 Subject: [PATCH 32/37] add workflow root endpoints --- awx/api/serializers.py | 2 +- awx/api/views.py | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 97191607b7..ad5c30d15f 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -2278,7 +2278,7 @@ class WorkflowJobTemplateNodeSerializer(WorkflowNodeBaseSerializer): class WorkflowJobNodeSerializer(WorkflowNodeBaseSerializer): class Meta: - model = WorkflowJobTemplateNode + model = WorkflowJobNode fields = ('*', 'job', 'workflow_job',) def get_related(self, obj): diff --git a/awx/api/views.py b/awx/api/views.py index 61e4c56a0c..d1fea73fe2 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -145,6 +145,8 @@ class ApiV1RootView(APIView): data['activity_stream'] = reverse('api:activity_stream_list') data['workflow_job_templates'] = reverse('api:workflow_job_template_list') data['workflow_jobs'] = reverse('api:workflow_job_list') + data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list') + data['workflow_job_nodes'] = reverse('api:workflow_job_node_list') return Response(data) @@ -2607,28 +2609,24 @@ class JobTemplateObjectRolesList(SubListAPIView): content_type = ContentType.objects.get_for_model(self.parent_model) return Role.objects.filter(content_type=content_type, object_id=po.pk) -# TODO: class WorkflowJobNodeList(ListCreateAPIView): model = WorkflowJobNode serializer_class = WorkflowJobNodeListSerializer new_in_310 = True -# TODO: class WorkflowJobNodeDetail(RetrieveUpdateDestroyAPIView): model = WorkflowJobNode serializer_class = WorkflowJobNodeDetailSerializer new_in_310 = True -# TODO: class WorkflowJobTemplateNodeList(ListCreateAPIView): model = WorkflowJobTemplateNode serializer_class = WorkflowJobTemplateNodeListSerializer new_in_310 = True -# TODO: class WorkflowJobTemplateNodeDetail(RetrieveUpdateDestroyAPIView): model = WorkflowJobTemplateNode From 203df91a5d2625113ac126c40dfdb76c08beb778 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 3 Oct 2016 09:28:01 -0400 Subject: [PATCH 33/37] more robust test mode checking --- awx/main/managers.py | 3 ++- awx/settings/defaults.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/awx/main/managers.py b/awx/main/managers.py index 15e1c9d655..86c3367140 100644 --- a/awx/main/managers.py +++ b/awx/main/managers.py @@ -4,6 +4,7 @@ import sys from django.db import models +from django.conf import settings class HostManager(models.Manager): @@ -25,7 +26,7 @@ class InstanceManager(models.Manager): def me(self): """Return the currently active instance.""" # If we are running unit tests, return a stub record. 
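
Why the wrapper added in the defaults.py hunk just below is spelled IS_TESTING: Django's settings
object only exposes module attributes whose names are entirely uppercase, so the existing lowercase
is_testing helper is invisible through django.conf.settings. A minimal sketch of the pairing:

    import sys

    def is_testing(argv=None):      # helper; NOT visible via django.conf.settings
        argv = argv or sys.argv
        return len(argv) >= 2 and argv[1] == 'test'

    def IS_TESTING(argv=None):      # uppercase alias; reachable via settings
        return is_testing(argv)
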
- if len(sys.argv) >= 2 and sys.argv[1] == 'test': + if settings.IS_TESTING(sys.argv): return self.model(id=1, hostname='localhost', uuid='00000000-0000-0000-0000-000000000000') diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 9da2142c19..f24ee87838 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -30,6 +30,9 @@ def is_testing(argv=None): return True return False +def IS_TESTING(argv=None): + return is_testing(argv) + DEBUG = True TEMPLATE_DEBUG = DEBUG SQL_DEBUG = DEBUG From 15eaac049c56a0e24690148eaa1e4f7e49fd8978 Mon Sep 17 00:00:00 2001 From: Shane McDonald Date: Mon, 3 Oct 2016 09:36:33 -0400 Subject: [PATCH 34/37] Add pycparser to SRC_ONLY_PKGS Fixes stuff due to https://github.com/pyca/cryptography/issues/3187 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 510fff2318..86685855a8 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ CELERY_SCHEDULE_FILE ?= /celerybeat-schedule # Python packages to install only from source (not from binary wheels) # Comma separated list -SRC_ONLY_PKGS ?= cffi +SRC_ONLY_PKGS ?= cffi,pycparser # Determine appropriate shasum command UNAME_S := $(shell uname -s) From b4c852cf5347dc3e27b13c8260efa4d07215a8ff Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 3 Oct 2016 10:06:44 -0400 Subject: [PATCH 35/37] change migration name to align with version semantic --- ..._allow_simultaneous.py => 0037_v310_job_allow_simultaneous.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename awx/main/migrations/{0037_job_allow_simultaneous.py => 0037_v310_job_allow_simultaneous.py} (100%) diff --git a/awx/main/migrations/0037_job_allow_simultaneous.py b/awx/main/migrations/0037_v310_job_allow_simultaneous.py similarity index 100% rename from awx/main/migrations/0037_job_allow_simultaneous.py rename to awx/main/migrations/0037_v310_job_allow_simultaneous.py From 3fab2b63b93eb2864cc149207e9b503491253eae Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Thu, 29 Sep 2016 15:56:36 -0400 Subject: [PATCH 36/37] dev non-clustering rabbitmq with lvc enabled --- tools/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker-compose.yml b/tools/docker-compose.yml index 2e5b18460d..a930f1569d 100644 --- a/tools/docker-compose.yml +++ b/tools/docker-compose.yml @@ -30,7 +30,7 @@ services: memcached: image: memcached:alpine rabbitmq: - image: rabbitmq:3-management + image: gcr.io/ansible-tower-engineering/rabbitmq:latest ports: - "15672:15672" From 0088c5a4104e02a2052b9e4e816aa483c1642d52 Mon Sep 17 00:00:00 2001 From: Wayne Witzel III Date: Mon, 3 Oct 2016 10:27:08 -0400 Subject: [PATCH 37/37] add asgi_amqp req --- requirements/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 0332dfcc34..2ee1c5b9b8 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -9,6 +9,7 @@ billiard==3.3.0.16 boto==2.40.0 celery==3.1.23 channels==0.17.2 +asgi_amqp==0.2.1 cliff==1.15.0 cmd2==0.6.8 d2to1==0.2.11 # TODO: Still needed?
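
A closing note on PATCH 37: asgi_amqp is a channel-layer backend that lets the channels==0.17.2 pin
visible in this same requirements hunk talk to the RabbitMQ broker image swapped in by PATCH 36.
The settings wiring is not part of this series; the sketch below assumes the backend path and
config keys from the asgi_amqp project, plus a hypothetical broker URL:

    # Hypothetical settings wiring, not included in this patch series.
    CHANNEL_LAYERS = {
        'default': {
            'BACKEND': 'asgi_amqp.AMQPChannelLayer',        # assumed import path
            'ROUTING': 'awx.main.routing.channel_routing',  # hypothetical module
            'CONFIG': {'url': 'amqp://guest:guest@rabbitmq:5672//'},
        },
    }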