diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 106e4b0198..873f822171 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -38,7 +38,7 @@ from rest_framework.utils.serializer_helpers import ReturnList from polymorphic.models import PolymorphicModel # AWX -from awx.main.constants import SCHEDULEABLE_PROVIDERS +from awx.main.constants import SCHEDULEABLE_PROVIDERS, ANSI_SGR_PATTERN from awx.main.models import * # noqa from awx.main.access import get_user_capabilities from awx.main.fields import ImplicitRoleField @@ -343,6 +343,8 @@ class BaseSerializer(serializers.ModelSerializer): continue summary_fields[fk] = OrderedDict() for field in related_fields: + if field == 'credential_type_id' and fk == 'credential' and self.version < 2: # TODO: remove version check in 3.3 + continue fval = getattr(fkval, field, None) @@ -2332,8 +2334,13 @@ class JobOptionsSerializer(LabelsListMixin, BaseSerializer): if obj.vault_credential: res['vault_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.vault_credential.pk}) if self.version > 1: - view = 'api:%s_extra_credentials_list' % camelcase_to_underscore(obj.__class__.__name__) - res['extra_credentials'] = self.reverse(view, kwargs={'pk': obj.pk}) + if isinstance(obj, UnifiedJobTemplate): + res['extra_credentials'] = self.reverse( + 'api:job_template_extra_credentials_list', + kwargs={'pk': obj.pk} + ) + elif isinstance(obj, UnifiedJob): + res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk}) else: cloud_cred = obj.cloud_credential if cloud_cred: @@ -3120,6 +3127,14 @@ class JobEventSerializer(BaseSerializer): max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes: ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026' + set_count = 0 + reset_count = 0 + for m in ANSI_SGR_PATTERN.finditer(ret['stdout']): + if m.string[m.start():m.end()] == u'\u001b[0m': + reset_count += 1 + else: + set_count += 1 + ret['stdout'] += u'\u001b[0m' * (set_count - reset_count) return ret @@ -3151,6 +3166,14 @@ class AdHocCommandEventSerializer(BaseSerializer): max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes: ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026' + set_count = 0 + reset_count = 0 + for m in ANSI_SGR_PATTERN.finditer(ret['stdout']): + if m.string[m.start():m.end()] == u'\u001b[0m': + reset_count += 1 + else: + set_count += 1 + ret['stdout'] += u'\u001b[0m' * (set_count - reset_count) return ret diff --git a/awx/api/views.py b/awx/api/views.py index 5757c26677..8c1910ea55 100644 --- a/awx/api/views.py +++ b/awx/api/views.py @@ -2383,24 +2383,24 @@ class InventoryScriptView(RetrieveAPIView): host = get_object_or_404(obj.hosts, name=hostname, **hosts_q) data = host.variables_dict else: - data = OrderedDict() + data = dict() if obj.variables_dict: - all_group = data.setdefault('all', OrderedDict()) + all_group = data.setdefault('all', dict()) all_group['vars'] = obj.variables_dict if obj.kind == 'smart': if len(obj.hosts.all()) == 0: return Response({}) else: - all_group = data.setdefault('all', OrderedDict()) - smart_hosts_qs = obj.hosts.all().order_by('name') + all_group = data.setdefault('all', dict()) + smart_hosts_qs = obj.hosts.all() smart_hosts = list(smart_hosts_qs.values_list('name', flat=True)) all_group['hosts'] = smart_hosts else: # Add hosts without a group to the all group. 
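A note on the two serializer hunks above: truncating stdout can cut an ANSI color span in half, leaving a "set" sequence with no matching reset, so the new code appends one reset per unmatched SGR sequence. A minimal standalone sketch of the same heuristic, reusing only the ANSI_SGR_PATTERN regex added to awx/main/constants.py (the function name here is illustrative, not part of the patch):

import re

ANSI_SGR_PATTERN = re.compile(r'\x1b\[[0-9;]*m')
RESET = u'\u001b[0m'

def rebalance_sgr(stdout):
    # Count SGR "set" sequences vs. explicit resets, then append enough
    # resets that no color/style can bleed past the truncated output.
    set_count = sum(1 for m in ANSI_SGR_PATTERN.finditer(stdout) if m.group() != RESET)
    reset_count = sum(1 for m in ANSI_SGR_PATTERN.finditer(stdout) if m.group() == RESET)
    return stdout + RESET * (set_count - reset_count)

rebalance_sgr(u'\u001b[31mred text cut mid-span\u2026')  # gains one trailing reset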
- groupless_hosts_qs = obj.hosts.filter(groups__isnull=True, **hosts_q).order_by('name') + groupless_hosts_qs = obj.hosts.filter(groups__isnull=True, **hosts_q) groupless_hosts = list(groupless_hosts_qs.values_list('name', flat=True)) if groupless_hosts: - all_group = data.setdefault('all', OrderedDict()) + all_group = data.setdefault('all', dict()) all_group['hosts'] = groupless_hosts # Build in-memory mapping of groups and their hosts. @@ -2408,7 +2408,6 @@ class InventoryScriptView(RetrieveAPIView): if 'enabled' in hosts_q: group_hosts_kw['host__enabled'] = hosts_q['enabled'] group_hosts_qs = Group.hosts.through.objects.filter(**group_hosts_kw) - group_hosts_qs = group_hosts_qs.order_by('host__name') group_hosts_qs = group_hosts_qs.values_list('group_id', 'host_id', 'host__name') group_hosts_map = {} for group_id, host_id, host_name in group_hosts_qs: @@ -2420,7 +2419,6 @@ class InventoryScriptView(RetrieveAPIView): from_group__inventory_id=obj.id, to_group__inventory_id=obj.id, ) - group_parents_qs = group_parents_qs.order_by('from_group__name') group_parents_qs = group_parents_qs.values_list('from_group_id', 'from_group__name', 'to_group_id') group_children_map = {} for from_group_id, from_group_name, to_group_id in group_parents_qs: @@ -2429,15 +2427,15 @@ class InventoryScriptView(RetrieveAPIView): # Now use in-memory maps to build up group info. for group in obj.groups.all(): - group_info = OrderedDict() + group_info = dict() group_info['hosts'] = group_hosts_map.get(group.id, []) group_info['children'] = group_children_map.get(group.id, []) group_info['vars'] = group.variables_dict data[group.name] = group_info if hostvars: - data.setdefault('_meta', OrderedDict()) - data['_meta'].setdefault('hostvars', OrderedDict()) + data.setdefault('_meta', dict()) + data['_meta'].setdefault('hostvars', dict()) for host in obj.hosts.filter(**hosts_q): data['_meta']['hostvars'][host.name] = host.variables_dict @@ -2669,6 +2667,12 @@ class InventoryUpdateList(ListAPIView): model = InventoryUpdate serializer_class = InventoryUpdateListSerializer + def get_queryset(self): + qs = super(InventoryUpdateList, self).get_queryset() + # TODO: remove this defer in 3.3 when we implement https://github.com/ansible/ansible-tower/issues/5436 + qs = qs.defer('result_stdout_text') + return qs + class InventoryUpdateDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView): diff --git a/awx/conf/license.py b/awx/conf/license.py index 57456f90fa..17a1d323c7 100644 --- a/awx/conf/license.py +++ b/awx/conf/license.py @@ -8,7 +8,7 @@ from django.utils.translation import ugettext_lazy as _ from rest_framework.exceptions import APIException # Tower -from awx.main.utils.common import get_licenser +from awx.main.utils.common import get_licenser, memoize __all__ = ['LicenseForbids', 'get_license', 'get_licensed_features', 'feature_enabled', 'feature_exists'] @@ -40,6 +40,7 @@ def get_licensed_features(): return features +@memoize(cache_name='ephemeral') def feature_enabled(name): """Return True if the requested feature is enabled, False otherwise.""" validated_license_data = _get_validated_license_data() diff --git a/awx/main/access.py b/awx/main/access.py index bf72dd3d34..80ebf7b846 100644 --- a/awx/main/access.py +++ b/awx/main/access.py @@ -27,7 +27,7 @@ from awx.main.models import * # noqa from awx.main.models.unified_jobs import ACTIVE_STATES from awx.main.models.mixins import ResourceMixin -from awx.conf.license import LicenseForbids +from awx.conf.license import LicenseForbids, feature_enabled __all__ = 
['get_user_queryset', 'check_user_access', 'check_user_access_with_errors', 'user_accessible_objects', 'consumer_access', @@ -140,7 +140,14 @@ def get_user_capabilities(user, instance, **kwargs): convenient for the user interface to consume and hide or show various actions in the interface. ''' - access_class = access_registry[instance.__class__] + cls = instance.__class__ + # When `.defer()` is used w/ the Django ORM, the result is a subclass of + # the original that represents e.g., + # awx.main.models.ad_hoc_commands.AdHocCommand_Deferred_result_stdout_text + # We want to do the access registry lookup keyed on the base class name. + if getattr(cls, '_deferred', False): + cls = instance.__class__.__bases__[0] + access_class = access_registry[cls] return access_class(user).get_user_capabilities(instance, **kwargs) @@ -323,6 +330,10 @@ class BaseAccess(object): if validation_errors: user_capabilities[display_method] = False continue + elif isinstance(obj, (WorkflowJobTemplate, WorkflowJob)): + if not feature_enabled('workflows'): + user_capabilities[display_method] = (display_method == 'delete') + continue elif display_method == 'copy' and isinstance(obj, WorkflowJobTemplate) and obj.organization_id is None: user_capabilities[display_method] = self.user.is_superuser continue @@ -482,8 +493,10 @@ class UserAccess(BaseAccess): def can_change(self, obj, data): if data is not None and ('is_superuser' in data or 'is_system_auditor' in data): - if (to_python_boolean(data.get('is_superuser', 'false'), allow_none=True) or - to_python_boolean(data.get('is_system_auditor', 'false'), allow_none=True)) and not self.user.is_superuser: + if to_python_boolean(data.get('is_superuser', 'false'), allow_none=True) and \ + not self.user.is_superuser: + return False + if to_python_boolean(data.get('is_system_auditor', 'false'), allow_none=True) and not (self.user.is_superuser or self.user == obj): return False # A user can be changed if they are themselves, or by org admins or # superusers. Change permission implies changing only certain fields @@ -2068,6 +2081,8 @@ class UnifiedJobAccess(BaseAccess): # 'job_template__project', # 'job_template__credential', #) + # TODO: remove this defer in 3.3 when we implement https://github.com/ansible/ansible-tower/issues/5436 + qs = qs.defer('result_stdout_text') return qs.all() diff --git a/awx/main/constants.py b/awx/main/constants.py index bd00147415..10be060094 100644 --- a/awx/main/constants.py +++ b/awx/main/constants.py @@ -1,8 +1,11 @@ # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. +import re + from django.utils.translation import ugettext_lazy as _ CLOUD_PROVIDERS = ('azure', 'azure_rm', 'ec2', 'gce', 'rax', 'vmware', 'openstack', 'satellite6', 'cloudforms') SCHEDULEABLE_PROVIDERS = CLOUD_PROVIDERS + ('custom', 'scm',) PRIVILEGE_ESCALATION_METHODS = [ ('sudo', _('Sudo')), ('su', _('Su')), ('pbrun', _('Pbrun')), ('pfexec', _('Pfexec')), ('dzdo', _('DZDO')), ('pmrun', _('Pmrun')), ('runas', _('Runas'))] +ANSI_SGR_PATTERN = re.compile(r'\x1b\[[0-9;]*m') diff --git a/awx/main/management/commands/deprovision_instance.py b/awx/main/management/commands/deprovision_instance.py index 117c615db5..bd26bb74f0 100644 --- a/awx/main/management/commands/deprovision_instance.py +++ b/awx/main/management/commands/deprovision_instance.py @@ -17,6 +17,11 @@ class Command(BaseCommand): Deprovision a Tower cluster node """ + help = ( + 'Remove instance from the database. ' + 'Specify `--hostname` to use this command.' 
+ ) + option_list = BaseCommand.option_list + ( make_option('--hostname', dest='hostname', type='string', help='Hostname used during provisioning'), diff --git a/awx/main/management/commands/provision_instance.py b/awx/main/management/commands/provision_instance.py index 98831dfb52..24aad2aebb 100644 --- a/awx/main/management/commands/provision_instance.py +++ b/awx/main/management/commands/provision_instance.py @@ -16,6 +16,11 @@ class Command(BaseCommand): Regsiter this instance with the database for HA tracking. """ + help = ( + 'Add instance to the database. ' + 'Specify `--hostname` to use this command.' + ) + option_list = BaseCommand.option_list + ( make_option('--hostname', dest='hostname', type='string', help='Hostname used during provisioning'), diff --git a/awx/main/migrations/_scan_jobs.py b/awx/main/migrations/_scan_jobs.py index b953ba7d68..4dfc7cf972 100644 --- a/awx/main/migrations/_scan_jobs.py +++ b/awx/main/migrations/_scan_jobs.py @@ -14,7 +14,7 @@ def _create_fact_scan_project(ContentType, Project, org): ct = ContentType.objects.get_for_model(Project) name = "Tower Fact Scan - {}".format(org.name if org else "No Organization") proj = Project(name=name, - scm_url='https://github.com/ansible/tower-fact-modules', + scm_url='https://github.com/ansible/awx-facts-playbooks', scm_type='git', scm_update_on_launch=True, scm_update_cache_timeout=86400, diff --git a/awx/main/models/__init__.py b/awx/main/models/__init__.py index 01bd68cc66..66aab8242f 100644 --- a/awx/main/models/__init__.py +++ b/awx/main/models/__init__.py @@ -145,3 +145,17 @@ activity_stream_registrar.connect(WorkflowJob) # prevent API filtering on certain Django-supplied sensitive fields prevent_search(User._meta.get_field('password')) + + +# Always, always, always defer result_stdout_text for polymorphic UnifiedJob rows +# TODO: remove this defer in 3.3 when we implement https://github.com/ansible/ansible-tower/issues/5436 +def defer_stdout(f): + def _wrapped(*args, **kwargs): + objs = f(*args, **kwargs) + objs.query.deferred_loading[0].add('result_stdout_text') + return objs + return _wrapped + + +for cls in UnifiedJob.__subclasses__(): + cls.base_objects.filter = defer_stdout(cls.base_objects.filter) diff --git a/awx/main/scheduler/__init__.py b/awx/main/scheduler/__init__.py index a6f477539c..549f1546f3 100644 --- a/awx/main/scheduler/__init__.py +++ b/awx/main/scheduler/__init__.py @@ -300,7 +300,7 @@ class TaskManager(): # Already processed dependencies for this job if job.dependent_jobs.all(): return False - latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("created") + latest_inventory_update = InventoryUpdate.objects.filter(inventory_source=inventory_source).order_by("-created") if not latest_inventory_update.exists(): return True latest_inventory_update = latest_inventory_update.first() @@ -323,7 +323,7 @@ class TaskManager(): now = tz_now() if job.dependent_jobs.all(): return False - latest_project_update = ProjectUpdate.objects.filter(project=job.project).order_by("created") + latest_project_update = ProjectUpdate.objects.filter(project=job.project, job_type='check').order_by("-created") if not latest_project_update.exists(): return True latest_project_update = latest_project_update.first() @@ -421,26 +421,40 @@ class TaskManager(): if not found_acceptable_queue: logger.debug("%s couldn't be scheduled on graph, waiting for next cycle", task.log_format) - def fail_jobs_if_not_in_celery(self, node_jobs, active_tasks, celery_task_start_time): + def 
fail_jobs_if_not_in_celery(self, node_jobs, active_tasks, celery_task_start_time, + isolated=False): for task in node_jobs: if (task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR')): if isinstance(task, WorkflowJob): continue if task.modified > celery_task_start_time: continue - task.status = 'failed' - task.job_explanation += ' '.join(( - 'Task was marked as running in Tower but was not present in', - 'Celery, so it has been marked as failed.', - )) + new_status = 'failed' + if isolated: + new_status = 'error' + task.status = new_status + if isolated: + # TODO: cancel and reap artifacts of lost jobs from heartbeat + task.job_explanation += ' '.join(( + 'Task was marked as running in Tower but its', + 'controller management daemon was not present in', + 'Celery, so it has been marked as failed.', + 'Task may still be running, but contactability is unknown.' + )) + else: + task.job_explanation += ' '.join(( + 'Task was marked as running in Tower but was not present in', + 'Celery, so it has been marked as failed.', + )) try: task.save(update_fields=['status', 'job_explanation']) except DatabaseError: logger.error("Task {} DB error in marking failed. Job possibly deleted.".format(task.log_format)) continue awx_tasks._send_notification_templates(task, 'failed') - task.websocket_emit_status('failed') - logger.error("Task {} has no record in celery. Marking as failed".format(task.log_format)) + task.websocket_emit_status(new_status) + logger.error("{}Task {} has no record in celery. Marking as failed".format( + 'Isolated ' if isolated else '', task.log_format)) def cleanup_inconsistent_celery_tasks(self): ''' @@ -471,26 +485,36 @@ class TaskManager(): self.fail_jobs_if_not_in_celery(waiting_tasks, all_celery_task_ids, celery_task_start_time) for node, node_jobs in running_tasks.iteritems(): + isolated = False if node in active_queues: active_tasks = active_queues[node] else: ''' - Node task list not found in celery. If tower thinks the node is down - then fail all the jobs on the node. + Node task list not found in celery. There are several cases to consider: + - instance is unknown to tower, system is improperly configured + - instance is reported as down, then fail all jobs on the node + - instance is an isolated node, then check running tasks + among all allowed controller nodes for the management process ''' - try: - instance = Instance.objects.get(hostname=node) - if instance.capacity == 0: - active_tasks = [] - else: - continue - except Instance.DoesNotExist: - logger.error("Execution node Instance {} not found in database. " - "The node is currently executing jobs {}".format(node, - [j.log_format for j in node_jobs])) - active_tasks = [] + instance = Instance.objects.filter(hostname=node).first() - self.fail_jobs_if_not_in_celery(node_jobs, active_tasks, celery_task_start_time) + if instance is None: + logger.error("Execution node Instance {} not found in database. 
" + "The node is currently executing jobs {}".format( + node, [j.log_format for j in node_jobs])) + active_tasks = [] + elif instance.capacity == 0: + active_tasks = [] + elif instance.rampart_groups.filter(controller__isnull=False).exists(): + active_tasks = all_celery_task_ids + isolated = True + else: + continue + + self.fail_jobs_if_not_in_celery( + node_jobs, active_tasks, celery_task_start_time, + isolated=isolated + ) def calculate_capacity_consumed(self, tasks): self.graph = InstanceGroup.objects.capacity_values(tasks=tasks, graph=self.graph) diff --git a/awx/main/signals.py b/awx/main/signals.py index 81f6e17092..77fd91a5c3 100644 --- a/awx/main/signals.py +++ b/awx/main/signals.py @@ -388,6 +388,9 @@ def activity_stream_create(sender, instance, created, **kwargs): # Skip recording any inventory source directly associated with a group. if isinstance(instance, InventorySource) and instance.deprecated_group: return + _type = type(instance) + if getattr(_type, '_deferred', False): + return object1 = camelcase_to_underscore(instance.__class__.__name__) changes = model_to_dict(instance, model_serializer_mapping) # Special case where Job survey password variables need to be hidden @@ -421,6 +424,9 @@ def activity_stream_update(sender, instance, **kwargs): changes = model_instance_diff(old, new, model_serializer_mapping) if changes is None: return + _type = type(instance) + if getattr(_type, '_deferred', False): + return object1 = camelcase_to_underscore(instance.__class__.__name__) activity_entry = ActivityStream( operation='update', @@ -445,6 +451,9 @@ def activity_stream_delete(sender, instance, **kwargs): # explicitly called with flag on in Inventory.schedule_deletion. if isinstance(instance, Inventory) and not kwargs.get('inventory_delete_flag', False): return + _type = type(instance) + if getattr(_type, '_deferred', False): + return changes = model_to_dict(instance) object1 = camelcase_to_underscore(instance.__class__.__name__) activity_entry = ActivityStream( @@ -466,6 +475,9 @@ def activity_stream_associate(sender, instance, **kwargs): else: return obj1 = instance + _type = type(instance) + if getattr(_type, '_deferred', False): + return object1=camelcase_to_underscore(obj1.__class__.__name__) obj_rel = sender.__module__ + "." 
+ sender.__name__ @@ -476,6 +488,9 @@ def activity_stream_associate(sender, instance, **kwargs): if not obj2_actual.exists(): continue obj2_actual = obj2_actual[0] + _type = type(obj2_actual) + if getattr(_type, '_deferred', False): + continue if isinstance(obj2_actual, Role) and obj2_actual.content_object is not None: obj2_actual = obj2_actual.content_object object2 = camelcase_to_underscore(obj2_actual.__class__.__name__) diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 4e3f0386de..dbe7b13309 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -320,7 +320,11 @@ def awx_periodic_scheduler(self): def _send_notification_templates(instance, status_str): if status_str not in ['succeeded', 'failed']: raise ValueError(_("status_str must be either succeeded or failed")) - notification_templates = instance.get_notification_templates() + try: + notification_templates = instance.get_notification_templates() + except Exception: + logger.warn("No notification template defined for emitting notification") + notification_templates = None if notification_templates: if status_str == 'succeeded': notification_template_type = 'success' @@ -482,6 +486,7 @@ class BaseTask(LogErrorsTask): model = None abstract = True cleanup_paths = [] + proot_show_paths = [] def update_model(self, pk, _attempt=0, **updates): """Reload the model instance from the database and update the @@ -793,6 +798,7 @@ class BaseTask(LogErrorsTask): # May have to serialize the value kwargs['private_data_files'] = self.build_private_data_files(instance, **kwargs) kwargs['passwords'] = self.build_passwords(instance, **kwargs) + kwargs['proot_show_paths'] = self.proot_show_paths args = self.build_args(instance, **kwargs) safe_args = self.build_safe_args(instance, **kwargs) output_replacements = self.build_output_replacements(instance, **kwargs) @@ -1068,6 +1074,7 @@ class RunJob(BaseTask): env['VMWARE_USER'] = cloud_cred.username env['VMWARE_PASSWORD'] = decrypt_field(cloud_cred, 'password') env['VMWARE_HOST'] = cloud_cred.host + env['VMWARE_VALIDATE_CERTS'] = str(settings.VMWARE_VALIDATE_CERTS) elif cloud_cred and cloud_cred.kind == 'openstack': env['OS_CLIENT_CONFIG_FILE'] = cred_files.get(cloud_cred, '') @@ -1285,6 +1292,10 @@ class RunProjectUpdate(BaseTask): name = 'awx.main.tasks.run_project_update' model = ProjectUpdate + @property + def proot_show_paths(self): + return [settings.PROJECTS_ROOT] + def build_private_data(self, project_update, **kwargs): ''' Return SSH private key data needed for this project update. @@ -1298,7 +1309,7 @@ class RunProjectUpdate(BaseTask): } } ''' - handle, self.revision_path = tempfile.mkstemp(dir=settings.AWX_PROOT_BASE_PATH) + handle, self.revision_path = tempfile.mkstemp(dir=settings.PROJECTS_ROOT) self.cleanup_paths.append(self.revision_path) private_data = {'credentials': {}} if project_update.credential: @@ -1591,6 +1602,12 @@ class RunProjectUpdate(BaseTask): if status == 'successful' and instance.launch_type != 'sync': self._update_dependent_inventories(instance, dependent_inventory_sources) + def should_use_proot(self, instance, **kwargs): + ''' + Return whether this task should use proot. 
''' + return getattr(settings, 'AWX_PROOT_ENABLED', False) + class RunInventoryUpdate(BaseTask): diff --git a/awx/main/tests/functional/test_rbac_user.py b/awx/main/tests/functional/test_rbac_user.py index 8f307ea0e3..bbfe0267cd 100644 --- a/awx/main/tests/functional/test_rbac_user.py +++ b/awx/main/tests/functional/test_rbac_user.py @@ -44,6 +44,12 @@ def test_system_auditor_is_system_auditor(system_auditor): assert system_auditor.is_system_auditor +@pytest.mark.django_db +def test_system_auditor_can_modify_self(system_auditor): + access = UserAccess(system_auditor) + assert access.can_change(obj=system_auditor, data=dict(is_system_auditor='true')) + + @pytest.mark.django_db def test_user_queryset(user): u = user('pete', False) diff --git a/awx/main/tests/unit/test_task_manager.py b/awx/main/tests/unit/test_task_manager.py index 65b7607bb4..1937b7b5ca 100644 --- a/awx/main/tests/unit/test_task_manager.py +++ b/awx/main/tests/unit/test_task_manager.py @@ -21,7 +21,7 @@ class TestCleanupInconsistentCeleryTasks(): @mock.patch.object(TaskManager, 'get_active_tasks', return_value=([], {})) @mock.patch.object(TaskManager, 'get_running_tasks', return_value=({'host1': [Job(id=2), Job(id=3),]}, [])) @mock.patch.object(InstanceGroup.objects, 'prefetch_related', return_value=[]) - @mock.patch.object(Instance.objects, 'get', side_effect=Instance.DoesNotExist) + @mock.patch.object(Instance.objects, 'filter', return_value=mock.MagicMock(first=lambda: None)) @mock.patch('awx.main.scheduler.logger') def test_instance_does_not_exist(self, logger_mock, *args): logger_mock.error = mock.MagicMock(side_effect=RuntimeError("mocked")) diff --git a/awx/main/tests/unit/test_tasks.py b/awx/main/tests/unit/test_tasks.py index 99ad34d766..bbc52fad8d 100644 --- a/awx/main/tests/unit/test_tasks.py +++ b/awx/main/tests/unit/test_tasks.py @@ -181,6 +181,8 @@ class TestJobExecution: EXAMPLE_PRIVATE_KEY = '-----BEGIN PRIVATE KEY-----\nxyz==\n-----END PRIVATE KEY-----' def setup_method(self, method): + if not os.path.exists(settings.PROJECTS_ROOT): + os.mkdir(settings.PROJECTS_ROOT) self.project_path = tempfile.mkdtemp(prefix='awx_project_') with open(os.path.join(self.project_path, 'helloworld.yml'), 'w') as f: f.write('---') @@ -281,6 +283,15 @@ class TestGenericRun(TestJobExecution): args, cwd, env, stdout = call_args assert args[0] == 'bwrap' + def test_bwrap_virtualenvs_are_readonly(self): + self.task.run(self.pk) + + assert self.run_pexpect.call_count == 1 + call_args, _ = self.run_pexpect.call_args_list[0] + args, cwd, env, stdout = call_args + assert '--ro-bind %s %s' % (settings.ANSIBLE_VENV_PATH, settings.ANSIBLE_VENV_PATH) in ' '.join(args) # noqa + assert '--ro-bind %s %s' % (settings.AWX_VENV_PATH, settings.AWX_VENV_PATH) in ' '.join(args) # noqa + def test_awx_task_env(self): patch = mock.patch('awx.main.tasks.settings.AWX_TASK_ENV', {'FOO': 'BAR'}) patch.start() @@ -1096,6 +1107,27 @@ class TestProjectUpdateCredentials(TestJobExecution): ] } + def test_bwrap_exposes_projects_root(self): + ssh = CredentialType.defaults['ssh']() + self.instance.scm_type = 'git' + self.instance.credential = Credential( + pk=1, + credential_type=ssh, + ) + self.task.run(self.pk) + + assert self.run_pexpect.call_count == 1 + call_args, call_kwargs = self.run_pexpect.call_args_list[0] + args, cwd, env, stdout = call_args + + assert ' '.join(args).startswith('bwrap') + assert ' '.join([ + '--bind', + settings.PROJECTS_ROOT, + settings.PROJECTS_ROOT, + ]) in ' '.join(args) + assert '"scm_revision_output": "/projects/tmp' in ' 
'.join(args) + def test_username_and_password_auth(self, scm_type): ssh = CredentialType.defaults['ssh']() self.instance.scm_type = scm_type diff --git a/awx/main/utils/common.py b/awx/main/utils/common.py index 345f4790ea..bff97907b0 100644 --- a/awx/main/utils/common.py +++ b/awx/main/utils/common.py @@ -108,13 +108,14 @@ class RequireDebugTrueOrTest(logging.Filter): return settings.DEBUG or 'test' in sys.argv -def memoize(ttl=60, cache_key=None): +def memoize(ttl=60, cache_key=None, cache_name='default'): ''' Decorator to wrap a function and cache its result. ''' - from django.core.cache import cache + from django.core.cache import caches def _memoizer(f, *args, **kwargs): + cache = caches[cache_name] key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs)) value = cache.get(key) if value is None: @@ -696,8 +697,13 @@ def wrap_args_with_proot(args, cwd, **kwargs): show_paths = [cwd, kwargs['private_data_dir']] else: show_paths = [cwd] - show_paths.extend([settings.ANSIBLE_VENV_PATH, settings.AWX_VENV_PATH]) + for venv in ( + settings.ANSIBLE_VENV_PATH, + settings.AWX_VENV_PATH + ): + new_args.extend(['--ro-bind', venv, venv]) show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or []) + show_paths.extend(kwargs.get('proot_show_paths', [])) for path in sorted(set(show_paths)): if not os.path.exists(path): continue diff --git a/awx/plugins/inventory/azure_rm.py b/awx/plugins/inventory/azure_rm.py index 73b8b959d3..b3b7e1e904 100755 --- a/awx/plugins/inventory/azure_rm.py +++ b/awx/plugins/inventory/azure_rm.py @@ -49,6 +49,7 @@ Command line arguments: - tenant - ad_user - password + - cloud_environment Environment variables: - AZURE_PROFILE @@ -58,6 +59,7 @@ Environment variables: - AZURE_TENANT - AZURE_AD_USER - AZURE_PASSWORD + - AZURE_CLOUD_ENVIRONMENT Run for Specific Host ----------------------- @@ -190,22 +192,27 @@ import json import os import re import sys +import inspect +import traceback + from packaging.version import Version from os.path import expanduser +import ansible.module_utils.six.moves.urllib.parse as urlparse HAS_AZURE = True HAS_AZURE_EXC = None try: from msrestazure.azure_exceptions import CloudError + from msrestazure import azure_cloud from azure.mgmt.compute import __version__ as azure_compute_version from azure.common import AzureMissingResourceHttpError, AzureHttpError from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials - from azure.mgmt.network.network_management_client import NetworkManagementClient - from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient - from azure.mgmt.compute.compute_management_client import ComputeManagementClient + from azure.mgmt.network import NetworkManagementClient + from azure.mgmt.resource.resources import ResourceManagementClient + from azure.mgmt.compute import ComputeManagementClient except ImportError as exc: HAS_AZURE_EXC = exc HAS_AZURE = False @@ -218,7 +225,8 @@ AZURE_CREDENTIAL_ENV_MAPPING = dict( secret='AZURE_SECRET', tenant='AZURE_TENANT', ad_user='AZURE_AD_USER', - password='AZURE_PASSWORD' + password='AZURE_PASSWORD', + cloud_environment='AZURE_CLOUD_ENVIRONMENT', ) AZURE_CONFIG_SETTINGS = dict( @@ -232,7 +240,7 @@ AZURE_CONFIG_SETTINGS = dict( group_by_tag='AZURE_GROUP_BY_TAG' ) -AZURE_MIN_VERSION = "0.30.0rc5" +AZURE_MIN_VERSION = "2.0.0" def azure_id_to_dict(id): @@ -249,6 +257,7 @@ class AzureRM(object): def __init__(self, args): self._args = args + self._cloud_environment = None self._compute_client = None 
self._resource_client = None self._network_client = None @@ -262,6 +271,26 @@ class AzureRM(object): self.fail("Failed to get credentials. Either pass as parameters, set environment variables, " "or define a profile in ~/.azure/credentials.") + # if cloud_environment specified, look up/build Cloud object + raw_cloud_env = self.credentials.get('cloud_environment') + if not raw_cloud_env: + self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default + else: + # try to look up "well-known" values via the name attribute on azure_cloud members + all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] + matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] + if len(matched_clouds) == 1: + self._cloud_environment = matched_clouds[0] + elif len(matched_clouds) > 1: + self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env)) + else: + if not urlparse.urlparse(raw_cloud_env).scheme: + self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) + try: + self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) + except Exception as e: + self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) + if self.credentials.get('subscription_id', None) is None: self.fail("Credentials did not include a subscription_id value.") self.log("setting subscription_id") @@ -272,16 +301,23 @@ class AzureRM(object): self.credentials.get('tenant') is not None: self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], secret=self.credentials['secret'], - tenant=self.credentials['tenant']) + tenant=self.credentials['tenant'], + cloud_environment=self._cloud_environment) elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: - self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password']) + tenant = self.credentials.get('tenant') + if not tenant: + tenant = 'common' + self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], + self.credentials['password'], + tenant=tenant, + cloud_environment=self._cloud_environment) else: self.fail("Failed to authenticate with provided credentials. Some attributes were missing. 
" "Credentials must include client_id, secret and tenant or ad_user and password.") def log(self, msg): if self.debug: - print (msg + u'\n') + print(msg + u'\n') def fail(self, msg): raise Exception(msg) @@ -341,6 +377,10 @@ class AzureRM(object): self.log('Received credentials from parameters.') return arg_credentials + if arg_credentials['ad_user'] is not None: + self.log('Received credentials from parameters.') + return arg_credentials + # try environment env_credentials = self._get_env_credentials() if env_credentials: @@ -372,7 +412,12 @@ class AzureRM(object): def network_client(self): self.log('Getting network client') if not self._network_client: - self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id) + self._network_client = NetworkManagementClient( + self.azure_credentials, + self.subscription_id, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2017-06-01' + ) self._register('Microsoft.Network') return self._network_client @@ -380,14 +425,24 @@ class AzureRM(object): def rm_client(self): self.log('Getting resource manager client') if not self._resource_client: - self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id) + self._resource_client = ResourceManagementClient( + self.azure_credentials, + self.subscription_id, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2017-05-10' + ) return self._resource_client @property def compute_client(self): self.log('Getting compute client') if not self._compute_client: - self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id) + self._compute_client = ComputeManagementClient( + self.azure_credentials, + self.subscription_id, + base_url=self._cloud_environment.endpoints.resource_manager, + api_version='2017-03-30' + ) self._register('Microsoft.Compute') return self._compute_client @@ -440,7 +495,7 @@ class AzureInventory(object): self.include_powerstate = False self.get_inventory() - print (self._json_format_dict(pretty=self._args.pretty)) + print(self._json_format_dict(pretty=self._args.pretty)) sys.exit(0) def _parse_cli_args(self): @@ -448,13 +503,13 @@ class AzureInventory(object): parser = argparse.ArgumentParser( description='Produce an Ansible Inventory file for an Azure subscription') parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') + help='List instances (default: True)') parser.add_argument('--debug', action='store_true', default=False, - help='Send debug messages to STDOUT') + help='Send debug messages to STDOUT') parser.add_argument('--host', action='store', - help='Get all information about an instance') + help='Get all information about an instance') parser.add_argument('--pretty', action='store_true', default=False, - help='Pretty print JSON output(default: False)') + help='Pretty print JSON output(default: False)') parser.add_argument('--profile', action='store', help='Azure profile contained in ~/.azure/credentials') parser.add_argument('--subscription_id', action='store', @@ -465,10 +520,12 @@ class AzureInventory(object): help='Azure Client Secret') parser.add_argument('--tenant', action='store', help='Azure Tenant Id') - parser.add_argument('--ad-user', action='store', + parser.add_argument('--ad_user', action='store', help='Active Directory User') parser.add_argument('--password', action='store', help='password') + parser.add_argument('--cloud_environment', action='store', + help='Azure Cloud 
Environment name or metadata discovery URL') parser.add_argument('--resource-groups', action='store', help='Return inventory for comma separated list of resource group names') parser.add_argument('--tags', action='store', @@ -486,8 +543,7 @@ class AzureInventory(object): try: virtual_machines = self._compute_client.virtual_machines.list(resource_group) except Exception as exc: - sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, - str(exc))) + sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc))) if self._args.host or self.tags: selected_machines = self._selected_machines(virtual_machines) self._load_machines(selected_machines) @@ -510,7 +566,7 @@ class AzureInventory(object): for machine in machines: id_dict = azure_id_to_dict(machine.id) - #TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets + # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets # fixed, we should remove the .lower(). Opened Issue # #574: https://github.com/Azure/azure-sdk-for-python/issues/574 resource_group = id_dict['resourceGroups'].lower() @@ -538,7 +594,7 @@ class AzureInventory(object): mac_address=None, plan=(machine.plan.name if machine.plan else None), virtual_machine_size=machine.hardware_profile.vm_size, - computer_name=machine.os_profile.computer_name, + computer_name=(machine.os_profile.computer_name if machine.os_profile else None), provisioning_state=machine.provisioning_state, ) @@ -559,7 +615,7 @@ class AzureInventory(object): ) # Add windows details - if machine.os_profile.windows_configuration is not None: + if machine.os_profile is not None and machine.os_profile.windows_configuration is not None: host_vars['windows_auto_updates_enabled'] = \ machine.os_profile.windows_configuration.enable_automatic_updates host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone @@ -790,13 +846,10 @@ class AzureInventory(object): def main(): if not HAS_AZURE: - sys.exit("The Azure python sdk is not installed (try `pip install 'azure>=2.0.0rc5' --upgrade`) - {0}".format(HAS_AZURE_EXC)) - - if Version(azure_compute_version) < Version(AZURE_MIN_VERSION): - sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} " - "Do you have Azure >= 2.0.0rc5 installed? (try `pip install 'azure>=2.0.0rc5' --upgrade`)".format(AZURE_MIN_VERSION, azure_compute_version)) + sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC)) AzureInventory() + if __name__ == '__main__': main() diff --git a/awx/settings/defaults.py b/awx/settings/defaults.py index 76ce8414c1..40c005c32b 100644 --- a/awx/settings/defaults.py +++ b/awx/settings/defaults.py @@ -481,6 +481,9 @@ if is_testing(): 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, + 'ephemeral': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + }, } else: CACHES = { @@ -488,6 +491,9 @@ else: 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': 'memcached:11211', }, + 'ephemeral': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + }, } # Social Auth configuration. 
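For context on the 'ephemeral' cache added above: it is locmem-backed in both branches, even though the production default cache is memcached, presumably so that the @memoize(cache_name='ephemeral') applied to feature_enabled() in awx/conf/license.py stays per-process and never round-trips the network for license checks. A simplified sketch of how the reworked memoize resolves a named cache (the real helper in awx/main/utils/common.py also slugifies the key):

from django.core.cache import caches

def memoize(ttl=60, cache_key=None, cache_name='default'):
    def decorator(f):
        def wrapper(*args, **kwargs):
            cache = caches[cache_name]  # named backend from settings.CACHES
            key = cache_key or '%s %r %r' % (f.__name__, args, kwargs)
            value = cache.get(key)
            if value is None:  # miss: compute and store for ttl seconds
                value = f(*args, **kwargs)
                cache.set(key, value, ttl)
            return value
        return wrapper
    return decorator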
@@ -716,7 +722,7 @@ VMWARE_GROUP_FILTER = r'^.+$' VMWARE_HOST_FILTER = r'^.+$' VMWARE_EXCLUDE_EMPTY_GROUPS = True - +VMWARE_VALIDATE_CERTS = False # --------------------------- # -- Google Compute Engine -- # --------------------------- diff --git a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-ldap.form.js b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-ldap.form.js index 6bd55b801f..c4fa3c1a6b 100644 --- a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-ldap.form.js +++ b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-ldap.form.js @@ -100,7 +100,7 @@ export default ['i18n', function(i18n) { }, save: { ngClick: 'vm.formSave()', - ngDisabled: "license_type !== 'enterprise' || form.$invalid || form.$pending" + ngDisabled: "license_type !== 'enterprise' && license_type !== 'open' || configuration_ldap_template_form.$invalid || configuration_ldap_template_form.$pending" } } }; diff --git a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-radius.form.js b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-radius.form.js index 333d5be2c3..c1d6873303 100644 --- a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-radius.form.js +++ b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-radius.form.js @@ -39,7 +39,7 @@ export default ['i18n', function(i18n) { }, save: { ngClick: 'vm.formSave()', - ngDisabled: "license_type !== 'enterprise' || form.$invalid || form.$pending" + ngDisabled: "license_type !== 'enterprise' && license_type !== 'open' || configuration_radius_template_form.$invalid || configuration_radius_template_form.$pending" } } }; diff --git a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-saml.form.js b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-saml.form.js index 08dd556ba3..dd3ed43708 100644 --- a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-saml.form.js +++ b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-saml.form.js @@ -92,7 +92,7 @@ export default ['i18n', function(i18n) { }, save: { ngClick: 'vm.formSave()', - ngDisabled: "license_type !== 'enterprise' || form.$invalid || form.$pending" + ngDisabled: "license_type !== 'enterprise' && license_type !== 'open' || configuration_saml_template_form.$invalid || configuration_saml_template_form.$pending" } } }; diff --git a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-tacacs.form.js b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-tacacs.form.js index d236b5f0cf..be235399c2 100644 --- a/awx/ui/client/src/configuration/auth-form/sub-forms/auth-tacacs.form.js +++ b/awx/ui/client/src/configuration/auth-form/sub-forms/auth-tacacs.form.js @@ -52,7 +52,7 @@ export default ['i18n', function(i18n) { }, save: { ngClick: 'vm.formSave()', - ngDisabled: "license_type !== 'enterprise' || form.$invalid || form.$pending" + ngDisabled: "license_type !== 'enterprise' && license_type !== 'open' || configuration_tacacs_template_form.$invalid || configuration_tacacs_template_form.$pending" } } }; diff --git a/awx/ui/client/src/configuration/configuration.controller.js b/awx/ui/client/src/configuration/configuration.controller.js index 72cae0f2d2..465c89890e 100644 --- a/awx/ui/client/src/configuration/configuration.controller.js +++ b/awx/ui/client/src/configuration/configuration.controller.js @@ -95,7 +95,11 @@ export default [ } else { if (key === "LICENSE") { - $scope.license_type = data[key].license_type; + if (_.isEmpty(data[key])) { + $scope.license_type = "open"; + } else { + $scope.license_type = 
data[key].license_type; + } } //handle nested objects if(ConfigurationUtils.isEmpty(data[key])) { diff --git a/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.partial.html b/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.partial.html index 4660f008cb..437ff3a242 100644 --- a/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.partial.html +++ b/awx/ui/client/src/inventories-hosts/hosts/related/groups/hosts-related-groups.partial.html @@ -19,7 +19,10 @@
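Finally, a callout on the TaskManager ordering fix near the top of this diff: order_by("created").first() returns the oldest row in the Django ORM, so the dependency checks were previously comparing against the first-ever inventory/project update instead of the most recent one. A pure-Python illustration of the pitfall, with stand-in data in place of querysets:

from datetime import datetime

updates = [
    {'id': 1, 'created': datetime(2017, 1, 1)},
    {'id': 2, 'created': datetime(2017, 8, 1)},
]

# order_by("created").first() is equivalent to an ascending sort: oldest wins.
oldest = sorted(updates, key=lambda u: u['created'])[0]
assert oldest['id'] == 1

# order_by("-created").first() sorts descending, yielding the latest update,
# which is what the staleness comparison actually needs.
latest = sorted(updates, key=lambda u: u['created'], reverse=True)[0]
assert latest['id'] == 2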