diff --git a/awx/api/generics.py b/awx/api/generics.py index dddd9d9e6f..17d1bdd55e 100644 --- a/awx/api/generics.py +++ b/awx/api/generics.py @@ -63,7 +63,6 @@ __all__ = [ 'SubDetailAPIView', 'ResourceAccessList', 'ParentMixin', - 'DeleteLastUnattachLabelMixin', 'SubListAttachDetachAPIView', 'CopyAPIView', 'BaseUsersList', @@ -775,28 +774,6 @@ class SubListAttachDetachAPIView(SubListCreateAttachDetachAPIView): return {'id': None} -class DeleteLastUnattachLabelMixin(object): - """ - Models for which you want the last instance to be deleted from the database - when the last disassociate is called should inherit from this class. Further, - the model should implement is_detached() - """ - - def unattach(self, request, *args, **kwargs): - (sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request) - if res: - return res - - res = super(DeleteLastUnattachLabelMixin, self).unattach_by_id(request, sub_id) - - obj = self.model.objects.get(id=sub_id) - - if obj.is_detached(): - obj.delete() - - return res - - class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView): pass diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 0314eb1fba..47f121a58f 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -2923,6 +2923,12 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO 'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', + 'ask_execution_environment_on_launch', + 'ask_labels_on_launch', + 'ask_forks_on_launch', + 'ask_job_slice_count_on_launch', + 'ask_timeout_on_launch', + 'ask_instance_groups_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode', @@ -3185,7 +3191,7 @@ class JobRelaunchSerializer(BaseSerializer): return attrs -class JobCreateScheduleSerializer(BaseSerializer): +class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer): can_schedule = serializers.SerializerMethodField() prompts = 
serializers.SerializerMethodField() @@ -3211,11 +3217,14 @@ class JobCreateScheduleSerializer(BaseSerializer): try: config = obj.launch_config ret = config.prompts_dict(display=True) - if 'inventory' in ret: - ret['inventory'] = self._summarize('inventory', ret['inventory']) - if 'credentials' in ret: - all_creds = [self._summarize('credential', cred) for cred in ret['credentials']] - ret['credentials'] = all_creds + for field_name in ('inventory', 'execution_environment'): + if field_name in ret: + ret[field_name] = self._summarize(field_name, ret[field_name]) + for field_name, singular in (('credentials', 'credential'), ('instance_groups', 'instance_group')): + if field_name in ret: + ret[field_name] = [self._summarize(singular, obj) for obj in ret[field_name]] + if 'labels' in ret: + ret['labels'] = self._summary_field_labels(config) return ret except JobLaunchConfig.DoesNotExist: return {'all': _('Unknown, job may have been ran before launch configurations were saved.')} @@ -3388,6 +3397,9 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + class Meta: model = WorkflowJobTemplate fields = ( @@ -3406,6 +3418,11 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo 'webhook_service', 'webhook_credential', '-execution_environment', + 'ask_labels_on_launch', + 'ask_skip_tags_on_launch', + 'ask_tags_on_launch', + 'skip_tags', + 'job_tags', ) def get_related(self, obj): @@ -3449,7 +3466,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo # process char_prompts, these are not direct 
fields on the model mock_obj = self.Meta.model() - for field_name in ('scm_branch', 'limit'): + for field_name in ('scm_branch', 'limit', 'skip_tags', 'job_tags'): if field_name in attrs: setattr(mock_obj, field_name, attrs[field_name]) attrs.pop(field_name) @@ -3475,6 +3492,9 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer): limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) + class Meta: model = WorkflowJob fields = ( @@ -3494,6 +3514,8 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer): 'webhook_service', 'webhook_credential', 'webhook_guid', + 'skip_tags', + 'job_tags', ) def get_related(self, obj): @@ -3610,6 +3632,9 @@ class LaunchConfigurationBaseSerializer(BaseSerializer): skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None) verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES) + forks = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None) + job_slice_count = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None) + timeout = serializers.IntegerField(required=False, allow_null=True, default=None) exclude_errors = () class Meta: @@ -3625,13 +3650,21 @@ class LaunchConfigurationBaseSerializer(BaseSerializer): 'skip_tags', 'diff_mode', 'verbosity', + 'execution_environment', + 'forks', + 'job_slice_count', + 'timeout', ) def get_related(self, obj): res = super(LaunchConfigurationBaseSerializer, self).get_related(obj) if obj.inventory_id: 
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id}) + if obj.execution_environment_id: + res['execution_environment'] = self.reverse('api:execution_environment_detail', kwargs={'pk': obj.execution_environment_id}) + res['labels'] = self.reverse('api:{}_labels_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk}) res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk}) + res['instance_groups'] = self.reverse('api:{}_instance_groups_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk}) return res def _build_mock_obj(self, attrs): @@ -4083,7 +4116,6 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer): class JobLaunchSerializer(BaseSerializer): - # Representational fields passwords_needed_to_start = serializers.ReadOnlyField() can_start_without_user_input = serializers.BooleanField(read_only=True) @@ -4106,6 +4138,12 @@ class JobLaunchSerializer(BaseSerializer): skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True) limit = serializers.CharField(required=False, write_only=True, allow_blank=True) verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True) + execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True) + labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True) + forks = serializers.IntegerField(required=False, write_only=True, min_value=0) + job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0) + timeout = serializers.IntegerField(required=False, write_only=True) + instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True) class Meta: model = JobTemplate @@ -4133,6 +4171,12 @@ class 
JobLaunchSerializer(BaseSerializer): 'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', + 'ask_execution_environment_on_launch', + 'ask_labels_on_launch', + 'ask_forks_on_launch', + 'ask_job_slice_count_on_launch', + 'ask_timeout_on_launch', + 'ask_instance_groups_on_launch', 'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start', @@ -4140,6 +4184,12 @@ class JobLaunchSerializer(BaseSerializer): 'job_template_data', 'defaults', 'verbosity', + 'execution_environment', + 'labels', + 'forks', + 'job_slice_count', + 'timeout', + 'instance_groups', ) read_only_fields = ( 'ask_scm_branch_on_launch', @@ -4152,6 +4202,12 @@ class JobLaunchSerializer(BaseSerializer): 'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', + 'ask_execution_environment_on_launch', + 'ask_labels_on_launch', + 'ask_forks_on_launch', + 'ask_job_slice_count_on_launch', + 'ask_timeout_on_launch', + 'ask_instance_groups_on_launch', ) def get_credential_needed_to_start(self, obj): @@ -4176,6 +4232,17 @@ class JobLaunchSerializer(BaseSerializer): if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields: cred_dict['vault_id'] = cred.get_input('vault_id', default=None) defaults_dict.setdefault(field_name, []).append(cred_dict) + elif field_name == 'execution_environment': + if obj.execution_environment_id: + defaults_dict[field_name] = {'id': obj.execution_environment.id, 'name': obj.execution_environment.name} + else: + defaults_dict[field_name] = {} + elif field_name == 'labels': + for label in obj.labels.all(): + label_dict = {'id': label.id, 'name': label.name} + defaults_dict.setdefault(field_name, []).append(label_dict) + elif field_name == 'instance_groups': + defaults_dict[field_name] = [] else: defaults_dict[field_name] = getattr(obj, field_name) return defaults_dict @@ -4283,6 +4350,10 @@ class WorkflowJobLaunchSerializer(BaseSerializer): scm_branch = 
serializers.CharField(required=False, write_only=True, allow_blank=True) workflow_job_template_data = serializers.SerializerMethodField() + labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True) + skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True) + job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True) + class Meta: model = WorkflowJobTemplate fields = ( @@ -4302,8 +4373,22 @@ class WorkflowJobLaunchSerializer(BaseSerializer): 'workflow_job_template_data', 'survey_enabled', 'ask_variables_on_launch', + 'ask_labels_on_launch', + 'labels', + 'ask_skip_tags_on_launch', + 'ask_tags_on_launch', + 'skip_tags', + 'job_tags', + ) + read_only_fields = ( + 'ask_inventory_on_launch', + 'ask_variables_on_launch', + 'ask_skip_tags_on_launch', + 'ask_labels_on_launch', + 'ask_limit_on_launch', + 'ask_scm_branch_on_launch', + 'ask_tags_on_launch', ) - read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch') def get_survey_enabled(self, obj): if obj: @@ -4311,10 +4396,15 @@ class WorkflowJobLaunchSerializer(BaseSerializer): return False def get_defaults(self, obj): + defaults_dict = {} for field_name in WorkflowJobTemplate.get_ask_mapping().keys(): if field_name == 'inventory': defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None)) + elif field_name == 'labels': + for label in obj.labels.all(): + label_dict = {"id": label.id, "name": label.name} + defaults_dict.setdefault(field_name, []).append(label_dict) else: defaults_dict[field_name] = getattr(obj, field_name) return defaults_dict @@ -4323,6 +4413,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer): return dict(name=obj.name, id=obj.id, description=obj.description) def validate(self, attrs): + template = self.instance accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs) @@ -4340,6 
+4431,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer): WFJT_inventory = template.inventory WFJT_limit = template.limit WFJT_scm_branch = template.scm_branch + super(WorkflowJobLaunchSerializer, self).validate(attrs) template.extra_vars = WFJT_extra_vars template.inventory = WFJT_inventory @@ -4731,6 +4823,8 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria if isinstance(obj.unified_job_template, SystemJobTemplate): summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type + # We are not showing instance groups on summary fields because JTs don't either + if 'inventory' in summary_fields: return summary_fields diff --git a/awx/api/urls/label.py b/awx/api/urls/label.py index 5fc0a4f629..f7158275ae 100644 --- a/awx/api/urls/label.py +++ b/awx/api/urls/label.py @@ -3,7 +3,7 @@ from django.urls import re_path -from awx.api.views import LabelList, LabelDetail +from awx.api.views.labels import LabelList, LabelDetail urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P[0-9]+)/$', LabelDetail.as_view(), name='label_detail')] diff --git a/awx/api/urls/schedule.py b/awx/api/urls/schedule.py index 87907eda8f..40d839199b 100644 --- a/awx/api/urls/schedule.py +++ b/awx/api/urls/schedule.py @@ -3,7 +3,7 @@ from django.urls import re_path -from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList +from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList, ScheduleLabelsList, ScheduleInstanceGroupList urls = [ @@ -11,6 +11,8 @@ urls = [ re_path(r'^(?P[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'), re_path(r'^(?P[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'), re_path(r'^(?P[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'), + re_path(r'^(?P[0-9]+)/labels/$', ScheduleLabelsList.as_view(), 
name='schedule_labels_list'), + re_path(r'^(?P[0-9]+)/instance_groups/$', ScheduleInstanceGroupList.as_view(), name='schedule_instance_groups_list'), ] __all__ = ['urls'] diff --git a/awx/api/urls/workflow_job_node.py b/awx/api/urls/workflow_job_node.py index 5b246c95b4..da029b34c2 100644 --- a/awx/api/urls/workflow_job_node.py +++ b/awx/api/urls/workflow_job_node.py @@ -10,6 +10,8 @@ from awx.api.views import ( WorkflowJobNodeFailureNodesList, WorkflowJobNodeAlwaysNodesList, WorkflowJobNodeCredentialsList, + WorkflowJobNodeLabelsList, + WorkflowJobNodeInstanceGroupsList, ) @@ -20,6 +22,8 @@ urls = [ re_path(r'^(?P[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'), re_path(r'^(?P[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'), re_path(r'^(?P[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'), + re_path(r'^(?P[0-9]+)/labels/$', WorkflowJobNodeLabelsList.as_view(), name='workflow_job_node_labels_list'), + re_path(r'^(?P[0-9]+)/instance_groups/$', WorkflowJobNodeInstanceGroupsList.as_view(), name='workflow_job_node_instance_groups_list'), ] __all__ = ['urls'] diff --git a/awx/api/urls/workflow_job_template_node.py b/awx/api/urls/workflow_job_template_node.py index bcd61aed67..d4d992a043 100644 --- a/awx/api/urls/workflow_job_template_node.py +++ b/awx/api/urls/workflow_job_template_node.py @@ -11,6 +11,8 @@ from awx.api.views import ( WorkflowJobTemplateNodeAlwaysNodesList, WorkflowJobTemplateNodeCredentialsList, WorkflowJobTemplateNodeCreateApproval, + WorkflowJobTemplateNodeLabelsList, + WorkflowJobTemplateNodeInstanceGroupsList, ) @@ -21,6 +23,8 @@ urls = [ re_path(r'^(?P[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'), re_path(r'^(?P[0-9]+)/always_nodes/$', 
WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'), re_path(r'^(?P[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'), + re_path(r'^(?P[0-9]+)/labels/$', WorkflowJobTemplateNodeLabelsList.as_view(), name='workflow_job_template_node_labels_list'), + re_path(r'^(?P[0-9]+)/instance_groups/$', WorkflowJobTemplateNodeInstanceGroupsList.as_view(), name='workflow_job_template_node_instance_groups_list'), re_path(r'^(?P[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'), ] diff --git a/awx/api/views/__init__.py b/awx/api/views/__init__.py index f6b7fbbabe..dfc1140a70 100644 --- a/awx/api/views/__init__.py +++ b/awx/api/views/__init__.py @@ -22,6 +22,7 @@ from django.conf import settings from django.core.exceptions import FieldError, ObjectDoesNotExist from django.db.models import Q, Sum from django.db import IntegrityError, ProgrammingError, transaction, connection +from django.db.models.fields.related import ManyToManyField, ForeignKey from django.shortcuts import get_object_or_404 from django.utils.safestring import mark_safe from django.utils.timezone import now @@ -68,7 +69,6 @@ from awx.api.generics import ( APIView, BaseUsersList, CopyAPIView, - DeleteLastUnattachLabelMixin, GenericAPIView, ListAPIView, ListCreateAPIView, @@ -85,6 +85,7 @@ from awx.api.generics import ( SubListCreateAttachDetachAPIView, SubListDestroyAPIView, ) +from awx.api.views.labels import LabelSubListCreateAttachDetachView from awx.api.versioning import reverse from awx.main import models from awx.main.utils import ( @@ -617,6 +618,19 @@ class ScheduleCredentialsList(LaunchConfigCredentialsBase): parent_model = models.Schedule +class ScheduleLabelsList(LabelSubListCreateAttachDetachView): + + parent_model = models.Schedule + + +class ScheduleInstanceGroupList(SubListAttachDetachAPIView): 
+ + model = models.InstanceGroup + serializer_class = serializers.InstanceGroupSerializer + parent_model = models.Schedule + relationship = 'instance_groups' + + class ScheduleUnifiedJobsList(SubListAPIView): model = models.UnifiedJob @@ -2381,10 +2395,13 @@ class JobTemplateLaunch(RetrieveAPIView): for field, ask_field_name in modified_ask_mapping.items(): if not getattr(obj, ask_field_name): data.pop(field, None) - elif field == 'inventory': + elif isinstance(getattr(obj.__class__, field).field, ForeignKey): data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None) - elif field == 'credentials': - data[field] = [cred.id for cred in obj.credentials.all()] + elif isinstance(getattr(obj.__class__, field).field, ManyToManyField): + if field == 'instance_groups': + data[field] = [] + continue + data[field] = [item.id for item in getattr(obj, field).all()] else: data[field] = getattr(obj, field) return data @@ -2719,28 +2736,9 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView): return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created) -class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView): +class JobTemplateLabelList(LabelSubListCreateAttachDetachView): - model = models.Label - serializer_class = serializers.LabelSerializer parent_model = models.JobTemplate - relationship = 'labels' - - def post(self, request, *args, **kwargs): - # If a label already exists in the database, attach it instead of erroring out - # that it already exists - if 'id' not in request.data and 'name' in request.data and 'organization' in request.data: - existing = models.Label.objects.filter(name=request.data['name'], organization_id=request.data['organization']) - if existing.exists(): - existing = existing[0] - request.data['id'] = existing.id - del request.data['name'] - del request.data['organization'] - if models.Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100: - return 
Response( - dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST - ) - return super(JobTemplateLabelList, self).post(request, *args, **kwargs) class JobTemplateCallback(GenericAPIView): @@ -2966,6 +2964,22 @@ class WorkflowJobNodeCredentialsList(SubListAPIView): relationship = 'credentials' +class WorkflowJobNodeLabelsList(SubListAPIView): + + model = models.Label + serializer_class = serializers.LabelSerializer + parent_model = models.WorkflowJobNode + relationship = 'labels' + + +class WorkflowJobNodeInstanceGroupsList(SubListAttachDetachAPIView): + + model = models.InstanceGroup + serializer_class = serializers.InstanceGroupSerializer + parent_model = models.WorkflowJobNode + relationship = 'instance_groups' + + class WorkflowJobTemplateNodeList(ListCreateAPIView): model = models.WorkflowJobTemplateNode @@ -2984,6 +2998,19 @@ class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase): parent_model = models.WorkflowJobTemplateNode +class WorkflowJobTemplateNodeLabelsList(LabelSubListCreateAttachDetachView): + + parent_model = models.WorkflowJobTemplateNode + + +class WorkflowJobTemplateNodeInstanceGroupsList(SubListAttachDetachAPIView): + + model = models.InstanceGroup + serializer_class = serializers.InstanceGroupSerializer + parent_model = models.WorkflowJobTemplateNode + relationship = 'instance_groups' + + class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView): model = models.WorkflowJobTemplateNode @@ -3196,13 +3223,17 @@ class WorkflowJobTemplateLaunch(RetrieveAPIView): data['extra_vars'] = extra_vars modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping() modified_ask_mapping.pop('extra_vars') - for field_name, ask_field_name in obj.get_ask_mapping().items(): + + for field, ask_field_name in modified_ask_mapping.items(): if not getattr(obj, ask_field_name): - data.pop(field_name, None) - 
elif field_name == 'inventory': - data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None) + data.pop(field, None) + elif isinstance(getattr(obj.__class__, field).field, ForeignKey): + data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None) + elif isinstance(getattr(obj.__class__, field).field, ManyToManyField): + data[field] = [item.id for item in getattr(obj, field).all()] else: - data[field_name] = getattr(obj, field_name) + data[field] = getattr(obj, field) + return data def post(self, request, *args, **kwargs): @@ -3689,15 +3720,21 @@ class JobCreateSchedule(RetrieveAPIView): extra_data=config.extra_data, survey_passwords=config.survey_passwords, inventory=config.inventory, + execution_environment=config.execution_environment, char_prompts=config.char_prompts, credentials=set(config.credentials.all()), + labels=set(config.labels.all()), + instance_groups=list(config.instance_groups.all()), ) if not request.user.can_access(models.Schedule, 'add', schedule_data): raise PermissionDenied() - creds_list = schedule_data.pop('credentials') + related_fields = ('credentials', 'labels', 'instance_groups') + related = [schedule_data.pop(relationship) for relationship in related_fields] schedule = models.Schedule.objects.create(**schedule_data) - schedule.credentials.add(*creds_list) + for relationship, items in zip(related_fields, related): + for item in items: + getattr(schedule, relationship).add(item) data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model @@ -4428,18 +4465,6 @@ class NotificationDetail(RetrieveAPIView): serializer_class = serializers.NotificationSerializer -class LabelList(ListCreateAPIView): - - model = models.Label - serializer_class = serializers.LabelSerializer - - -class LabelDetail(RetrieveUpdateAPIView): - - model = models.Label - serializer_class = serializers.LabelSerializer - - class 
ActivityStreamList(SimpleListAPIView): model = models.ActivityStream diff --git a/awx/api/views/inventory.py b/awx/api/views/inventory.py index 65e59790ac..31b9cf23ae 100644 --- a/awx/api/views/inventory.py +++ b/awx/api/views/inventory.py @@ -18,8 +18,6 @@ from rest_framework import status # AWX from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate -from awx.main.models.label import Label - from awx.api.generics import ( ListCreateAPIView, RetrieveUpdateDestroyAPIView, @@ -27,9 +25,8 @@ from awx.api.generics import ( SubListAttachDetachAPIView, ResourceAccessList, CopyAPIView, - DeleteLastUnattachLabelMixin, - SubListCreateAttachDetachAPIView, ) +from awx.api.views.labels import LabelSubListCreateAttachDetachView from awx.api.serializers import ( @@ -39,7 +36,6 @@ from awx.api.serializers import ( InstanceGroupSerializer, InventoryUpdateEventSerializer, JobTemplateSerializer, - LabelSerializer, ) from awx.api.views.mixin import RelatedJobsPreventDeleteMixin @@ -157,28 +153,9 @@ class InventoryJobTemplateList(SubListAPIView): return qs.filter(inventory=parent) -class InventoryLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView, SubListAPIView): +class InventoryLabelList(LabelSubListCreateAttachDetachView): - model = Label - serializer_class = LabelSerializer parent_model = Inventory - relationship = 'labels' - - def post(self, request, *args, **kwargs): - # If a label already exists in the database, attach it instead of erroring out - # that it already exists - if 'id' not in request.data and 'name' in request.data and 'organization' in request.data: - existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization']) - if existing.exists(): - existing = existing[0] - request.data['id'] = existing.id - del request.data['name'] - del request.data['organization'] - if Label.objects.filter(inventory_labels=self.kwargs['pk']).count() 
> 100: - return Response( - dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST - ) - return super(InventoryLabelList, self).post(request, *args, **kwargs) class InventoryCopy(CopyAPIView): diff --git a/awx/api/views/labels.py b/awx/api/views/labels.py new file mode 100644 index 0000000000..95a7f42941 --- /dev/null +++ b/awx/api/views/labels.py @@ -0,0 +1,71 @@ +# AWX +from awx.api.generics import SubListCreateAttachDetachAPIView, RetrieveUpdateAPIView, ListCreateAPIView +from awx.main.models import Label +from awx.api.serializers import LabelSerializer + +# Django +from django.utils.translation import gettext_lazy as _ + +# Django REST Framework +from rest_framework.response import Response +from rest_framework.status import HTTP_400_BAD_REQUEST + + +class LabelSubListCreateAttachDetachView(SubListCreateAttachDetachAPIView): + """ + For related labels lists like /api/v2/inventories/N/labels/ + + We want the last instance to be deleted from the database + when the last disassociate happens.
+ + Subclasses need to define parent_model + """ + + model = Label + serializer_class = LabelSerializer + relationship = 'labels' + + def unattach(self, request, *args, **kwargs): + (sub_id, res) = super().unattach_validate(request) + if res: + return res + + res = super().unattach_by_id(request, sub_id) + + obj = self.model.objects.get(id=sub_id) + + if obj.is_detached(): + obj.delete() + + return res + + def post(self, request, *args, **kwargs): + # If a label already exists in the database, attach it instead of erroring out + # that it already exists + if 'id' not in request.data and 'name' in request.data and 'organization' in request.data: + existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization']) + if existing.exists(): + existing = existing[0] + request.data['id'] = existing.id + del request.data['name'] + del request.data['organization'] + + # Give a 400 error if we have attached too many labels to this object + label_filter = self.parent_model._meta.get_field(self.relationship).remote_field.name + if Label.objects.filter(**{label_filter: self.kwargs['pk']}).count() > 100: + return Response(dict(msg=_(f'Maximum number of labels for {self.parent_model._meta.verbose_name_raw} reached.')), status=HTTP_400_BAD_REQUEST) + + return super().post(request, *args, **kwargs) + + +class LabelDetail(RetrieveUpdateAPIView): + + model = Label + serializer_class = LabelSerializer + + +class LabelList(ListCreateAPIView): + + name = _("Labels") + model = Label + serializer_class = LabelSerializer diff --git a/awx/main/access.py b/awx/main/access.py index ba91d290c1..e8deea8f36 100644 --- a/awx/main/access.py +++ b/awx/main/access.py @@ -12,7 +12,7 @@ from django.conf import settings from django.db.models import Q, Prefetch from django.contrib.auth.models import User from django.utils.translation import gettext_lazy as _ -from django.core.exceptions import ObjectDoesNotExist +from django.core.exceptions import 
ObjectDoesNotExist, FieldDoesNotExist # Django REST Framework from rest_framework.exceptions import ParseError, PermissionDenied @@ -281,13 +281,23 @@ class BaseAccess(object): """ return True + def assure_relationship_exists(self, obj, relationship): + if '.' in relationship: + return # not attempting validation for complex relationships now + try: + obj._meta.get_field(relationship) + except FieldDoesNotExist: + raise NotImplementedError(f'The relationship {relationship} does not exist for model {type(obj)}') + def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): + self.assure_relationship_exists(obj, relationship) if skip_sub_obj_read_check: return self.can_change(obj, None) else: return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj)) def can_unattach(self, obj, sub_obj, relationship, data=None): + self.assure_relationship_exists(obj, relationship) return self.can_change(obj, data) def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False): @@ -328,6 +338,8 @@ class BaseAccess(object): role = getattr(resource, role_field, None) if role is None: # Handle special case where resource does not have direct roles + if role_field == 'read_role': + return self.user.can_access(type(resource), 'read', resource) access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field] return self.user.can_access(type(resource), access_method_type, resource, None) return self.user in role @@ -499,6 +511,21 @@ class BaseAccess(object): return False +class UnifiedCredentialsMixin(BaseAccess): + """ + The credentials many-to-many is a standard relationship for JT, jobs, and others + Permission to attach is always use permission, and permission to unattach is admin to the parent object + """ + + @check_superuser + def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): + if relationship == 'credentials': + if not 
isinstance(sub_obj, Credential): + raise RuntimeError(f'Can only attach credentials to credentials relationship, got {type(sub_obj)}') + return self.can_change(obj, None) and (self.user in sub_obj.use_role) + return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) + + class NotificationAttachMixin(BaseAccess): """For models that can have notifications attached @@ -1031,7 +1058,7 @@ class GroupAccess(BaseAccess): return bool(obj and self.user in obj.inventory.admin_role) -class InventorySourceAccess(NotificationAttachMixin, BaseAccess): +class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess): """ I can see inventory sources whenever I can see their inventory. I can change inventory sources whenever I can change their inventory. @@ -1075,18 +1102,6 @@ class InventorySourceAccess(NotificationAttachMixin, BaseAccess): return self.user in obj.inventory.update_role return False - @check_superuser - def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): - if relationship == 'credentials' and isinstance(sub_obj, Credential): - return obj and obj.inventory and self.user in obj.inventory.admin_role and self.user in sub_obj.use_role - return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) - - @check_superuser - def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs): - if relationship == 'credentials' and isinstance(sub_obj, Credential): - return obj and obj.inventory and self.user in obj.inventory.admin_role - return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs) - class InventoryUpdateAccess(BaseAccess): """ @@ -1485,7 +1500,7 @@ class ProjectUpdateAccess(BaseAccess): return obj and self.user in obj.project.admin_role -class JobTemplateAccess(NotificationAttachMixin, BaseAccess): +class 
JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess): """ I can see job templates when: - I have read role for the job template. @@ -1549,8 +1564,7 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess): if self.user not in inventory.use_role: return False - ee = get_value(ExecutionEnvironment, 'execution_environment') - if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee): + if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'): return False project = get_value(Project, 'project') @@ -1600,10 +1614,8 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess): if self.changes_are_non_sensitive(obj, data): return True - if data.get('execution_environment'): - ee = get_object_from_data('execution_environment', ExecutionEnvironment, data) - if not self.user.can_access(ExecutionEnvironment, 'read', ee): - return False + if not self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'): + return False for required_field, cls in (('inventory', Inventory), ('project', Project)): is_mandatory = True @@ -1667,17 +1679,13 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess): if not obj.organization: return False return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role - if relationship == 'credentials' and isinstance(sub_obj, Credential): - return self.user in obj.admin_role and self.user in sub_obj.use_role return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) @check_superuser def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs): if relationship == "instance_groups": return self.can_attach(obj, sub_obj, relationship, *args, **kwargs) - if relationship == 'credentials' and isinstance(sub_obj, Credential): - return self.user in obj.admin_role - return super(JobTemplateAccess, 
self).can_attach(obj, sub_obj, relationship, *args, **kwargs) + return super(JobTemplateAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs) class JobAccess(BaseAccess): @@ -1824,7 +1832,7 @@ class SystemJobAccess(BaseAccess): return False # no relaunching of system jobs -class JobLaunchConfigAccess(BaseAccess): +class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess): """ Launch configs must have permissions checked for - relaunching @@ -1832,63 +1840,69 @@ class JobLaunchConfigAccess(BaseAccess): In order to create a new object with a copy of this launch config, I need: - use access to related inventory (if present) + - read access to Execution Environment (if present), unless the specified ee is already in the template - use role to many-related credentials (if any present) + - read access to many-related labels (if any present), unless the specified label is already in the template + - read access to many-related instance groups (if any present), unless the specified instance group is already in the template """ model = JobLaunchConfig select_related = 'job' prefetch_related = ('credentials', 'inventory') - def _unusable_creds_exist(self, qs): - return qs.exclude(pk__in=Credential._accessible_pk_qs(Credential, self.user, 'use_role')).exists() + M2M_CHECKS = {'credentials': Credential, 'labels': Label, 'instance_groups': InstanceGroup} - def has_credentials_access(self, obj): - # user has access if no related credentials exist that the user lacks use role for - return not self._unusable_creds_exist(obj.credentials) + def _related_filtered_queryset(self, cls): + if cls is Label: + return LabelAccess(self.user).filtered_queryset() + elif cls is InstanceGroup: + return InstanceGroupAccess(self.user).filtered_queryset() + else: + return cls._accessible_pk_qs(cls, self.user, 'use_role') + + def has_obj_m2m_access(self, obj): + for relationship, cls in self.M2M_CHECKS.items(): + if getattr(obj, 
relationship).exclude(pk__in=self._related_filtered_queryset(cls)).exists(): + return False + return True @check_superuser def can_add(self, data, template=None): # This is a special case, we don't check related many-to-many elsewhere # launch RBAC checks use this - if 'credentials' in data and data['credentials'] or 'reference_obj' in data: - if 'reference_obj' in data: - prompted_cred_qs = data['reference_obj'].credentials.all() - else: - # If given model objects, only use the primary key from them - cred_pks = [cred.pk for cred in data['credentials']] - if template: - for cred in template.credentials.all(): - if cred.pk in cred_pks: - cred_pks.remove(cred.pk) - prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks) - if self._unusable_creds_exist(prompted_cred_qs): + if 'reference_obj' in data: + if not self.has_obj_m2m_access(data['reference_obj']): return False - return self.check_related('inventory', Inventory, data, role_field='use_role') + else: + for relationship, cls in self.M2M_CHECKS.items(): + if relationship in data and data[relationship]: + # If given model objects, only use the primary key from them + sub_obj_pks = [sub_obj.pk for sub_obj in data[relationship]] + if template: + for sub_obj in getattr(template, relationship).all(): + if sub_obj.pk in sub_obj_pks: + sub_obj_pks.remove(sub_obj.pk) + if cls.objects.filter(pk__in=sub_obj_pks).exclude(pk__in=self._related_filtered_queryset(cls)).exists(): + return False + return self.check_related('inventory', Inventory, data, role_field='use_role') and self.check_related( + 'execution_environment', ExecutionEnvironment, data, role_field='read_role' + ) @check_superuser def can_use(self, obj): - return self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and self.has_credentials_access(obj) + return ( + self.has_obj_m2m_access(obj) + and self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) + and 
self.check_related('execution_environment', ExecutionEnvironment, {}, obj=obj, role_field='read_role') + ) def can_change(self, obj, data): - return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') - - def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): - if isinstance(sub_obj, Credential) and relationship == 'credentials': - return self.user in sub_obj.use_role - else: - raise NotImplementedError('Only credentials can be attached to launch configurations.') - - def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): - if isinstance(sub_obj, Credential) and relationship == 'credentials': - if skip_sub_obj_read_check: - return True - else: - return self.user in sub_obj.read_role - else: - raise NotImplementedError('Only credentials can be attached to launch configurations.') + return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') and self.check_related( + 'execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role' + ) -class WorkflowJobTemplateNodeAccess(BaseAccess): +class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess): """ I can see/use a WorkflowJobTemplateNode if I have read permission to associated Workflow Job Template @@ -1911,7 +1925,7 @@ class WorkflowJobTemplateNodeAccess(BaseAccess): """ model = WorkflowJobTemplateNode - prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'credentials', 'workflow_job_template') + prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'workflow_job_template') def filtered_queryset(self): return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role')) @@ -1923,7 +1937,8 @@ class WorkflowJobTemplateNodeAccess(BaseAccess): return ( self.check_related('workflow_job_template', WorkflowJobTemplate, data, 
mandatory=True) and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') - and JobLaunchConfigAccess(self.user).can_add(data) + and self.check_related('inventory', Inventory, data, role_field='use_role') + and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role') ) def wfjt_admin(self, obj): @@ -1932,17 +1947,14 @@ class WorkflowJobTemplateNodeAccess(BaseAccess): else: return self.user in obj.workflow_job_template.admin_role - def ujt_execute(self, obj): + def ujt_execute(self, obj, data=None): if not obj.unified_job_template: return True - return self.check_related('unified_job_template', UnifiedJobTemplate, {}, obj=obj, role_field='execute_role', mandatory=True) + return self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True) def can_change(self, obj, data): - if not data: - return True - # should not be able to edit the prompts if lacking access to UJT or WFJT - return self.ujt_execute(obj) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data) + return self.ujt_execute(obj, data=data) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data) def can_delete(self, obj): return self.wfjt_admin(obj) @@ -1955,29 +1967,14 @@ class WorkflowJobTemplateNodeAccess(BaseAccess): return True def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): - if not self.wfjt_admin(obj): - return False - if relationship == 'credentials': - # Need permission to related template to attach a credential - if not self.ujt_execute(obj): - return False - return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) - elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): - return self.check_same_WFJT(obj, sub_obj) - else: - raise NotImplementedError('Relationship {} not 
understood for WFJT nodes.'.format(relationship)) + if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): + return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj) + return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) - def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): - if not self.wfjt_admin(obj): - return False - if relationship == 'credentials': - if not self.ujt_execute(obj): - return False - return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) - elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): - return self.check_same_WFJT(obj, sub_obj) - else: - raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship)) + def can_unattach(self, obj, sub_obj, relationship, data=None): + if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'): + return self.wfjt_admin(obj) + return super().can_unattach(obj, sub_obj, relationship, data=None) class WorkflowJobNodeAccess(BaseAccess): @@ -2052,13 +2049,10 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess): if not data: # So the browseable API will work return Organization.accessible_objects(self.user, 'workflow_admin_role').exists() - if data.get('execution_environment'): - ee = get_object_from_data('execution_environment', ExecutionEnvironment, data) - if not self.user.can_access(ExecutionEnvironment, 'read', ee): - return False - - return self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and self.check_related( - 'inventory', Inventory, data, role_field='use_role' + return bool( + self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) + and self.check_related('inventory', Inventory, data, role_field='use_role') + and 
self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role') ) def can_copy(self, obj): @@ -2104,14 +2098,10 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess): if self.user.is_superuser: return True - if data and data.get('execution_environment'): - ee = get_object_from_data('execution_environment', ExecutionEnvironment, data) - if not self.user.can_access(ExecutionEnvironment, 'read', ee): - return False - return ( self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj) and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj) + and self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role') and self.user in obj.admin_role ) @@ -2518,7 +2508,7 @@ class UnifiedJobAccess(BaseAccess): return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True) -class ScheduleAccess(BaseAccess): +class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess): """ I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access """ @@ -2559,12 +2549,6 @@ class ScheduleAccess(BaseAccess): def can_delete(self, obj): return self.can_change(obj, {}) - def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): - return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) - - def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): - return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) - class NotificationTemplateAccess(BaseAccess): """ diff --git a/awx/main/migrations/0169_jt_prompt_everything_on_launch.py b/awx/main/migrations/0169_jt_prompt_everything_on_launch.py new file mode 100644 index 0000000000..b31f66e139 --- /dev/null +++ 
b/awx/main/migrations/0169_jt_prompt_everything_on_launch.py @@ -0,0 +1,225 @@ +# Generated by Django 3.2.13 on 2022-09-15 14:07 + +import awx.main.fields +import awx.main.utils.polymorphic +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0168_inventoryupdate_scm_revision'), + ] + + operations = [ + migrations.AddField( + model_name='joblaunchconfig', + name='execution_environment', + field=models.ForeignKey( + blank=True, + default=None, + help_text='The container image to be used for execution.', + null=True, + on_delete=awx.main.utils.polymorphic.SET_NULL, + related_name='joblaunchconfig_as_prompt', + to='main.executionenvironment', + ), + ), + migrations.AddField( + model_name='joblaunchconfig', + name='labels', + field=models.ManyToManyField(related_name='joblaunchconfig_labels', to='main.Label'), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_execution_environment_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_forks_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_instance_groups_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_job_slice_count_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_labels_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='jobtemplate', + name='ask_timeout_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='schedule', + name='execution_environment', + field=models.ForeignKey( + blank=True, + default=None, 
+ help_text='The container image to be used for execution.', + null=True, + on_delete=awx.main.utils.polymorphic.SET_NULL, + related_name='schedule_as_prompt', + to='main.executionenvironment', + ), + ), + migrations.AddField( + model_name='schedule', + name='labels', + field=models.ManyToManyField(related_name='schedule_labels', to='main.Label'), + ), + migrations.AddField( + model_name='workflowjobnode', + name='execution_environment', + field=models.ForeignKey( + blank=True, + default=None, + help_text='The container image to be used for execution.', + null=True, + on_delete=awx.main.utils.polymorphic.SET_NULL, + related_name='workflowjobnode_as_prompt', + to='main.executionenvironment', + ), + ), + migrations.AddField( + model_name='workflowjobnode', + name='labels', + field=models.ManyToManyField(related_name='workflowjobnode_labels', to='main.Label'), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='ask_labels_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='ask_skip_tags_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='workflowjobtemplate', + name='ask_tags_on_launch', + field=awx.main.fields.AskForField(blank=True, default=False), + ), + migrations.AddField( + model_name='workflowjobtemplatenode', + name='execution_environment', + field=models.ForeignKey( + blank=True, + default=None, + help_text='The container image to be used for execution.', + null=True, + on_delete=awx.main.utils.polymorphic.SET_NULL, + related_name='workflowjobtemplatenode_as_prompt', + to='main.executionenvironment', + ), + ), + migrations.AddField( + model_name='workflowjobtemplatenode', + name='labels', + field=models.ManyToManyField(related_name='workflowjobtemplatenode_labels', to='main.Label'), + ), + migrations.CreateModel( + name='WorkflowJobTemplateNodeBaseInstanceGroupMembership', + 
fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)), + ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')), + ('workflowjobtemplatenode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobtemplatenode')), + ], + ), + migrations.CreateModel( + name='WorkflowJobNodeBaseInstanceGroupMembership', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)), + ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')), + ('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobnode')), + ], + ), + migrations.CreateModel( + name='WorkflowJobInstanceGroupMembership', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)), + ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')), + ('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjob')), + ], + ), + migrations.CreateModel( + name='ScheduleInstanceGroupMembership', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)), + ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')), + ('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.schedule')), + ], + ), + migrations.CreateModel( + 
name='JobLaunchConfigInstanceGroupMembership', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('position', models.PositiveIntegerField(db_index=True, default=None, null=True)), + ('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')), + ('joblaunchconfig', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.joblaunchconfig')), + ], + ), + migrations.AddField( + model_name='joblaunchconfig', + name='instance_groups', + field=awx.main.fields.OrderedManyToManyField( + blank=True, editable=False, related_name='joblaunchconfigs', through='main.JobLaunchConfigInstanceGroupMembership', to='main.InstanceGroup' + ), + ), + migrations.AddField( + model_name='schedule', + name='instance_groups', + field=awx.main.fields.OrderedManyToManyField( + blank=True, editable=False, related_name='schedule_instance_groups', through='main.ScheduleInstanceGroupMembership', to='main.InstanceGroup' + ), + ), + migrations.AddField( + model_name='workflowjob', + name='instance_groups', + field=awx.main.fields.OrderedManyToManyField( + blank=True, + editable=False, + related_name='workflow_job_instance_groups', + through='main.WorkflowJobInstanceGroupMembership', + to='main.InstanceGroup', + ), + ), + migrations.AddField( + model_name='workflowjobnode', + name='instance_groups', + field=awx.main.fields.OrderedManyToManyField( + blank=True, + editable=False, + related_name='workflow_job_node_instance_groups', + through='main.WorkflowJobNodeBaseInstanceGroupMembership', + to='main.InstanceGroup', + ), + ), + migrations.AddField( + model_name='workflowjobtemplatenode', + name='instance_groups', + field=awx.main.fields.OrderedManyToManyField( + blank=True, + editable=False, + related_name='workflow_job_template_node_instance_groups', + through='main.WorkflowJobTemplateNodeBaseInstanceGroupMembership', + to='main.InstanceGroup', + ), + ), + ] diff --git 
a/awx/main/models/ha.py b/awx/main/models/ha.py index 5f9588f627..eeed06bc60 100644 --- a/awx/main/models/ha.py +++ b/awx/main/models/ha.py @@ -434,3 +434,58 @@ class InventoryInstanceGroupMembership(models.Model): default=None, db_index=True, ) + + +class JobLaunchConfigInstanceGroupMembership(models.Model): + + joblaunchconfig = models.ForeignKey('JobLaunchConfig', on_delete=models.CASCADE) + instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE) + position = models.PositiveIntegerField( + null=True, + default=None, + db_index=True, + ) + + +class ScheduleInstanceGroupMembership(models.Model): + + schedule = models.ForeignKey('Schedule', on_delete=models.CASCADE) + instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE) + position = models.PositiveIntegerField( + null=True, + default=None, + db_index=True, + ) + + +class WorkflowJobTemplateNodeBaseInstanceGroupMembership(models.Model): + + workflowjobtemplatenode = models.ForeignKey('WorkflowJobTemplateNode', on_delete=models.CASCADE) + instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE) + position = models.PositiveIntegerField( + null=True, + default=None, + db_index=True, + ) + + +class WorkflowJobNodeBaseInstanceGroupMembership(models.Model): + + workflowjobnode = models.ForeignKey('WorkflowJobNode', on_delete=models.CASCADE) + instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE) + position = models.PositiveIntegerField( + null=True, + default=None, + db_index=True, + ) + + +class WorkflowJobInstanceGroupMembership(models.Model): + + workflowjobnode = models.ForeignKey('WorkflowJob', on_delete=models.CASCADE) + instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE) + position = models.PositiveIntegerField( + null=True, + default=None, + db_index=True, + ) diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index 2539055318..b954c76e35 100644 --- a/awx/main/models/jobs.py +++ 
b/awx/main/models/jobs.py @@ -43,8 +43,8 @@ from awx.main.models.notifications import ( NotificationTemplate, JobNotificationMixin, ) -from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField -from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob +from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic +from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField from awx.main.models.mixins import ( ResourceMixin, SurveyJobTemplateMixin, @@ -227,15 +227,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour blank=True, default=False, ) - ask_limit_on_launch = AskForField( - blank=True, - default=False, - ) - ask_tags_on_launch = AskForField(blank=True, default=False, allows_field='job_tags') - ask_skip_tags_on_launch = AskForField( - blank=True, - default=False, - ) ask_job_type_on_launch = AskForField( blank=True, default=False, @@ -244,12 +235,27 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour blank=True, default=False, ) - ask_inventory_on_launch = AskForField( + ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials') + ask_execution_environment_on_launch = AskForField( + blank=True, + default=False, + ) + ask_forks_on_launch = AskForField( + blank=True, + default=False, + ) + ask_job_slice_count_on_launch = AskForField( + blank=True, + default=False, + ) + ask_timeout_on_launch = AskForField( + blank=True, + default=False, + ) + ask_instance_groups_on_launch = AskForField( blank=True, default=False, ) - ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials') - ask_scm_branch_on_launch = AskForField(blank=True, default=False, allows_field='scm_branch') job_slice_count = models.PositiveIntegerField( blank=True, default=1, @@ -276,7 +282,17 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, 
SurveyJobTemplateMixin, Resour @classmethod def _get_unified_job_field_names(cls): return set(f.name for f in JobOptions._meta.fields) | set( - ['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials', 'job_slice_number', 'job_slice_count', 'execution_environment'] + [ + 'name', + 'description', + 'organization', + 'survey_passwords', + 'labels', + 'credentials', + 'job_slice_number', + 'job_slice_count', + 'execution_environment', + ] ) @property @@ -314,10 +330,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour actual_inventory = self.inventory if self.ask_inventory_on_launch and 'inventory' in kwargs: actual_inventory = kwargs['inventory'] + actual_slice_count = self.job_slice_count + if self.ask_job_slice_count_on_launch and 'job_slice_count' in kwargs: + actual_slice_count = kwargs['job_slice_count'] if actual_inventory: - return min(self.job_slice_count, actual_inventory.hosts.count()) + return min(actual_slice_count, actual_inventory.hosts.count()) else: - return self.job_slice_count + return actual_slice_count def save(self, *args, **kwargs): update_fields = kwargs.get('update_fields', []) @@ -425,10 +444,15 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour field = self._meta.get_field(field_name) if isinstance(field, models.ManyToManyField): - old_value = set(old_value.all()) - new_value = set(kwargs[field_name]) - old_value - if not new_value: - continue + if field_name == 'instance_groups': + # Instance groups are ordered so we can't make a set out of them + old_value = old_value.all() + elif field_name == 'credentials': + # Credentials have a weird pattern because of how they are layered + old_value = set(old_value.all()) + new_value = set(kwargs[field_name]) - old_value + if not new_value: + continue if new_value == old_value: # no-op case: Fields the same as template's value @@ -449,6 +473,10 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, 
SurveyJobTemplateMixin, Resour rejected_data[field_name] = new_value errors_dict[field_name] = _('Project does not allow override of branch.') continue + elif field_name == 'job_slice_count' and (new_value > 1) and (self.get_effective_slice_ct(kwargs) <= 1): + rejected_data[field_name] = new_value + errors_dict[field_name] = _('Job inventory does not have enough hosts for slicing') + continue # accepted prompt prompted_data[field_name] = new_value else: @@ -767,6 +795,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana @property def preferred_instance_groups(self): + # If the user specified instance groups those will be handled by the unified_job.create_unified_job + # This function handles only the defaults for a template w/o user specification if self.organization is not None: organization_groups = [x for x in self.organization.instance_groups.all()] else: @@ -906,10 +936,36 @@ class LaunchTimeConfigBase(BaseModel): # This is a solution to the nullable CharField problem, specific to prompting char_prompts = JSONBlob(default=dict, blank=True) - def prompts_dict(self, display=False): + # Define fields that are not really fields, but alias to char_prompts lookups + limit = NullablePromptPseudoField('limit') + scm_branch = NullablePromptPseudoField('scm_branch') + job_tags = NullablePromptPseudoField('job_tags') + skip_tags = NullablePromptPseudoField('skip_tags') + diff_mode = NullablePromptPseudoField('diff_mode') + job_type = NullablePromptPseudoField('job_type') + verbosity = NullablePromptPseudoField('verbosity') + forks = NullablePromptPseudoField('forks') + job_slice_count = NullablePromptPseudoField('job_slice_count') + timeout = NullablePromptPseudoField('timeout') + + # NOTE: additional fields are assumed to exist but must be defined in subclasses + # due to technical limitations + SUBCLASS_FIELDS = ( + 'instance_groups', # needs a through model defined + 'extra_vars', # alternates between extra_vars and extra_data + 
'credentials', # already a unified job and unified JT field + 'labels', # already a unified job and unified JT field + 'execution_environment', # already a unified job and unified JT field + ) + + def prompts_dict(self, display=False, for_cls=None): data = {} + if for_cls: + cls = for_cls + else: + cls = JobTemplate # Some types may have different prompts, but always subset of JT prompts - for prompt_name in JobTemplate.get_ask_mapping().keys(): + for prompt_name in cls.get_ask_mapping().keys(): try: field = self._meta.get_field(prompt_name) except FieldDoesNotExist: @@ -917,18 +973,23 @@ class LaunchTimeConfigBase(BaseModel): if isinstance(field, models.ManyToManyField): if not self.pk: continue # unsaved object can't have related many-to-many - prompt_val = set(getattr(self, prompt_name).all()) - if len(prompt_val) > 0: - data[prompt_name] = prompt_val + prompt_values = list(getattr(self, prompt_name).all()) + # Many to manys can't distinguish between None and [] + # Because of this, from a config perspective, we assume [] is none and we don't save [] into the config + if len(prompt_values) > 0: + data[prompt_name] = prompt_values elif prompt_name == 'extra_vars': if self.extra_vars: + extra_vars = {} if display: - data[prompt_name] = self.display_extra_vars() + extra_vars = self.display_extra_vars() else: - data[prompt_name] = self.extra_vars + extra_vars = self.extra_vars # Depending on model, field type may save and return as string - if isinstance(data[prompt_name], str): - data[prompt_name] = parse_yaml_or_json(data[prompt_name]) + if isinstance(extra_vars, str): + extra_vars = parse_yaml_or_json(extra_vars) + if extra_vars: + data['extra_vars'] = extra_vars if self.survey_passwords and not display: data['survey_passwords'] = self.survey_passwords else: @@ -938,15 +999,6 @@ class LaunchTimeConfigBase(BaseModel): return data -for field_name in JobTemplate.get_ask_mapping().keys(): - if field_name == 'extra_vars': - continue - try: - 
LaunchTimeConfigBase._meta.get_field(field_name) - except FieldDoesNotExist: - setattr(LaunchTimeConfigBase, field_name, NullablePromptPseudoField(field_name)) - - class LaunchTimeConfig(LaunchTimeConfigBase): """ Common model for all objects that save details of a saved launch config @@ -965,8 +1017,18 @@ class LaunchTimeConfig(LaunchTimeConfigBase): blank=True, ) ) - # Credentials needed for non-unified job / unified JT models + # Fields needed for non-unified job / unified JT models, because they are defined on unified models credentials = models.ManyToManyField('Credential', related_name='%(class)ss') + labels = models.ManyToManyField('Label', related_name='%(class)s_labels') + execution_environment = models.ForeignKey( + 'ExecutionEnvironment', + null=True, + blank=True, + default=None, + on_delete=polymorphic.SET_NULL, + related_name='%(class)s_as_prompt', + help_text="The container image to be used for execution.", + ) @property def extra_vars(self): @@ -1010,6 +1072,11 @@ class JobLaunchConfig(LaunchTimeConfig): editable=False, ) + # Instance Groups needed for non-unified job / unified JT models + instance_groups = OrderedManyToManyField( + 'InstanceGroup', related_name='%(class)ss', blank=True, editable=False, through='JobLaunchConfigInstanceGroupMembership' + ) + def has_user_prompts(self, template): """ Returns True if any fields exist in the launch config that are diff --git a/awx/main/models/label.py b/awx/main/models/label.py index 7ca92d4ff2..419fc68801 100644 --- a/awx/main/models/label.py +++ b/awx/main/models/label.py @@ -10,6 +10,8 @@ from awx.api.versioning import reverse from awx.main.models.base import CommonModelNameNotUnique from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob from awx.main.models.inventory import Inventory +from awx.main.models.schedules import Schedule +from awx.main.models.workflow import WorkflowJobTemplateNode, WorkflowJobNode __all__ = ('Label',) @@ -34,16 +36,22 @@ class 
Label(CommonModelNameNotUnique): def get_absolute_url(self, request=None): return reverse('api:label_detail', kwargs={'pk': self.pk}, request=request) - @staticmethod - def get_orphaned_labels(): - return Label.objects.filter(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True) - def is_detached(self): - return Label.objects.filter(id=self.id, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True).exists() + return Label.objects.filter( + id=self.id, + unifiedjob_labels__isnull=True, + unifiedjobtemplate_labels__isnull=True, + inventory_labels__isnull=True, + schedule_labels__isnull=True, + workflowjobtemplatenode_labels__isnull=True, + workflowjobnode_labels__isnull=True, + ).exists() def is_candidate_for_detach(self): - - c1 = UnifiedJob.objects.filter(labels__in=[self.id]).count() - c2 = UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count() - c3 = Inventory.objects.filter(labels__in=[self.id]).count() - return (c1 + c2 + c3 - 1) == 0 + count = UnifiedJob.objects.filter(labels__in=[self.id]).count() # Both Jobs and WFJobs + count += UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count() # Both JTs and WFJT + count += Inventory.objects.filter(labels__in=[self.id]).count() + count += Schedule.objects.filter(labels__in=[self.id]).count() + count += WorkflowJobTemplateNode.objects.filter(labels__in=[self.id]).count() + count += WorkflowJobNode.objects.filter(labels__in=[self.id]).count() + return (count - 1) == 0 diff --git a/awx/main/models/mixins.py b/awx/main/models/mixins.py index 0e38d7288c..df10f0b29f 100644 --- a/awx/main/models/mixins.py +++ b/awx/main/models/mixins.py @@ -104,6 +104,33 @@ class SurveyJobTemplateMixin(models.Model): default=False, ) survey_spec = prevent_search(JSONBlob(default=dict, blank=True)) + + ask_inventory_on_launch = AskForField( + blank=True, + default=False, + ) + ask_limit_on_launch = AskForField( + blank=True, + default=False, 
+ ) + ask_scm_branch_on_launch = AskForField( + blank=True, + default=False, + allows_field='scm_branch', + ) + ask_labels_on_launch = AskForField( + blank=True, + default=False, + ) + ask_tags_on_launch = AskForField( + blank=True, + default=False, + allows_field='job_tags', + ) + ask_skip_tags_on_launch = AskForField( + blank=True, + default=False, + ) ask_variables_on_launch = AskForField(blank=True, default=False, allows_field='extra_vars') def survey_password_variables(self): diff --git a/awx/main/models/schedules.py b/awx/main/models/schedules.py index 29d43ec98d..98c241059d 100644 --- a/awx/main/models/schedules.py +++ b/awx/main/models/schedules.py @@ -18,6 +18,7 @@ from django.utils.translation import gettext_lazy as _ # AWX from awx.api.versioning import reverse +from awx.main.fields import OrderedManyToManyField from awx.main.models.base import PrimordialModel from awx.main.models.jobs import LaunchTimeConfig from awx.main.utils import ignore_inventory_computed_fields @@ -83,6 +84,13 @@ class Schedule(PrimordialModel, LaunchTimeConfig): ) rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule.")) next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run.")) + instance_groups = OrderedManyToManyField( + 'InstanceGroup', + related_name='schedule_instance_groups', + blank=True, + editable=False, + through='ScheduleInstanceGroupMembership', + ) @classmethod def get_zoneinfo(cls): diff --git a/awx/main/models/unified_jobs.py b/awx/main/models/unified_jobs.py index 5ef8fed0f7..a8ac64b2cc 100644 --- a/awx/main/models/unified_jobs.py +++ b/awx/main/models/unified_jobs.py @@ -332,10 +332,11 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn return NotificationTemplate.objects.none() - def create_unified_job(self, **kwargs): + def create_unified_job(self, instance_groups=None, **kwargs): """ Create a new unified job 
based on this unified job template. """ + # TODO: rename kwargs to prompts, to set expectation that these are runtime values new_job_passwords = kwargs.pop('survey_passwords', {}) eager_fields = kwargs.pop('_eager_fields', None) @@ -382,7 +383,10 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn unified_job.survey_passwords = new_job_passwords kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch - unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache() + if instance_groups: + unified_job.preferred_instance_groups_cache = [ig.id for ig in instance_groups] + else: + unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache() unified_job._set_default_dependencies_processed() unified_job.task_impact = unified_job._get_task_impact() @@ -412,13 +416,17 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn unified_job.handle_extra_data(validated_kwargs['extra_vars']) # Create record of provided prompts for relaunch and rescheduling - unified_job.create_config_from_prompts(kwargs, parent=self) + config = unified_job.create_config_from_prompts(kwargs, parent=self) + if instance_groups: + for ig in instance_groups: + config.instance_groups.add(ig) # manually issue the create activity stream entry _after_ M2M relations # have been associated to the UJ if unified_job.__class__ in activity_stream_registrar.models: activity_stream_create(None, unified_job, True) unified_job.log_lifecycle("created") + return unified_job @classmethod @@ -973,22 +981,38 @@ class UnifiedJob( valid_fields.extend(['survey_passwords', 'extra_vars']) else: kwargs.pop('survey_passwords', None) + many_to_many_fields = [] for field_name, value in kwargs.items(): if field_name not in valid_fields: raise Exception('Unrecognized launch config field {}.'.format(field_name)) - if field_name == 'credentials': + field = None + # may use 
extra_data as a proxy for extra_vars + if field_name in config.SUBCLASS_FIELDS and field_name != 'extra_vars': + field = config._meta.get_field(field_name) + if isinstance(field, models.ManyToManyField): + many_to_many_fields.append(field_name) continue - key = field_name - if key == 'extra_vars': - key = 'extra_data' - setattr(config, key, value) + if isinstance(field, (models.ForeignKey)) and (value is None): + continue # the null value indicates not-provided for ForeignKey case + setattr(config, field_name, value) config.save() - job_creds = set(kwargs.get('credentials', [])) - if 'credentials' in [field.name for field in parent._meta.get_fields()]: - job_creds = job_creds - set(parent.credentials.all()) - if job_creds: - config.credentials.add(*job_creds) + for field_name in many_to_many_fields: + prompted_items = kwargs.get(field_name, []) + if not prompted_items: + continue + if field_name == 'instance_groups': + # Here we are doing a loop to make sure we preserve order for this Ordered field + # also do not merge IGs with parent, so this saves the literal list + for item in prompted_items: + getattr(config, field_name).add(item) + else: + # Assuming this field merges prompts with parent, save just the diff + if field_name in [field.name for field in parent._meta.get_fields()]: + prompted_items = set(prompted_items) - set(getattr(parent, field_name).all()) + if prompted_items: + getattr(config, field_name).add(*prompted_items) + return config @property diff --git a/awx/main/models/workflow.py b/awx/main/models/workflow.py index 4f52ade6b4..f97b4e93b8 100644 --- a/awx/main/models/workflow.py +++ b/awx/main/models/workflow.py @@ -29,7 +29,7 @@ from awx.main.models import prevent_search, accepts_json, UnifiedJobTemplate, Un from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin from awx.main.models.base import CreatedModifiedModel, VarsDictProperty from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 
ROLE_SINGLETON_SYSTEM_AUDITOR -from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob +from awx.main.fields import ImplicitRoleField, JSONBlob, OrderedManyToManyField from awx.main.models.mixins import ( ResourceMixin, SurveyJobTemplateMixin, @@ -114,6 +114,9 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig): 'credentials', 'char_prompts', 'all_parents_must_converge', + 'labels', + 'instance_groups', + 'execution_environment', ] def create_workflow_job_node(self, **kwargs): @@ -122,7 +125,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig): """ create_kwargs = {} for field_name in self._get_workflow_job_field_names(): - if field_name == 'credentials': + if field_name in ['credentials', 'labels', 'instance_groups']: continue if field_name in kwargs: create_kwargs[field_name] = kwargs[field_name] @@ -132,10 +135,20 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig): new_node = WorkflowJobNode.objects.create(**create_kwargs) if self.pk: allowed_creds = self.credentials.all() + allowed_labels = self.labels.all() + allowed_instance_groups = self.instance_groups.all() else: allowed_creds = [] + allowed_labels = [] + allowed_instance_groups = [] for cred in allowed_creds: new_node.credentials.add(cred) + + for label in allowed_labels: + new_node.labels.add(label) + for instance_group in allowed_instance_groups: + new_node.instance_groups.add(instance_group) + return new_node @@ -153,6 +166,9 @@ class WorkflowJobTemplateNode(WorkflowNodeBase): 'char_prompts', 'all_parents_must_converge', 'identifier', + 'labels', + 'execution_environment', + 'instance_groups', ] REENCRYPTION_BLOCKLIST_AT_COPY = ['extra_data', 'survey_passwords'] @@ -167,6 +183,13 @@ class WorkflowJobTemplateNode(WorkflowNodeBase): blank=False, help_text=_('An identifier for this node that is unique within its workflow. 
' 'It is copied to workflow job nodes corresponding to this node.'), ) + instance_groups = OrderedManyToManyField( + 'InstanceGroup', + related_name='workflow_job_template_node_instance_groups', + blank=True, + editable=False, + through='WorkflowJobTemplateNodeBaseInstanceGroupMembership', + ) class Meta: app_label = 'main' @@ -211,7 +234,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase): approval_template = WorkflowApprovalTemplate(**kwargs) approval_template.save() self.unified_job_template = approval_template - self.save() + self.save(update_fields=['unified_job_template']) return approval_template @@ -250,6 +273,9 @@ class WorkflowJobNode(WorkflowNodeBase): blank=True, # blank denotes pre-migration job nodes help_text=_('An identifier coresponding to the workflow job template node that this node was created from.'), ) + instance_groups = OrderedManyToManyField( + 'InstanceGroup', related_name='workflow_job_node_instance_groups', blank=True, editable=False, through='WorkflowJobNodeBaseInstanceGroupMembership' + ) class Meta: app_label = 'main' @@ -265,19 +291,6 @@ class WorkflowJobNode(WorkflowNodeBase): def get_absolute_url(self, request=None): return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request) - def prompts_dict(self, *args, **kwargs): - r = super(WorkflowJobNode, self).prompts_dict(*args, **kwargs) - # Explanation - WFJT extra_vars still break pattern, so they are not - # put through prompts processing, but inventory and others are only accepted - # if JT prompts for it, so it goes through this mechanism - if self.workflow_job: - if self.workflow_job.inventory_id: - # workflow job inventory takes precedence - r['inventory'] = self.workflow_job.inventory - if self.workflow_job.char_prompts: - r.update(self.workflow_job.char_prompts) - return r - def get_job_kwargs(self): """ In advance of creating a new unified job as part of a workflow, @@ -287,16 +300,38 @@ class WorkflowJobNode(WorkflowNodeBase): """ # reject/accept 
prompted fields data = {} + wj_special_vars = {} + wj_special_passwords = {} ujt_obj = self.unified_job_template if ujt_obj is not None: - # MERGE note: move this to prompts_dict method on node when merging - # with the workflow inventory branch - prompts_data = self.prompts_dict() - if isinstance(ujt_obj, WorkflowJobTemplate): - if self.workflow_job.extra_vars: - prompts_data.setdefault('extra_vars', {}) - prompts_data['extra_vars'].update(self.workflow_job.extra_vars_dict) - accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_data) + node_prompts_data = self.prompts_dict(for_cls=ujt_obj.__class__) + wj_prompts_data = self.workflow_job.prompts_dict(for_cls=ujt_obj.__class__) + # Explanation - special historical case + # WFJT extra_vars ignored JobTemplate.ask_variables_on_launch, bypassing _accept_or_ignore_job_kwargs + # inventory and others are only accepted if JT prompts for it with related ask_ field + # this is inconsistent, but maintained + if not isinstance(ujt_obj, WorkflowJobTemplate): + wj_special_vars = wj_prompts_data.pop('extra_vars', {}) + wj_special_passwords = wj_prompts_data.pop('survey_passwords', {}) + elif 'extra_vars' in node_prompts_data: + # Follow the vars combination rules + node_prompts_data['extra_vars'].update(wj_prompts_data.pop('extra_vars', {})) + elif 'survey_passwords' in node_prompts_data: + node_prompts_data['survey_passwords'].update(wj_prompts_data.pop('survey_passwords', {})) + + # Follow the credential combination rules + if ('credentials' in wj_prompts_data) and ('credentials' in node_prompts_data): + wj_pivoted_creds = Credential.unique_dict(wj_prompts_data['credentials']) + node_pivoted_creds = Credential.unique_dict(node_prompts_data['credentials']) + node_pivoted_creds.update(wj_pivoted_creds) + wj_prompts_data['credentials'] = [cred for cred in node_pivoted_creds.values()] + + # NOTE: no special rules for instance_groups, because they do not merge + # or labels, because they do not 
propogate WFJT-->node at all + + # Combine WFJT prompts with node here, WFJT at higher level + node_prompts_data.update(wj_prompts_data) + accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**node_prompts_data) if errors: logger.info( _('Bad launch configuration starting template {template_pk} as part of ' 'workflow {workflow_pk}. Errors:\n{error_text}').format( @@ -304,15 +339,6 @@ class WorkflowJobNode(WorkflowNodeBase): ) ) data.update(accepted_fields) # missing fields are handled in the scheduler - try: - # config saved on the workflow job itself - wj_config = self.workflow_job.launch_config - except ObjectDoesNotExist: - wj_config = None - if wj_config: - accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict()) - accepted_fields.pop('extra_vars', None) # merge handled with other extra_vars later - data.update(accepted_fields) # build ancestor artifacts, save them to node model for later aa_dict = {} is_root_node = True @@ -325,15 +351,12 @@ class WorkflowJobNode(WorkflowNodeBase): self.ancestor_artifacts = aa_dict self.save(update_fields=['ancestor_artifacts']) # process password list - password_dict = {} + password_dict = data.get('survey_passwords', {}) if '_ansible_no_log' in aa_dict: for key in aa_dict: if key != '_ansible_no_log': password_dict[key] = REPLACE_STR - if self.workflow_job.survey_passwords: - password_dict.update(self.workflow_job.survey_passwords) - if self.survey_passwords: - password_dict.update(self.survey_passwords) + password_dict.update(wj_special_passwords) if password_dict: data['survey_passwords'] = password_dict # process extra_vars @@ -343,12 +366,12 @@ class WorkflowJobNode(WorkflowNodeBase): functional_aa_dict = copy(aa_dict) functional_aa_dict.pop('_ansible_no_log', None) extra_vars.update(functional_aa_dict) - if ujt_obj and isinstance(ujt_obj, JobTemplate): - # Workflow Job extra_vars higher precedence than ancestor artifacts - if self.workflow_job 
and self.workflow_job.extra_vars: - extra_vars.update(self.workflow_job.extra_vars_dict) + + # Workflow Job extra_vars higher precedence than ancestor artifacts + extra_vars.update(wj_special_vars) if extra_vars: data['extra_vars'] = extra_vars + # ensure that unified jobs created by WorkflowJobs are marked data['_eager_fields'] = {'launch_type': 'workflow'} if self.workflow_job and self.workflow_job.created_by: @@ -374,6 +397,10 @@ class WorkflowJobOptions(LaunchTimeConfigBase): ) ) ) + # Workflow jobs are used for sliced jobs, and thus, must be a conduit for any JT prompts + instance_groups = OrderedManyToManyField( + 'InstanceGroup', related_name='workflow_job_instance_groups', blank=True, editable=False, through='WorkflowJobInstanceGroupMembership' + ) allow_simultaneous = models.BooleanField(default=False) extra_vars_dict = VarsDictProperty('extra_vars', True) @@ -385,7 +412,7 @@ class WorkflowJobOptions(LaunchTimeConfigBase): @classmethod def _get_unified_job_field_names(cls): r = set(f.name for f in WorkflowJobOptions._meta.fields) | set( - ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch'] + ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch', 'job_tags', 'skip_tags'] ) r.remove('char_prompts') # needed due to copying launch config to launch config return r @@ -425,26 +452,29 @@ class WorkflowJobOptions(LaunchTimeConfigBase): class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin, RelatedJobsMixin, WebhookTemplateMixin): SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')] - FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'organization', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec'] + FIELDS_TO_PRESERVE_AT_COPY = [ + 'labels', + 'organization', + 'instance_groups', + 'workflow_job_template_nodes', + 'credentials', + 'survey_spec', + 'skip_tags', + 'job_tags', + 'execution_environment', 
+ ] class Meta: app_label = 'main' - ask_inventory_on_launch = AskForField( + notification_templates_approvals = models.ManyToManyField( + "NotificationTemplate", blank=True, - default=False, + related_name='%(class)s_notification_templates_for_approvals', ) - ask_limit_on_launch = AskForField( - blank=True, - default=False, + admin_role = ImplicitRoleField( + parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role'], ) - ask_scm_branch_on_launch = AskForField( - blank=True, - default=False, - ) - notification_templates_approvals = models.ManyToManyField("NotificationTemplate", blank=True, related_name='%(class)s_notification_templates_for_approvals') - - admin_role = ImplicitRoleField(parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role']) execute_role = ImplicitRoleField( parent_role=[ 'admin_role', @@ -713,6 +743,25 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set)) return artifacts + def prompts_dict(self, *args, **kwargs): + if self.job_template_id: + # HACK: Exception for sliced jobs here, this is bad + # when sliced jobs were introduced, workflows did not have all the prompted JT fields + # so to support prompting with slicing, we abused the workflow job launch config + # these would be more properly saved on the workflow job, but it gets the wrong fields now + try: + wj_config = self.launch_config + r = wj_config.prompts_dict(*args, **kwargs) + except ObjectDoesNotExist: + r = {} + else: + r = super().prompts_dict(*args, **kwargs) + # Workflow labels and job labels are treated separately + # that means that they do not propogate from WFJT / workflow job to jobs in workflow + r.pop('labels', None) + + return r + def get_notification_templates(self): return self.workflow_job_template.notification_templates diff --git a/awx/main/tests/factories/fixtures.py 
b/awx/main/tests/factories/fixtures.py index 200fa0f195..27556d6efe 100644 --- a/awx/main/tests/factories/fixtures.py +++ b/awx/main/tests/factories/fixtures.py @@ -210,7 +210,7 @@ def mk_workflow_job_template(name, extra_vars='', spec=None, organization=None, if extra_vars: extra_vars = json.dumps(extra_vars) - wfjt = WorkflowJobTemplate(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service) + wfjt = WorkflowJobTemplate.objects.create(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service) if spec: wfjt.survey_spec = spec diff --git a/awx/main/tests/functional/api/test_job.py b/awx/main/tests/functional/api/test_job.py index 8c405c4e0e..53e31e5981 100644 --- a/awx/main/tests/functional/api/test_job.py +++ b/awx/main/tests/functional/api/test_job.py @@ -13,17 +13,11 @@ from django.utils import timezone # AWX from awx.api.versioning import reverse from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin -from awx.main.models import ( - JobTemplate, - User, - Job, - AdHocCommand, - ProjectUpdate, -) +from awx.main.models import JobTemplate, User, Job, AdHocCommand, ProjectUpdate, InstanceGroup, Label, Organization @pytest.mark.django_db -def test_job_relaunch_permission_denied_response(post, get, inventory, project, credential, net_credential, machine_credential): +def test_job_relaunch_permission_denied_response(post, get, inventory, project, net_credential, machine_credential): jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True) jt.credentials.add(machine_credential) jt_user = User.objects.create(username='jobtemplateuser') @@ -39,6 +33,22 @@ def test_job_relaunch_permission_denied_response(post, get, inventory, project, job.launch_config.credentials.add(net_credential) r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403) assert 'launched with prompted fields you do not 
have access to' in r.data['detail'] + job.launch_config.credentials.clear() + + # Job has prompted instance group that user cannot see + job.launch_config.instance_groups.add(InstanceGroup.objects.create()) + r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403) + assert 'launched with prompted fields you do not have access to' in r.data['detail'] + job.launch_config.instance_groups.clear() + + # Job has prompted label that user cannot see + job.launch_config.labels.add(Label.objects.create(organization=Organization.objects.create())) + r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403) + assert 'launched with prompted fields you do not have access to' in r.data['detail'] + job.launch_config.labels.clear() + + # without any of those prompts, user can launch + r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=201) @pytest.mark.django_db diff --git a/awx/main/tests/functional/api/test_job_runtime_params.py b/awx/main/tests/functional/api/test_job_runtime_params.py index 33d91ded58..f477a66ed9 100644 --- a/awx/main/tests/functional/api/test_job_runtime_params.py +++ b/awx/main/tests/functional/api/test_job_runtime_params.py @@ -4,8 +4,7 @@ import yaml import json from awx.api.serializers import JobLaunchSerializer -from awx.main.models.credential import Credential -from awx.main.models.inventory import Inventory, Host +from awx.main.models import Credential, Inventory, Host, ExecutionEnvironment, Label, InstanceGroup from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate from awx.api.versioning import reverse @@ -15,6 +14,11 @@ from awx.api.versioning import reverse def runtime_data(organization, credentialtype_ssh): cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'}) inv_obj = organization.inventories.create(name="runtime-inv") + 
inv_obj.hosts.create(name='foo1') + inv_obj.hosts.create(name='foo2') + ee_obj = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar') + ig_obj = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2) + labels_obj = Label.objects.create(name='foo', description='bar', organization=organization) return dict( extra_vars='{"job_launch_var": 4}', limit='test-servers', @@ -25,6 +29,12 @@ def runtime_data(organization, credentialtype_ssh): credentials=[cred_obj.pk], diff_mode=True, verbosity=2, + execution_environment=ee_obj.pk, + labels=[labels_obj.pk], + forks=7, + job_slice_count=2, + timeout=10, + instance_groups=[ig_obj.pk], ) @@ -54,6 +64,12 @@ def job_template_prompts(project, inventory, machine_credential): ask_credential_on_launch=on_off, ask_diff_mode_on_launch=on_off, ask_verbosity_on_launch=on_off, + ask_execution_environment_on_launch=on_off, + ask_labels_on_launch=on_off, + ask_forks_on_launch=on_off, + ask_job_slice_count_on_launch=on_off, + ask_timeout_on_launch=on_off, + ask_instance_groups_on_launch=on_off, ) jt.credentials.add(machine_credential) return jt @@ -77,6 +93,12 @@ def job_template_prompts_null(project): ask_credential_on_launch=True, ask_diff_mode_on_launch=True, ask_verbosity_on_launch=True, + ask_execution_environment_on_launch=True, + ask_labels_on_launch=True, + ask_forks_on_launch=True, + ask_job_slice_count_on_launch=True, + ask_timeout_on_launch=True, + ask_instance_groups_on_launch=True, ) @@ -92,6 +114,12 @@ def data_to_internal(data): internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials']) if 'inventory' in data: internal['inventory'] = Inventory.objects.get(pk=data['inventory']) + if 'execution_environment' in data: + internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment']) + if 'labels' in data: + internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']] + if 
'instance_groups' in data: + internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']] return internal @@ -124,6 +152,12 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad assert 'credentials' in response.data['ignored_fields'] assert 'job_tags' in response.data['ignored_fields'] assert 'skip_tags' in response.data['ignored_fields'] + assert 'execution_environment' in response.data['ignored_fields'] + assert 'labels' in response.data['ignored_fields'] + assert 'forks' in response.data['ignored_fields'] + assert 'job_slice_count' in response.data['ignored_fields'] + assert 'timeout' in response.data['ignored_fields'] + assert 'instance_groups' in response.data['ignored_fields'] @pytest.mark.django_db @@ -162,6 +196,34 @@ def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker): mock_job.signal_start.assert_called_once() +@pytest.mark.django_db +@pytest.mark.job_runtime_vars +def test_slice_timeout_forks_need_int(job_template_prompts, post, admin_user, mocker): + job_template = job_template_prompts(True) + + mock_job = mocker.MagicMock(spec=Job, id=968) + + with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): + with mocker.patch('awx.api.serializers.JobSerializer.to_representation'): + response = post( + reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'timeout': '', 'job_slice_count': '', 'forks': ''}, admin_user, expect=400 + ) + assert 'forks' in response.data and response.data['forks'][0] == 'A valid integer is required.' + assert 'job_slice_count' in response.data and response.data['job_slice_count'][0] == 'A valid integer is required.' + assert 'timeout' in response.data and response.data['timeout'][0] == 'A valid integer is required.' 
+ + +@pytest.mark.django_db +@pytest.mark.job_runtime_vars +def test_slice_count_not_supported(job_template_prompts, post, admin_user): + job_template = job_template_prompts(True) + assert job_template.inventory.hosts.count() == 0 + job_template.inventory.hosts.create(name='foo') + + response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'job_slice_count': 8}, admin_user, expect=400) + assert response.data['job_slice_count'][0] == 'Job inventory does not have enough hosts for slicing' + + @pytest.mark.django_db @pytest.mark.job_runtime_vars def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker): @@ -176,6 +238,10 @@ def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, inventory = Inventory.objects.get(pk=runtime_data['inventory']) inventory.use_role.members.add(rando) + # Instance Groups and label can not currently easily be used by rando so we need to remove the instance groups from the runtime data + runtime_data.pop('instance_groups') + runtime_data.pop('labels') + mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data) with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): @@ -243,12 +309,59 @@ def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime @pytest.mark.django_db @pytest.mark.job_runtime_vars -def test_job_launch_fails_without_credential_access(job_template_prompts, runtime_data, post, rando): +def test_job_launch_works_without_access_to_ig_if_ig_in_template(job_template_prompts, runtime_data, post, rando, mocker): + job_template = job_template_prompts(True) + job_template.instance_groups.add(InstanceGroup.objects.get(id=runtime_data['instance_groups'][0])) + job_template.instance_groups.add(InstanceGroup.objects.create(name='foo')) + job_template.save() + job_template.execute_role.members.add(rando) + + # Make sure we get a 201 instead of a 403 since we are providing an override of just a 
subset of the instance gorup that was already added + post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(instance_groups=runtime_data['instance_groups']), rando, expect=201) + + +@pytest.mark.django_db +@pytest.mark.job_runtime_vars +def test_job_launch_works_without_access_to_label_if_label_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization): + job_template = job_template_prompts(True) + job_template.labels.add(Label.objects.get(id=runtime_data['labels'][0])) + job_template.labels.add(Label.objects.create(name='baz', description='faz', organization=organization)) + job_template.save() + job_template.execute_role.members.add(rando) + + # Make sure we get a 201 instead of a 403 since we are providing an override of just a subset of the instance gorup that was already added + post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(labels=runtime_data['labels']), rando, expect=201) + + +@pytest.mark.django_db +@pytest.mark.job_runtime_vars +def test_job_launch_works_without_access_to_ee_if_ee_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization): + job_template = job_template_prompts(True) + job_template.execute_role.members.add(rando) + + # Make sure we get a 201 instead of a 403 since we are providing an override that is already in the template + post( + reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(execution_environment=runtime_data['execution_environment']), rando, expect=201 + ) + + +@pytest.mark.parametrize( + 'item_type', + [ + ('credentials'), + ('labels'), + ('instance_groups'), + ], +) +@pytest.mark.django_db +@pytest.mark.job_runtime_vars +def test_job_launch_fails_without_access(job_template_prompts, runtime_data, post, rando, item_type): job_template = job_template_prompts(True) job_template.execute_role.members.add(rando) # Assure that giving a credential without access blocks the launch - 
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(credentials=runtime_data['credentials']), rando, expect=403) + data = {item_type: runtime_data[item_type]} + post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), data, rando, expect=403) @pytest.mark.django_db diff --git a/awx/main/tests/functional/api/test_workflow_node.py b/awx/main/tests/functional/api/test_workflow_node.py index 0b89dfb546..71874085d7 100644 --- a/awx/main/tests/functional/api/test_workflow_node.py +++ b/awx/main/tests/functional/api/test_workflow_node.py @@ -77,6 +77,18 @@ class TestApprovalNodes: assert approval_node.unified_job_template.description == 'Approval Node' assert approval_node.unified_job_template.timeout == 0 + def test_approval_node_creation_with_timeout(self, post, approval_node, admin_user): + assert approval_node.timeout is None + + url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'}) + post(url, {'name': 'Test', 'description': 'Approval Node', 'timeout': 10}, user=admin_user, expect=201) + + approval_node = WorkflowJobTemplateNode.objects.get(pk=approval_node.pk) + approval_node.refresh_from_db() + assert approval_node.timeout is None + assert isinstance(approval_node.unified_job_template, WorkflowApprovalTemplate) + assert approval_node.unified_job_template.timeout == 10 + def test_approval_node_creation_failure(self, post, approval_node, admin_user): # This test leaves off a required param to assert that user will get a 400. 
url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'}) diff --git a/awx/main/tests/functional/conftest.py b/awx/main/tests/functional/conftest.py index 2e3563a2b6..4f8b6bc83c 100644 --- a/awx/main/tests/functional/conftest.py +++ b/awx/main/tests/functional/conftest.py @@ -706,7 +706,7 @@ def jt_linked(organization, project, inventory, machine_credential, credential, @pytest.fixture def workflow_job_template(organization): - wjt = WorkflowJobTemplate(name='test-workflow_job_template', organization=organization) + wjt = WorkflowJobTemplate.objects.create(name='test-workflow_job_template', organization=organization) wjt.save() return wjt diff --git a/awx/main/tests/functional/models/test_job.py b/awx/main/tests/functional/models/test_job.py index 7e1ca0b1be..e2ac17fb43 100644 --- a/awx/main/tests/functional/models/test_job.py +++ b/awx/main/tests/functional/models/test_job.py @@ -64,3 +64,26 @@ class TestSlicingModels: inventory2 = Inventory.objects.create(organization=organization, name='fooinv') [inventory2.hosts.create(name='foo{}'.format(i)) for i in range(3)] assert job_template.get_effective_slice_ct({'inventory': inventory2}) + + def test_effective_slice_count_prompt(self, job_template, inventory, organization): + job_template.inventory = inventory + # Add our prompt fields to the JT to allow overrides + job_template.ask_job_slice_count_on_launch = True + job_template.ask_inventory_on_launch = True + # Set a default value of the slice count to something low + job_template.job_slice_count = 2 + # Create an inventory with 4 nodes + inventory2 = Inventory.objects.create(organization=organization, name='fooinv') + [inventory2.hosts.create(name='foo{}'.format(i)) for i in range(4)] + # The inventory slice count will be the min of the number of nodes (4) or the job slice (2) + assert job_template.get_effective_slice_ct({'inventory': inventory2}) == 2 + # Now we are going to pass in an override (like the 
prompt would) and as long as that is < host count we expect that back + assert job_template.get_effective_slice_ct({'inventory': inventory2, 'job_slice_count': 3}) == 3 + + def test_slice_count_prompt_limited_by_inventory(self, job_template, inventory, organization): + assert inventory.hosts.count() == 0 + job_template.inventory = inventory + inventory.hosts.create(name='foo') + + unified_job = job_template.create_unified_job(job_slice_count=2) + assert isinstance(unified_job, Job) diff --git a/awx/main/tests/functional/models/test_job_launch_config.py b/awx/main/tests/functional/models/test_job_launch_config.py index 96c422af80..8f8e56522d 100644 --- a/awx/main/tests/functional/models/test_job_launch_config.py +++ b/awx/main/tests/functional/models/test_job_launch_config.py @@ -1,7 +1,8 @@ import pytest # AWX -from awx.main.models import JobTemplate, JobLaunchConfig +from awx.main.models.jobs import JobTemplate, LaunchTimeConfigBase +from awx.main.models.execution_environments import ExecutionEnvironment @pytest.fixture @@ -11,18 +12,6 @@ def full_jt(inventory, project, machine_credential): return jt -@pytest.fixture -def config_factory(full_jt): - def return_config(data): - job = full_jt.create_unified_job(**data) - try: - return job.launch_config - except JobLaunchConfig.DoesNotExist: - return None - - return return_config - - @pytest.mark.django_db class TestConfigCreation: """ @@ -40,28 +29,73 @@ class TestConfigCreation: assert config.limit == 'foobar' assert config.char_prompts == {'limit': 'foobar'} - def test_added_credential(self, full_jt, credential): - job = full_jt.create_unified_job(credentials=[credential]) + def test_added_related(self, full_jt, credential, default_instance_group, label): + job = full_jt.create_unified_job(credentials=[credential], instance_groups=[default_instance_group], labels=[label]) config = job.launch_config assert set(config.credentials.all()) == set([credential]) + assert set(config.labels.all()) == set([label]) + assert 
set(config.instance_groups.all()) == set([default_instance_group]) def test_survey_passwords_ignored(self, inventory_source): iu = inventory_source.create_unified_job(survey_passwords={'foo': '$encrypted$'}) assert iu.launch_config.prompts_dict() == {} +@pytest.fixture +def full_prompts_dict(inventory, credential, label, default_instance_group): + ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar') + r = { + 'limit': 'foobar', + 'inventory': inventory, + 'credentials': [credential], + 'execution_environment': ee, + 'labels': [label], + 'instance_groups': [default_instance_group], + 'verbosity': 3, + 'scm_branch': 'non_dev', + 'diff_mode': True, + 'skip_tags': 'foobar', + 'job_tags': 'untagged', + 'forks': 26, + 'job_slice_count': 2, + 'timeout': 200, + 'extra_vars': {'prompted_key': 'prompted_val'}, + 'job_type': 'check', + } + assert set(JobTemplate.get_ask_mapping().keys()) - set(r.keys()) == set() # make fixture comprehensive + return r + + @pytest.mark.django_db -class TestConfigReversibility: +def test_config_reversibility(full_jt, full_prompts_dict): """ Checks that a blob of saved prompts will be re-created in the prompts_dict for launching new jobs """ + config = full_jt.create_unified_job(**full_prompts_dict).launch_config + assert config.prompts_dict() == full_prompts_dict - def test_char_field_only(self, config_factory): - config = config_factory({'limit': 'foobar'}) - assert config.prompts_dict() == {'limit': 'foobar'} - def test_related_objects(self, config_factory, inventory, credential): - prompts = {'limit': 'foobar', 'inventory': inventory, 'credentials': set([credential])} - config = config_factory(prompts) - assert config.prompts_dict() == prompts +@pytest.mark.django_db +class TestLaunchConfigModels: + def get_concrete_subclasses(self, cls): + r = [] + for c in cls.__subclasses__(): + if c._meta.abstract: + r.extend(self.get_concrete_subclasses(c)) + else: + r.append(c) + return r + + def 
test_non_job_config_complete(self): + """This performs model validation which replaces code that used run on import.""" + for field_name in JobTemplate.get_ask_mapping().keys(): + if field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS: + assert not hasattr(LaunchTimeConfigBase, field_name) + else: + assert hasattr(LaunchTimeConfigBase, field_name) + + def test_subclass_fields_complete(self): + for cls in self.get_concrete_subclasses(LaunchTimeConfigBase): + for field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS: + assert hasattr(cls, field_name) diff --git a/awx/main/tests/functional/models/test_workflow.py b/awx/main/tests/functional/models/test_workflow.py index d8fa495c6c..a21fbaa73b 100644 --- a/awx/main/tests/functional/models/test_workflow.py +++ b/awx/main/tests/functional/models/test_workflow.py @@ -12,6 +12,9 @@ from awx.main.models.workflow import ( ) from awx.main.models.jobs import JobTemplate, Job from awx.main.models.projects import ProjectUpdate +from awx.main.models.credential import Credential, CredentialType +from awx.main.models.label import Label +from awx.main.models.ha import InstanceGroup from awx.main.scheduler.dag_workflow import WorkflowDAG from awx.api.versioning import reverse from awx.api.views import WorkflowJobTemplateNodeSuccessNodesList @@ -229,6 +232,65 @@ class TestWorkflowJob: assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43} assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43} + def test_combine_prompts_WFJT_to_node(self, project, inventory, organization): + """ + Test that complex prompts like variables, credentials, labels, etc + are properly combined from the workflow-level with the node-level + """ + jt = JobTemplate.objects.create( + project=project, + inventory=inventory, + ask_variables_on_launch=True, + ask_credential_on_launch=True, + ask_instance_groups_on_launch=True, + ask_labels_on_launch=True, + ask_limit_on_launch=True, + ) + wj = WorkflowJob.objects.create(name='test-wf-job', 
extra_vars='{}') + + common_ig = InstanceGroup.objects.create(name='common') + common_ct = CredentialType.objects.create(name='common') + + node = WorkflowJobNode.objects.create(workflow_job=wj, unified_job_template=jt, extra_vars={'node_key': 'node_val'}) + node.limit = 'node_limit' + node.save() + node_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='node')) + node_cred_conflicting = Credential.objects.create(credential_type=common_ct) + node.credentials.add(node_cred_unique, node_cred_conflicting) + node_labels = [Label.objects.create(name='node1', organization=organization), Label.objects.create(name='node2', organization=organization)] + node.labels.add(*node_labels) + node_igs = [common_ig, InstanceGroup.objects.create(name='node')] + for ig in node_igs: + node.instance_groups.add(ig) + + # assertions for where node has prompts but workflow job does not + data = node.get_job_kwargs() + assert data['extra_vars'] == {'node_key': 'node_val'} + assert set(data['credentials']) == set([node_cred_conflicting, node_cred_unique]) + assert data['instance_groups'] == node_igs + assert set(data['labels']) == set(node_labels) + assert data['limit'] == 'node_limit' + + # add prompts to the WorkflowJob + wj.limit = 'wj_limit' + wj.extra_vars = {'wj_key': 'wj_val'} + wj.save() + wj_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='wj')) + wj_cred_conflicting = Credential.objects.create(credential_type=common_ct) + wj.credentials.add(wj_cred_unique, wj_cred_conflicting) + wj.labels.add(Label.objects.create(name='wj1', organization=organization), Label.objects.create(name='wj2', organization=organization)) + wj_igs = [InstanceGroup.objects.create(name='wj'), common_ig] + for ig in wj_igs: + wj.instance_groups.add(ig) + + # assertions for behavior where node and workflow jobs have prompts + data = node.get_job_kwargs() + assert data['extra_vars'] == {'node_key': 'node_val', 'wj_key': 'wj_val'} 
+ assert set(data['credentials']) == set([wj_cred_unique, wj_cred_conflicting, node_cred_unique]) + assert data['instance_groups'] == wj_igs + assert set(data['labels']) == set(node_labels) # as exception, WFJT labels not applied + assert data['limit'] == 'wj_limit' + @pytest.mark.django_db class TestWorkflowJobTemplate: @@ -287,12 +349,25 @@ class TestWorkflowJobTemplatePrompts: @pytest.fixture def wfjt_prompts(self): return WorkflowJobTemplate.objects.create( - ask_inventory_on_launch=True, ask_variables_on_launch=True, ask_limit_on_launch=True, ask_scm_branch_on_launch=True + ask_variables_on_launch=True, + ask_inventory_on_launch=True, + ask_tags_on_launch=True, + ask_labels_on_launch=True, + ask_limit_on_launch=True, + ask_scm_branch_on_launch=True, + ask_skip_tags_on_launch=True, ) @pytest.fixture def prompts_data(self, inventory): - return dict(inventory=inventory, extra_vars={'foo': 'bar'}, limit='webservers', scm_branch='release-3.3') + return dict( + inventory=inventory, + extra_vars={'foo': 'bar'}, + limit='webservers', + scm_branch='release-3.3', + job_tags='foo', + skip_tags='bar', + ) def test_apply_workflow_job_prompts(self, workflow_job_template, wfjt_prompts, prompts_data, inventory): # null or empty fields used @@ -300,6 +375,9 @@ class TestWorkflowJobTemplatePrompts: assert workflow_job.limit is None assert workflow_job.inventory is None assert workflow_job.scm_branch is None + assert workflow_job.job_tags is None + assert workflow_job.skip_tags is None + assert len(workflow_job.labels.all()) is 0 # fields from prompts used workflow_job = workflow_job_template.create_unified_job(**prompts_data) @@ -307,15 +385,21 @@ class TestWorkflowJobTemplatePrompts: assert workflow_job.limit == 'webservers' assert workflow_job.inventory == inventory assert workflow_job.scm_branch == 'release-3.3' + assert workflow_job.job_tags == 'foo' + assert workflow_job.skip_tags == 'bar' # non-null fields from WFJT used workflow_job_template.inventory = inventory 
workflow_job_template.limit = 'fooo' workflow_job_template.scm_branch = 'bar' + workflow_job_template.job_tags = 'baz' + workflow_job_template.skip_tags = 'dinosaur' workflow_job = workflow_job_template.create_unified_job() assert workflow_job.limit == 'fooo' assert workflow_job.inventory == inventory assert workflow_job.scm_branch == 'bar' + assert workflow_job.job_tags == 'baz' + assert workflow_job.skip_tags == 'dinosaur' @pytest.mark.django_db def test_process_workflow_job_prompts(self, inventory, workflow_job_template, wfjt_prompts, prompts_data): @@ -340,12 +424,19 @@ class TestWorkflowJobTemplatePrompts: ask_limit_on_launch=True, scm_branch='bar', ask_scm_branch_on_launch=True, + job_tags='foo', + skip_tags='bar', ), user=org_admin, expect=201, ) wfjt = WorkflowJobTemplate.objects.get(id=r.data['id']) - assert wfjt.char_prompts == {'limit': 'foooo', 'scm_branch': 'bar'} + assert wfjt.char_prompts == { + 'limit': 'foooo', + 'scm_branch': 'bar', + 'job_tags': 'foo', + 'skip_tags': 'bar', + } assert wfjt.ask_scm_branch_on_launch is True assert wfjt.ask_limit_on_launch is True @@ -355,6 +446,67 @@ class TestWorkflowJobTemplatePrompts: assert r.data['limit'] == 'prompt_limit' assert r.data['scm_branch'] == 'prompt_branch' + @pytest.mark.django_db + def test_set_all_ask_for_prompts_false_from_post(self, post, organization, inventory, org_admin): + ''' + Tests default behaviour and values of ask_for_* fields on WFJT via POST + ''' + r = post( + url=reverse('api:workflow_job_template_list'), + data=dict( + name='workflow that tests ask_for prompts', + organization=organization.id, + inventory=inventory.id, + job_tags='', + skip_tags='', + ), + user=org_admin, + expect=201, + ) + wfjt = WorkflowJobTemplate.objects.get(id=r.data['id']) + + assert wfjt.ask_inventory_on_launch is False + assert wfjt.ask_labels_on_launch is False + assert wfjt.ask_limit_on_launch is False + assert wfjt.ask_scm_branch_on_launch is False + assert wfjt.ask_skip_tags_on_launch is False + 
assert wfjt.ask_tags_on_launch is False + assert wfjt.ask_variables_on_launch is False + + @pytest.mark.django_db + def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin): + ''' + Tests behaviour and values of ask_for_* fields on WFJT via POST + ''' + r = post( + url=reverse('api:workflow_job_template_list'), + data=dict( + name='workflow that tests ask_for prompts', + organization=organization.id, + inventory=inventory.id, + job_tags='', + skip_tags='', + ask_inventory_on_launch=True, + ask_labels_on_launch=True, + ask_limit_on_launch=True, + ask_scm_branch_on_launch=True, + ask_skip_tags_on_launch=True, + ask_tags_on_launch=True, + ask_variables_on_launch=True, + ), + user=org_admin, + expect=201, + ) + wfjt = WorkflowJobTemplate.objects.get(id=r.data['id']) + + assert wfjt.ask_inventory_on_launch is True + assert wfjt.ask_labels_on_launch is True + assert wfjt.ask_limit_on_launch is True + assert wfjt.ask_scm_branch_on_launch is True + assert wfjt.ask_skip_tags_on_launch is True + assert wfjt.ask_tags_on_launch is True + assert wfjt.ask_variables_on_launch is True + @pytest.mark.django_db def test_workflow_ancestors(organization): diff --git a/awx/main/tests/functional/test_copy.py b/awx/main/tests/functional/test_copy.py index 41f635dde9..9be8d6574c 100644 --- a/awx/main/tests/functional/test_copy.py +++ b/awx/main/tests/functional/test_copy.py @@ -6,12 +6,19 @@ from awx.main.utils import decrypt_field from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowApprovalTemplate from awx.main.models.jobs import JobTemplate from awx.main.tasks.system import deep_copy_model_obj +from awx.main.models import Label, ExecutionEnvironment, InstanceGroup @pytest.mark.django_db -def test_job_template_copy(post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin): +def test_job_template_copy( + post, get, project, inventory, 
machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin, organization +): + label = Label.objects.create(name="foobar", organization=organization) + ig = InstanceGroup.objects.create(name="bazbar", organization=organization) job_template_with_survey_passwords.project = project job_template_with_survey_passwords.inventory = inventory + job_template_with_survey_passwords.labels.add(label) + job_template_with_survey_passwords.instance_groups.add(ig) job_template_with_survey_passwords.save() job_template_with_survey_passwords.credentials.add(credential) job_template_with_survey_passwords.credentials.add(machine_credential) @@ -54,6 +61,10 @@ def test_job_template_copy(post, get, project, inventory, machine_credential, va assert vault_credential in jt_copy.credentials.all() assert machine_credential in jt_copy.credentials.all() assert job_template_with_survey_passwords.survey_spec == jt_copy.survey_spec + assert jt_copy.labels.count() != 0 + assert jt_copy.labels.get(pk=label.pk) == label + assert jt_copy.instance_groups.count() != 0 + assert jt_copy.instance_groups.get(pk=ig.pk) == ig @pytest.mark.django_db @@ -109,8 +120,22 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization @pytest.mark.django_db def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization): + ''' + Tests the FIELDS_TO_PRESERVE_AT_COPY attribute on WFJTs + ''' workflow_job_template.organization = organization + + label = Label.objects.create(name="foobar", organization=organization) + workflow_job_template.labels.add(label) + + ee = ExecutionEnvironment.objects.create(name="barfoo", organization=organization) + workflow_job_template.execution_environment = ee + + ig = InstanceGroup.objects.create(name="bazbar", organization=organization) + workflow_job_template.instance_groups.add(ig) + workflow_job_template.save() + jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 
5)] nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)] nodes[0].success_nodes.add(nodes[1]) @@ -124,9 +149,16 @@ def test_workflow_job_template_copy(workflow_job_template, post, get, admin, org wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id) args, kwargs = deep_copy_mock.call_args deep_copy_model_obj(*args, **kwargs) + assert wfjt_copy.organization == organization assert wfjt_copy.created_by == admin assert wfjt_copy.name == 'new wfjt name' + assert wfjt_copy.labels.count() != 0 + assert wfjt_copy.labels.get(pk=label.pk) == label + assert wfjt_copy.execution_environment == ee + assert wfjt_copy.instance_groups.count() != 0 + assert wfjt_copy.instance_groups.get(pk=ig.pk) == ig + copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()] copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1])) for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]): diff --git a/awx/main/tests/functional/test_instances.py b/awx/main/tests/functional/test_instances.py index 39afa7dd32..e704de8971 100644 --- a/awx/main/tests/functional/test_instances.py +++ b/awx/main/tests/functional/test_instances.py @@ -417,3 +417,31 @@ class TestInstanceGroupOrdering: assert job.preferred_instance_groups == [ig_inv, ig_org] job.job_template.instance_groups.add(ig_tmp) assert job.preferred_instance_groups == [ig_tmp, ig_inv, ig_org] + + def test_job_instance_groups_cache_default(self, instance_group_factory, inventory, project, default_instance_group): + jt = JobTemplate.objects.create(inventory=inventory, project=project) + job = jt.create_unified_job() + print(job.preferred_instance_groups_cache) + print(default_instance_group) + assert job.preferred_instance_groups_cache == [default_instance_group.id] + + def test_job_instance_groups_cache_default_additional_items(self, 
instance_group_factory, inventory, project, default_instance_group): + ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()]) + ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()]) + ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()]) + project.organization.instance_groups.add(ig_org) + inventory.instance_groups.add(ig_inv) + jt = JobTemplate.objects.create(inventory=inventory, project=project) + jt.instance_groups.add(ig_tmp) + job = jt.create_unified_job() + assert job.preferred_instance_groups_cache == [ig_tmp.id, ig_inv.id, ig_org.id] + + def test_job_instance_groups_cache_prompt(self, instance_group_factory, inventory, project, default_instance_group): + ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()]) + ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()]) + ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()]) + project.organization.instance_groups.add(ig_org) + inventory.instance_groups.add(ig_inv) + jt = JobTemplate.objects.create(inventory=inventory, project=project) + job = jt.create_unified_job(instance_groups=[ig_tmp]) + assert job.preferred_instance_groups_cache == [ig_tmp.id] diff --git a/awx/main/tests/functional/test_jobs.py b/awx/main/tests/functional/test_jobs.py index a6626ce9c6..da3b9fd57c 100644 --- a/awx/main/tests/functional/test_jobs.py +++ b/awx/main/tests/functional/test_jobs.py @@ -3,7 +3,20 @@ import pytest from unittest import mock import json -from awx.main.models import Job, Instance, JobHostSummary, InventoryUpdate, InventorySource, Project, ProjectUpdate, SystemJob, AdHocCommand +from awx.main.models import ( + Job, + Instance, + JobHostSummary, + InventoryUpdate, + InventorySource, + Project, + ProjectUpdate, + SystemJob, + AdHocCommand, + InstanceGroup, + Label, + ExecutionEnvironment, +) from awx.main.tasks.system 
import cluster_node_heartbeat from django.test.utils import override_settings @@ -103,14 +116,88 @@ def test_job_notification_host_data(inventory, machine_credential, project, job_ class TestLaunchConfig: def test_null_creation_from_prompts(self): job = Job.objects.create() - data = {"credentials": [], "extra_vars": {}, "limit": None, "job_type": None} + data = { + "credentials": [], + "extra_vars": {}, + "limit": None, + "job_type": None, + "execution_environment": None, + "instance_groups": None, + "labels": None, + "forks": None, + "timeout": None, + "job_slice_count": None, + } config = job.create_config_from_prompts(data) assert config is None def test_only_limit_defined(self, job_template): job = Job.objects.create(job_template=job_template) - data = {"credentials": [], "extra_vars": {}, "job_tags": None, "limit": ""} + data = { + "credentials": [], + "extra_vars": {}, + "job_tags": None, + "limit": "", + "execution_environment": None, + "instance_groups": None, + "labels": None, + "forks": None, + "timeout": None, + "job_slice_count": None, + } config = job.create_config_from_prompts(data) assert config.char_prompts == {"limit": ""} assert not config.credentials.exists() assert config.prompts_dict() == {"limit": ""} + + def test_many_to_many_fields(self, job_template, organization): + job = Job.objects.create(job_template=job_template) + ig1 = InstanceGroup.objects.create(name='bar') + ig2 = InstanceGroup.objects.create(name='foo') + job_template.instance_groups.add(ig2) + label1 = Label.objects.create(name='foo', description='bar', organization=organization) + label2 = Label.objects.create(name='faz', description='baz', organization=organization) + # Order should matter here which is why we do 2 and then 1 + data = { + "credentials": [], + "extra_vars": {}, + "job_tags": None, + "limit": None, + "execution_environment": None, + "instance_groups": [ig2, ig1], + "labels": [label2, label1], + "forks": None, + "timeout": None, + "job_slice_count": None, + } + 
config = job.create_config_from_prompts(data) + + assert config.instance_groups.exists() + config_instance_group_ids = [item.id for item in config.instance_groups.all()] + assert config_instance_group_ids == [ig2.id, ig1.id] + + assert config.labels.exists() + config_label_ids = [item.id for item in config.labels.all()] + assert config_label_ids == [label2.id, label1.id] + + def test_pk_field(self, job_template, organization): + job = Job.objects.create(job_template=job_template) + ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar') + # Order should matter here which is why we do 2 and then 1 + data = { + "credentials": [], + "extra_vars": {}, + "job_tags": None, + "limit": None, + "execution_environment": ee, + "instance_groups": [], + "labels": [], + "forks": None, + "timeout": None, + "job_slice_count": None, + } + config = job.create_config_from_prompts(data) + + assert config.execution_environment + # We just write the PK instead of trying to assign an item, that happens on the save + assert config.execution_environment_id == ee.id diff --git a/awx/main/tests/functional/test_rbac_job.py b/awx/main/tests/functional/test_rbac_job.py index f260f7b72c..4f17aab45d 100644 --- a/awx/main/tests/functional/test_rbac_job.py +++ b/awx/main/tests/functional/test_rbac_job.py @@ -3,7 +3,20 @@ import pytest from rest_framework.exceptions import PermissionDenied from awx.main.access import JobAccess, JobLaunchConfigAccess, AdHocCommandAccess, InventoryUpdateAccess, ProjectUpdateAccess -from awx.main.models import Job, JobLaunchConfig, JobTemplate, AdHocCommand, InventoryUpdate, InventorySource, ProjectUpdate, User, Credential +from awx.main.models import ( + Job, + JobLaunchConfig, + JobTemplate, + AdHocCommand, + InventoryUpdate, + InventorySource, + ProjectUpdate, + User, + Credential, + ExecutionEnvironment, + InstanceGroup, + Label, +) from crum import impersonate @@ -302,13 +315,33 @@ class TestLaunchConfigAccess: access = 
JobLaunchConfigAccess(rando) cred1, cred2 = self._make_two_credentials(credentialtype_ssh) - assert access.has_credentials_access(config) # has access if 0 creds + assert access.has_obj_m2m_access(config) # has access if 0 creds config.credentials.add(cred1, cred2) - assert not access.has_credentials_access(config) # lacks access to both + assert not access.has_obj_m2m_access(config) # lacks access to both cred1.use_role.members.add(rando) - assert not access.has_credentials_access(config) # lacks access to 1 + assert not access.has_obj_m2m_access(config) # lacks access to 1 cred2.use_role.members.add(rando) - assert access.has_credentials_access(config) # has access to both + assert access.has_obj_m2m_access(config) # has access to both + + def test_new_execution_environment_access(self, rando): + ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar') + access = JobLaunchConfigAccess(rando) + + assert access.can_add({'execution_environment': ee}) # can add because access to ee will be granted + + def test_new_label_access(self, rando, organization): + label = Label.objects.create(name='foo', description='bar', organization=organization) + access = JobLaunchConfigAccess(rando) + + assert not access.can_add({'labels': [label]}) # can't add because no access to label + # We assert in JT unit tests that the access will be granted if label is in JT + + def test_new_instance_group_access(self, rando): + ig = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2) + access = JobLaunchConfigAccess(rando) + + assert not access.can_add({'instance_groups': [ig]}) # can't add because no access to ig + # We assert in JT unit tests that the access will be granted if instance group is in JT def test_can_use_minor(self, rando): # Config object only has flat-field overrides, no RBAC restrictions diff --git a/awx/main/tests/functional/test_rbac_workflow.py b/awx/main/tests/functional/test_rbac_workflow.py index 
d48eb3f80b..4c29907519 100644 --- a/awx/main/tests/functional/test_rbac_workflow.py +++ b/awx/main/tests/functional/test_rbac_workflow.py @@ -6,6 +6,7 @@ from awx.main.access import ( WorkflowJobAccess, # WorkflowJobNodeAccess ) +from awx.main.models import JobTemplate, WorkflowJobTemplateNode from rest_framework.exceptions import PermissionDenied @@ -87,6 +88,16 @@ class TestWorkflowJobTemplateNodeAccess: job_template.read_role.members.add(rando) assert not access.can_add({'workflow_job_template': wfjt, 'unified_job_template': job_template}) + def test_change_JT_no_start_perm(self, wfjt, rando): + wfjt.admin_role.members.add(rando) + access = WorkflowJobTemplateNodeAccess(rando) + jt1 = JobTemplate.objects.create() + jt1.execute_role.members.add(rando) + assert access.can_add({'workflow_job_template': wfjt, 'unified_job_template': jt1}) + node = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=jt1) + jt2 = JobTemplate.objects.create() + assert not access.can_change(node, {'unified_job_template': jt2.id}) + def test_add_node_with_minimum_permissions(self, wfjt, job_template, inventory, rando): wfjt.admin_role.members.add(rando) access = WorkflowJobTemplateNodeAccess(rando) @@ -101,6 +112,92 @@ class TestWorkflowJobTemplateNodeAccess: access = WorkflowJobTemplateNodeAccess(rando) assert access.can_delete(wfjt_node) + @pytest.mark.parametrize( + "add_wfjt_admin, add_jt_admin, permission_type, expected_result, method_type", + [ + (True, False, 'credentials', False, 'can_attach'), + (True, True, 'credentials', True, 'can_attach'), + (True, False, 'labels', False, 'can_attach'), + (True, True, 'labels', True, 'can_attach'), + (True, False, 'instance_groups', False, 'can_attach'), + (True, True, 'instance_groups', True, 'can_attach'), + (True, False, 'credentials', False, 'can_unattach'), + (True, True, 'credentials', True, 'can_unattach'), + (True, False, 'labels', False, 'can_unattach'), + (True, True, 'labels', True, 
'can_unattach'), + (True, False, 'instance_groups', False, 'can_unattach'), + (True, True, 'instance_groups', True, 'can_unattach'), + ], + ) + def test_attacher_permissions(self, wfjt_node, job_template, rando, add_wfjt_admin, permission_type, add_jt_admin, expected_result, mocker, method_type): + wfjt = wfjt_node.workflow_job_template + if add_wfjt_admin: + wfjt.admin_role.members.add(rando) + wfjt.unified_job_template = job_template + if add_jt_admin: + job_template.execute_role.members.add(rando) + + from awx.main.models import Credential, Label, InstanceGroup, Organization, CredentialType + + if permission_type == 'credentials': + sub_obj = Credential.objects.create(credential_type=CredentialType.objects.create()) + sub_obj.use_role.members.add(rando) + elif permission_type == 'labels': + sub_obj = Label.objects.create(organization=Organization.objects.create()) + sub_obj.organization.member_role.members.add(rando) + elif permission_type == 'instance_groups': + sub_obj = InstanceGroup.objects.create() + org = Organization.objects.create() + org.admin_role.members.add(rando) # only admins can see IGs + org.instance_groups.add(sub_obj) + + access = WorkflowJobTemplateNodeAccess(rando) + if method_type == 'can_unattach': + assert getattr(access, method_type)(wfjt_node, sub_obj, permission_type) == expected_result + else: + assert getattr(access, method_type)(wfjt_node, sub_obj, permission_type, {}) == expected_result + + # The actual attachment of labels, credentials and instance groups are tested from JobLaunchConfigAccess + + @pytest.mark.parametrize( + "attachment_type, expect_exception, method_type", + [ + ("credentials", False, 'can_attach'), + ("labels", False, 'can_attach'), + ("instance_groups", False, 'can_attach'), + ("success_nodes", False, 'can_attach'), + ("failure_nodes", False, 'can_attach'), + ("always_nodes", False, 'can_attach'), + ("junk", True, 'can_attach'), + ("credentials", False, 'can_unattach'), + ("labels", False, 'can_unattach'), + 
("instance_groups", False, 'can_unattach'), + ("success_nodes", False, 'can_unattach'), + ("failure_nodes", False, 'can_unattach'), + ("always_nodes", False, 'can_unattach'), + ("junk", True, 'can_unattach'), + ], + ) + def test_attacher_raise_not_implemented(self, wfjt_node, rando, attachment_type, expect_exception, method_type): + wfjt = wfjt_node.workflow_job_template + wfjt.admin_role.members.add(rando) + access = WorkflowJobTemplateNodeAccess(rando) + if expect_exception: + with pytest.raises(NotImplementedError): + access.can_attach(wfjt_node, None, attachment_type, None) + else: + try: + getattr(access, method_type)(wfjt_node, None, attachment_type, None) + except NotImplementedError: + # We explicitly catch NotImplemented because the _nodes type will raise a different exception + assert False, "Exception was raised when it should not have been" + except Exception: + # File "/awx_devel/awx/main/access.py", line 2074, in check_same_WFJT + # raise Exception('Attaching workflow nodes only allowed for other nodes') + pass + + # TODO: Implement additional tests for _nodes attachments here + @pytest.mark.django_db class TestWorkflowJobAccess: diff --git a/awx/main/tests/unit/api/serializers/test_primary_key_related_field.py b/awx/main/tests/unit/api/serializers/test_primary_key_related_field.py index 101bb5de4b..3c9bce527e 100644 --- a/awx/main/tests/unit/api/serializers/test_primary_key_related_field.py +++ b/awx/main/tests/unit/api/serializers/test_primary_key_related_field.py @@ -8,9 +8,17 @@ from rest_framework.exceptions import ValidationError from awx.api.serializers import JobLaunchSerializer -def test_primary_key_related_field(): +@pytest.mark.parametrize( + "param", + [ + ('credentials'), + ('instance_groups'), + ('labels'), + ], +) +def test_primary_key_related_field(param): # We are testing if the PrimaryKeyRelatedField in this serializer can take dictionary. 
# PrimaryKeyRelatedField should not be able to take dictionary as input, and should raise a ValidationError. - data = {'credentials': {'1': '2', '3': '4'}} + data = {param: {'1': '2', '3': '4'}} with pytest.raises(ValidationError): JobLaunchSerializer(data=data) diff --git a/awx/main/tests/unit/api/serializers/test_workflow_serializers.py b/awx/main/tests/unit/api/serializers/test_workflow_serializers.py index 526f06c4c9..9e7fe51344 100644 --- a/awx/main/tests/unit/api/serializers/test_workflow_serializers.py +++ b/awx/main/tests/unit/api/serializers/test_workflow_serializers.py @@ -11,6 +11,7 @@ from awx.api.serializers import ( from awx.main.models import Job, WorkflowJobTemplateNode, WorkflowJob, WorkflowJobNode, WorkflowJobTemplate, Project, Inventory, JobTemplate +@pytest.mark.django_db @mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x, y: {}) class TestWorkflowJobTemplateSerializerGetRelated: @pytest.fixture @@ -26,6 +27,7 @@ class TestWorkflowJobTemplateSerializerGetRelated: 'launch', 'workflow_nodes', 'webhook_key', + 'labels', ], ) def test_get_related(self, mocker, test_get_related, workflow_job_template, related_resource_name): @@ -58,6 +60,7 @@ class TestWorkflowNodeBaseSerializerGetRelated: assert 'unified_job_template' not in related +@pytest.mark.django_db @mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x, y: {}) class TestWorkflowJobTemplateNodeSerializerGetRelated: @pytest.fixture @@ -87,6 +90,8 @@ class TestWorkflowJobTemplateNodeSerializerGetRelated: 'success_nodes', 'failure_nodes', 'always_nodes', + 'labels', + 'instance_groups', ], ) def test_get_related(self, test_get_related, workflow_job_template_node, related_resource_name): @@ -146,6 +151,7 @@ class TestWorkflowJobTemplateNodeSerializerCharPrompts: assert WFJT_serializer.instance.limit == 'webservers' +@pytest.mark.django_db @mock.patch('awx.api.serializers.BaseSerializer.validate', lambda self, attrs: attrs) class 
TestWorkflowJobTemplateNodeSerializerSurveyPasswords: @pytest.fixture @@ -162,7 +168,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords: def test_set_survey_passwords_create(self, jt): serializer = WorkflowJobTemplateNodeSerializer() - wfjt = WorkflowJobTemplate(name='fake-wfjt') + wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}}) assert 'survey_passwords' in attrs assert 'var1' in attrs['survey_passwords'] @@ -171,7 +177,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords: def test_set_survey_passwords_modify(self, jt): serializer = WorkflowJobTemplateNodeSerializer() - wfjt = WorkflowJobTemplate(name='fake-wfjt') + wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}}) assert 'survey_passwords' in attrs @@ -181,7 +187,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords: def test_use_db_answer(self, jt, mocker): serializer = WorkflowJobTemplateNodeSerializer() - wfjt = WorkflowJobTemplate(name='fake-wfjt') + wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt, extra_data={'var1': '$encrypted$foooooo'}) with mocker.patch('awx.main.models.mixins.decrypt_value', return_value='foo'): attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}}) @@ -196,7 +202,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords: with that particular var omitted so on launch time the default takes effect """ serializer = WorkflowJobTemplateNodeSerializer() - wfjt = WorkflowJobTemplate(name='fake-wfjt') 
+ wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt') jt.survey_spec['spec'][0]['default'] = '$encrypted$bar' attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}}) assert 'survey_passwords' in attrs @@ -230,6 +236,8 @@ class TestWorkflowJobNodeSerializerGetRelated: 'success_nodes', 'failure_nodes', 'always_nodes', + 'labels', + 'instance_groups', ], ) def test_get_related(self, test_get_related, workflow_job_node, related_resource_name): diff --git a/awx/main/tests/unit/api/test_views.py b/awx/main/tests/unit/api/test_views.py index 7f69c816e7..e2a482e1dc 100644 --- a/awx/main/tests/unit/api/test_views.py +++ b/awx/main/tests/unit/api/test_views.py @@ -59,7 +59,7 @@ class TestApiRootView: class TestJobTemplateLabelList: def test_inherited_mixin_unattach(self): - with mock.patch('awx.api.generics.DeleteLastUnattachLabelMixin.unattach') as mixin_unattach: + with mock.patch('awx.api.views.labels.LabelSubListCreateAttachDetachView.unattach') as mixin_unattach: view = JobTemplateLabelList() mock_request = mock.MagicMock() diff --git a/awx/main/tests/unit/models/test_label.py b/awx/main/tests/unit/models/test_label.py index 0d5b5b76c0..e049a88578 100644 --- a/awx/main/tests/unit/models/test_label.py +++ b/awx/main/tests/unit/models/test_label.py @@ -1,9 +1,15 @@ import pytest from unittest import mock -from awx.main.models.label import Label -from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob -from awx.main.models.inventory import Inventory +from awx.main.models import ( + Label, + UnifiedJobTemplate, + UnifiedJob, + Inventory, + Schedule, + WorkflowJobTemplateNode, + WorkflowJobNode, +) mock_query_set = mock.MagicMock() @@ -14,12 +20,6 @@ mock_objects = mock.MagicMock(filter=mock.MagicMock(return_value=mock_query_set) @pytest.mark.django_db @mock.patch('awx.main.models.label.Label.objects', mock_objects) class TestLabelFilterMocked: - def test_get_orphaned_labels(self, 
mocker): - ret = Label.get_orphaned_labels() - - assert mock_query_set == ret - Label.objects.filter.assert_called_with(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True) - def test_is_detached(self, mocker): mock_query_set.exists.return_value = True @@ -27,7 +27,15 @@ class TestLabelFilterMocked: ret = label.is_detached() assert ret is True - Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True) + Label.objects.filter.assert_called_with( + id=37, + unifiedjob_labels__isnull=True, + unifiedjobtemplate_labels__isnull=True, + inventory_labels__isnull=True, + schedule_labels__isnull=True, + workflowjobtemplatenode_labels__isnull=True, + workflowjobnode_labels__isnull=True, + ) mock_query_set.exists.assert_called_with() def test_is_detached_not(self, mocker): @@ -37,39 +45,102 @@ class TestLabelFilterMocked: ret = label.is_detached() assert ret is False - Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True) + Label.objects.filter.assert_called_with( + id=37, + unifiedjob_labels__isnull=True, + unifiedjobtemplate_labels__isnull=True, + inventory_labels__isnull=True, + schedule_labels__isnull=True, + workflowjobtemplatenode_labels__isnull=True, + workflowjobnode_labels__isnull=True, + ) + mock_query_set.exists.assert_called_with() @pytest.mark.parametrize( - "jt_count,j_count,inv_count,expected", + "jt_count,j_count,inv_count,sched_count,wfnode_count,wfnodej_count,expected", [ - (1, 0, 0, True), - (0, 1, 0, True), - (0, 0, 1, True), - (1, 1, 1, False), + (1, 0, 0, 0, 0, 0, True), + (0, 1, 0, 0, 0, 0, True), + (1, 1, 0, 0, 0, 0, False), + (0, 0, 1, 0, 0, 0, True), + (1, 0, 1, 0, 0, 0, False), + (0, 1, 1, 0, 0, 0, False), + (1, 1, 1, 0, 0, 0, False), + (0, 0, 0, 1, 0, 0, True), + (1, 0, 0, 1, 0, 0, False), + (0, 1, 0, 1, 0, 0, False), + (1, 1, 
0, 1, 0, 0, False), + (0, 0, 1, 1, 0, 0, False), + (1, 0, 1, 1, 0, 0, False), + (0, 1, 1, 1, 0, 0, False), + (1, 1, 1, 1, 0, 0, False), + (0, 0, 0, 0, 1, 0, True), + (1, 0, 0, 0, 1, 0, False), + (0, 1, 0, 0, 1, 0, False), + (1, 1, 0, 0, 1, 0, False), + (0, 0, 1, 0, 1, 0, False), + (1, 0, 1, 0, 1, 0, False), + (0, 1, 1, 0, 1, 0, False), + (1, 1, 1, 0, 1, 0, False), + (0, 0, 0, 1, 1, 0, False), + (1, 0, 0, 1, 1, 0, False), + (0, 1, 0, 1, 1, 0, False), + (1, 1, 0, 1, 1, 0, False), + (0, 0, 1, 1, 1, 0, False), + (1, 0, 1, 1, 1, 0, False), + (0, 1, 1, 1, 1, 0, False), + (1, 1, 1, 1, 1, 0, False), + (0, 0, 0, 0, 0, 1, True), + (1, 0, 0, 0, 0, 1, False), + (0, 1, 0, 0, 0, 1, False), + (1, 1, 0, 0, 0, 1, False), + (0, 0, 1, 0, 0, 1, False), + (1, 0, 1, 0, 0, 1, False), + (0, 1, 1, 0, 0, 1, False), + (1, 1, 1, 0, 0, 1, False), + (0, 0, 0, 1, 0, 1, False), + (1, 0, 0, 1, 0, 1, False), + (0, 1, 0, 1, 0, 1, False), + (1, 1, 0, 1, 0, 1, False), + (0, 0, 1, 1, 0, 1, False), + (1, 0, 1, 1, 0, 1, False), + (0, 1, 1, 1, 0, 1, False), + (1, 1, 1, 1, 0, 1, False), + (0, 0, 0, 0, 1, 1, False), + (1, 0, 0, 0, 1, 1, False), + (0, 1, 0, 0, 1, 1, False), + (1, 1, 0, 0, 1, 1, False), + (0, 0, 1, 0, 1, 1, False), + (1, 0, 1, 0, 1, 1, False), + (0, 1, 1, 0, 1, 1, False), + (1, 1, 1, 0, 1, 1, False), + (0, 0, 0, 1, 1, 1, False), + (1, 0, 0, 1, 1, 1, False), + (0, 1, 0, 1, 1, 1, False), + (1, 1, 0, 1, 1, 1, False), + (0, 0, 1, 1, 1, 1, False), + (1, 0, 1, 1, 1, 1, False), + (0, 1, 1, 1, 1, 1, False), + (1, 1, 1, 1, 1, 1, False), ], ) - def test_is_candidate_for_detach(self, mocker, jt_count, j_count, inv_count, expected): - mock_job_qs = mocker.MagicMock() - mock_job_qs.count = mocker.MagicMock(return_value=j_count) - mocker.patch.object(UnifiedJob, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_job_qs))) - - mock_jt_qs = mocker.MagicMock() - mock_jt_qs.count = mocker.MagicMock(return_value=jt_count) - mocker.patch.object(UnifiedJobTemplate, 'objects', 
mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_jt_qs))) - - mock_inv_qs = mocker.MagicMock() - mock_inv_qs.count = mocker.MagicMock(return_value=inv_count) - mocker.patch.object(Inventory, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_inv_qs))) + def test_is_candidate_for_detach(self, mocker, jt_count, j_count, inv_count, sched_count, wfnode_count, wfnodej_count, expected): + counts = [jt_count, j_count, inv_count, sched_count, wfnode_count, wfnodej_count] + models = [UnifiedJobTemplate, UnifiedJob, Inventory, Schedule, WorkflowJobTemplateNode, WorkflowJobNode] + mockers = [] + for index in range(0, len(models)): + a_mocker = mocker.MagicMock() + a_mocker.count = mocker.MagicMock(return_value=counts[index]) + mocker.patch.object(models[index], 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=a_mocker))) + mockers.append(a_mocker) label = Label(id=37) ret = label.is_candidate_for_detach() - UnifiedJob.objects.filter.assert_called_with(labels__in=[label.id]) - UnifiedJobTemplate.objects.filter.assert_called_with(labels__in=[label.id]) - Inventory.objects.filter.assert_called_with(labels__in=[label.id]) - mock_job_qs.count.assert_called_with() - mock_jt_qs.count.assert_called_with() - mock_inv_qs.count.assert_called_with() + for index in range(0, len(models)): + models[index].objects.filter.assert_called_with(labels__in=[label.id]) + for index in range(0, len(mockers)): + mockers[index].count.assert_called_with() assert ret is expected diff --git a/awx/main/tests/unit/models/test_survey_models.py b/awx/main/tests/unit/models/test_survey_models.py index 9ec5673cd8..57058930ea 100644 --- a/awx/main/tests/unit/models/test_survey_models.py +++ b/awx/main/tests/unit/models/test_survey_models.py @@ -259,13 +259,14 @@ def test_survey_encryption_defaults(survey_spec_factory, question_type, default, @pytest.mark.survey +@pytest.mark.django_db class TestWorkflowSurveys: def test_update_kwargs_survey_defaults(self, 
survey_spec_factory): "Assure that the survey default over-rides a JT variable" spec = survey_spec_factory('var1') spec['spec'][0]['default'] = 3 spec['spec'][0]['required'] = False - wfjt = WorkflowJobTemplate(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="var1: 5") + wfjt = WorkflowJobTemplate.objects.create(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="var1: 5") updated_extra_vars = wfjt._update_unified_job_kwargs({}, {}) assert 'extra_vars' in updated_extra_vars assert json.loads(updated_extra_vars['extra_vars'])['var1'] == 3 @@ -277,7 +278,7 @@ class TestWorkflowSurveys: spec['spec'][0]['required'] = False spec['spec'][1]['required'] = True spec['spec'][2]['required'] = False - wfjt = WorkflowJobTemplate(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="question2: hiworld") + wfjt = WorkflowJobTemplate.objects.create(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="question2: hiworld") assert wfjt.variables_needed_to_start == ['question2'] assert not wfjt.can_start_without_user_input() @@ -311,6 +312,6 @@ class TestExtraVarsNoPrompt: self.process_vars_and_assert(jt, provided_vars, valid) def test_wfjt_extra_vars_counting(self, provided_vars, valid): - wfjt = WorkflowJobTemplate(name='foo', extra_vars={'tmpl_var': 'bar'}) + wfjt = WorkflowJobTemplate.objects.create(name='foo', extra_vars={'tmpl_var': 'bar'}) prompted_fields, ignored_fields, errors = wfjt._accept_or_ignore_job_kwargs(extra_vars=provided_vars) self.process_vars_and_assert(wfjt, provided_vars, valid) diff --git a/awx/main/tests/unit/models/test_workflow_unit.py b/awx/main/tests/unit/models/test_workflow_unit.py index f8bb1e9c84..dc01c3301f 100644 --- a/awx/main/tests/unit/models/test_workflow_unit.py +++ b/awx/main/tests/unit/models/test_workflow_unit.py @@ -94,7 +94,7 @@ def workflow_job_unit(): @pytest.fixture def workflow_job_template_unit(): - return WorkflowJobTemplate(name='workflow') + return 
WorkflowJobTemplate.objects.create(name='workflow') @pytest.fixture @@ -151,6 +151,7 @@ def test_node_getter_and_setters(): assert node.job_type == 'check' +@pytest.mark.django_db class TestWorkflowJobCreate: def test_create_no_prompts(self, wfjt_node_no_prompts, workflow_job_unit, mocker): mock_create = mocker.MagicMock() @@ -165,6 +166,7 @@ class TestWorkflowJobCreate: unified_job_template=wfjt_node_no_prompts.unified_job_template, workflow_job=workflow_job_unit, identifier=mocker.ANY, + execution_environment=None, ) def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, credential, mocker): @@ -180,9 +182,11 @@ class TestWorkflowJobCreate: unified_job_template=wfjt_node_with_prompts.unified_job_template, workflow_job=workflow_job_unit, identifier=mocker.ANY, + execution_environment=None, ) +@pytest.mark.django_db @mock.patch('awx.main.models.workflow.WorkflowNodeBase.get_parent_nodes', lambda self: []) class TestWorkflowJobNodeJobKWARGS: """ @@ -231,4 +235,12 @@ class TestWorkflowJobNodeJobKWARGS: def test_get_ask_mapping_integrity(): - assert list(WorkflowJobTemplate.get_ask_mapping().keys()) == ['extra_vars', 'inventory', 'limit', 'scm_branch'] + assert list(WorkflowJobTemplate.get_ask_mapping().keys()) == [ + 'inventory', + 'limit', + 'scm_branch', + 'labels', + 'job_tags', + 'skip_tags', + 'extra_vars', + ] diff --git a/awx/main/tests/unit/test_access.py b/awx/main/tests/unit/test_access.py index 547af7b42c..0059cb4984 100644 --- a/awx/main/tests/unit/test_access.py +++ b/awx/main/tests/unit/test_access.py @@ -196,6 +196,7 @@ def test_jt_can_add_bad_data(user_unit): assert not access.can_add({'asdf': 'asdf'}) +@pytest.mark.django_db class TestWorkflowAccessMethods: @pytest.fixture def workflow(self, workflow_job_template_factory): diff --git a/awx/main/utils/common.py b/awx/main/utils/common.py index 627e38a1fb..5d06185f78 100644 --- a/awx/main/utils/common.py +++ b/awx/main/utils/common.py @@ -532,6 +532,10 @@ def 
copy_m2m_relationships(obj1, obj2, fields, kwargs=None): if kwargs and field_name in kwargs: override_field_val = kwargs[field_name] if isinstance(override_field_val, (set, list, QuerySet)): + # Labels are additive so we are going to add any src labels in addition to the override labels + if field_name == 'labels': + for jt_label in src_field_value.all(): + getattr(obj2, field_name).add(jt_label.id) getattr(obj2, field_name).add(*override_field_val) continue if override_field_val.__class__.__name__ == 'ManyRelatedManager': diff --git a/awx/ui/src/api/mixins/Labels.mixin.js b/awx/ui/src/api/mixins/Labels.mixin.js new file mode 100644 index 0000000000..98aae12034 --- /dev/null +++ b/awx/ui/src/api/mixins/Labels.mixin.js @@ -0,0 +1,49 @@ +const LabelsMixin = (parent) => + class extends parent { + readLabels(id, params) { + return this.http.get(`${this.baseUrl}${id}/labels/`, { + params, + }); + } + + readAllLabels(id) { + const fetchLabels = async (pageNo = 1, labels = []) => { + try { + const { data } = await this.http.get(`${this.baseUrl}${id}/labels/`, { + params: { + page: pageNo, + page_size: 200, + }, + }); + if (data?.next) { + return fetchLabels(pageNo + 1, labels.concat(data.results)); + } + return Promise.resolve({ + data: { + results: labels.concat(data.results), + }, + }); + } catch (error) { + return Promise.reject(error); + } + }; + + return fetchLabels(); + } + + associateLabel(id, label, orgId) { + return this.http.post(`${this.baseUrl}${id}/labels/`, { + name: label.name, + organization: orgId, + }); + } + + disassociateLabel(id, label) { + return this.http.post(`${this.baseUrl}${id}/labels/`, { + id: label.id, + disassociate: true, + }); + } + }; + +export default LabelsMixin; diff --git a/awx/ui/src/api/models/JobTemplates.js b/awx/ui/src/api/models/JobTemplates.js index 969ef8c8c3..d2c1eb7a5d 100644 --- a/awx/ui/src/api/models/JobTemplates.js +++ b/awx/ui/src/api/models/JobTemplates.js @@ -1,10 +1,11 @@ import Base from '../Base'; import 
NotificationsMixin from '../mixins/Notifications.mixin'; import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin'; +import LabelsMixin from '../mixins/Labels.mixin'; import SchedulesMixin from '../mixins/Schedules.mixin'; class JobTemplates extends SchedulesMixin( - InstanceGroupsMixin(NotificationsMixin(Base)) + InstanceGroupsMixin(NotificationsMixin(LabelsMixin(Base))) ) { constructor(http) { super(http); @@ -33,20 +34,6 @@ class JobTemplates extends SchedulesMixin( return this.http.get(`${this.baseUrl}${id}/launch/`); } - associateLabel(id, label, orgId) { - return this.http.post(`${this.baseUrl}${id}/labels/`, { - name: label.name, - organization: orgId, - }); - } - - disassociateLabel(id, label) { - return this.http.post(`${this.baseUrl}${id}/labels/`, { - id: label.id, - disassociate: true, - }); - } - readCredentials(id, params) { return this.http.get(`${this.baseUrl}${id}/credentials/`, { params, diff --git a/awx/ui/src/api/models/Schedules.js b/awx/ui/src/api/models/Schedules.js index 40655c0349..a32c620538 100644 --- a/awx/ui/src/api/models/Schedules.js +++ b/awx/ui/src/api/models/Schedules.js @@ -1,6 +1,8 @@ import Base from '../Base'; +import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin'; +import LabelsMixin from '../mixins/Labels.mixin'; -class Schedules extends Base { +class Schedules extends InstanceGroupsMixin(LabelsMixin(Base)) { constructor(http) { super(http); this.baseUrl = 'api/v2/schedules/'; diff --git a/awx/ui/src/api/models/WorkflowJobTemplateNodes.js b/awx/ui/src/api/models/WorkflowJobTemplateNodes.js index eab9c1ddca..fce36ad516 100644 --- a/awx/ui/src/api/models/WorkflowJobTemplateNodes.js +++ b/awx/ui/src/api/models/WorkflowJobTemplateNodes.js @@ -1,6 +1,8 @@ import Base from '../Base'; +import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin'; +import LabelsMixin from '../mixins/Labels.mixin'; -class WorkflowJobTemplateNodes extends Base { +class WorkflowJobTemplateNodes extends 
LabelsMixin(InstanceGroupsMixin(Base)) { constructor(http) { super(http); this.baseUrl = 'api/v2/workflow_job_template_nodes/'; diff --git a/awx/ui/src/api/models/WorkflowJobTemplates.js b/awx/ui/src/api/models/WorkflowJobTemplates.js index 4ec2758653..430b8caed2 100644 --- a/awx/ui/src/api/models/WorkflowJobTemplates.js +++ b/awx/ui/src/api/models/WorkflowJobTemplates.js @@ -1,8 +1,11 @@ import Base from '../Base'; import SchedulesMixin from '../mixins/Schedules.mixin'; import NotificationsMixin from '../mixins/Notifications.mixin'; +import LabelsMixin from '../mixins/Labels.mixin'; -class WorkflowJobTemplates extends SchedulesMixin(NotificationsMixin(Base)) { +class WorkflowJobTemplates extends SchedulesMixin( + NotificationsMixin(LabelsMixin(Base)) +) { constructor(http) { super(http); this.baseUrl = 'api/v2/workflow_job_templates/'; diff --git a/awx/ui/src/components/LabelSelect/LabelSelect.js b/awx/ui/src/components/LabelSelect/LabelSelect.js index f7c93ffccd..2aa06b77b6 100644 --- a/awx/ui/src/components/LabelSelect/LabelSelect.js +++ b/awx/ui/src/components/LabelSelect/LabelSelect.js @@ -1,6 +1,12 @@ import React, { useState, useEffect } from 'react'; import { func, arrayOf, number, shape, string, oneOfType } from 'prop-types'; -import { Select, SelectOption, SelectVariant } from '@patternfly/react-core'; +import { + Chip, + ChipGroup, + Select, + SelectOption, + SelectVariant, +} from '@patternfly/react-core'; import { t } from '@lingui/macro'; import { LabelsAPI } from 'api'; import useIsMounted from 'hooks/useIsMounted'; @@ -60,7 +66,12 @@ function LabelSelect({ value, placeholder, onChange, onError, createText }) { const renderOptions = (opts) => opts.map((option) => ( - + {option.name} )); @@ -73,6 +84,23 @@ function LabelSelect({ value, placeholder, onChange, onError, createText }) { } return null; }; + + const chipGroupComponent = () => ( + + {(selections || []).map((currentChip) => ( + { + onSelect(e, currentChip); + }} + > + {currentChip.name} + + 
))} + + ); + return ( diff --git a/awx/ui/src/components/LabelSelect/LabelSelect.test.js b/awx/ui/src/components/LabelSelect/LabelSelect.test.js index 6e45148abd..53b1a18701 100644 --- a/awx/ui/src/components/LabelSelect/LabelSelect.test.js +++ b/awx/ui/src/components/LabelSelect/LabelSelect.test.js @@ -63,7 +63,7 @@ describe('', () => { const selectOptions = wrapper.find('SelectOption'); expect(selectOptions).toHaveLength(4); }); - test('Generate a label ', async () => { + test('Generate a label', async () => { let wrapper; const onChange = jest.fn(); LabelsAPI.read.mockReturnValue({ @@ -79,4 +79,33 @@ describe('', () => { await wrapper.find('Select').invoke('onSelect')({}, 'foo'); expect(onChange).toBeCalledWith([{ id: 'foo', name: 'foo' }]); }); + test('should handle read-only labels', async () => { + let wrapper; + const onChange = jest.fn(); + LabelsAPI.read.mockReturnValue({ + data: { + results: [ + { id: 1, name: 'read only' }, + { id: 2, name: 'not read only' }, + ], + }, + }); + await act(async () => { + wrapper = mount( + {}} + onChange={onChange} + /> + ); + }); + wrapper.find('SelectToggle').simulate('click'); + const selectOptions = wrapper.find('SelectOption'); + expect(selectOptions).toHaveLength(2); + expect(selectOptions.at(0).prop('isDisabled')).toBe(true); + expect(selectOptions.at(1).prop('isDisabled')).toBe(false); + }); }); diff --git a/awx/ui/src/components/LaunchButton/LaunchButton.js b/awx/ui/src/components/LaunchButton/LaunchButton.js index be6d444fc5..5f207e3be7 100644 --- a/awx/ui/src/components/LaunchButton/LaunchButton.js +++ b/awx/ui/src/components/LaunchButton/LaunchButton.js @@ -1,9 +1,7 @@ import React, { useState } from 'react'; import { useHistory } from 'react-router-dom'; import { number, shape } from 'prop-types'; - import { t } from '@lingui/macro'; - import { AdHocCommandsAPI, InventorySourcesAPI, @@ -24,6 +22,12 @@ function canLaunchWithoutPrompt(launchData) { !launchData.ask_variables_on_launch && 
!launchData.ask_limit_on_launch && !launchData.ask_scm_branch_on_launch && + !launchData.ask_execution_environment_on_launch && + !launchData.ask_labels_on_launch && + !launchData.ask_forks_on_launch && + !launchData.ask_job_slice_count_on_launch && + !launchData.ask_timeout_on_launch && + !launchData.ask_instance_groups_on_launch && !launchData.survey_enabled && (!launchData.passwords_needed_to_start || launchData.passwords_needed_to_start.length === 0) && @@ -37,6 +41,7 @@ function LaunchButton({ resource, children }) { const [showLaunchPrompt, setShowLaunchPrompt] = useState(false); const [launchConfig, setLaunchConfig] = useState(null); const [surveyConfig, setSurveyConfig] = useState(null); + const [labels, setLabels] = useState([]); const [isLaunching, setIsLaunching] = useState(false); const [error, setError] = useState(null); @@ -50,6 +55,11 @@ function LaunchButton({ resource, children }) { resource.type === 'workflow_job_template' ? WorkflowJobTemplatesAPI.readSurvey(resource.id) : JobTemplatesAPI.readSurvey(resource.id); + const readLabels = + resource.type === 'workflow_job_template' + ? 
WorkflowJobTemplatesAPI.readAllLabels(resource.id) + : JobTemplatesAPI.readAllLabels(resource.id); + try { const { data: launch } = await readLaunch; setLaunchConfig(launch); @@ -60,6 +70,19 @@ function LaunchButton({ resource, children }) { setSurveyConfig(data); } + if (launch.ask_labels_on_launch) { + const { + data: { results }, + } = await readLabels; + + const allLabels = results.map((label) => ({ + ...label, + isReadOnly: true, + })); + + setLabels(allLabels); + } + if (canLaunchWithoutPrompt(launch)) { await launchWithParams({}); } else { @@ -171,6 +194,7 @@ function LaunchButton({ resource, children }) { launchConfig={launchConfig} surveyConfig={surveyConfig} resource={resource} + labels={labels} onLaunch={launchWithParams} onCancel={() => setShowLaunchPrompt(false)} /> diff --git a/awx/ui/src/components/LaunchButton/LaunchButton.test.js b/awx/ui/src/components/LaunchButton/LaunchButton.test.js index 61ea5fd923..fcd7b155c9 100644 --- a/awx/ui/src/components/LaunchButton/LaunchButton.test.js +++ b/awx/ui/src/components/LaunchButton/LaunchButton.test.js @@ -37,6 +37,12 @@ describe('LaunchButton', () => { ask_variables_on_launch: false, ask_limit_on_launch: false, ask_scm_branch_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], }, diff --git a/awx/ui/src/components/LaunchPrompt/LaunchPrompt.js b/awx/ui/src/components/LaunchPrompt/LaunchPrompt.js index b892eab4b7..005c9069e9 100644 --- a/awx/ui/src/components/LaunchPrompt/LaunchPrompt.js +++ b/awx/ui/src/components/LaunchPrompt/LaunchPrompt.js @@ -5,6 +5,7 @@ import { Formik, useFormikContext } from 'formik'; import { useDismissableError } from 'hooks/useRequest'; import mergeExtraVars from 'util/prompt/mergeExtraVars'; import getSurveyValues from 
'util/prompt/getSurveyValues'; +import createNewLabels from 'util/labels'; import ContentLoading from '../ContentLoading'; import ContentError from '../ContentError'; import useLaunchSteps from './useLaunchSteps'; @@ -15,7 +16,9 @@ function PromptModalForm({ onCancel, onSubmit, resource, + labels, surveyConfig, + instanceGroups, }) { const { setFieldTouched, values } = useFormikContext(); const [showDescription, setShowDescription] = useState(false); @@ -27,9 +30,15 @@ function PromptModalForm({ visitStep, visitAllSteps, contentError, - } = useLaunchSteps(launchConfig, surveyConfig, resource); + } = useLaunchSteps( + launchConfig, + surveyConfig, + resource, + labels, + instanceGroups + ); - const handleSubmit = () => { + const handleSubmit = async () => { const postValues = {}; const setValue = (key, value) => { if (typeof value !== 'undefined' && value !== null) { @@ -53,6 +62,27 @@ function PromptModalForm({ setValue('extra_vars', mergeExtraVars(extraVars, surveyValues)); setValue('scm_branch', values.scm_branch); setValue('verbosity', values.verbosity); + setValue('timeout', values.timeout); + setValue('forks', values.forks); + setValue('job_slice_count', values.job_slice_count); + setValue('execution_environment', values.execution_environment?.id); + + if (launchConfig.ask_instance_groups_on_launch) { + const instanceGroupIds = []; + values.instance_groups.forEach((instance_group) => { + instanceGroupIds.push(instance_group.id); + }); + setValue('instance_groups', instanceGroupIds); + } + + if (launchConfig.ask_labels_on_launch) { + const { labelIds } = createNewLabels( + values.labels, + resource.organization + ); + + setValue('labels', labelIds); + } onSubmit(postValues); }; @@ -137,6 +167,7 @@ function LaunchPrompt({ onCancel, onLaunch, resource = {}, + labels = [], surveyConfig, resourceDefaultCredentials = [], }) { @@ -148,7 +179,9 @@ function LaunchPrompt({ launchConfig={launchConfig} surveyConfig={surveyConfig} resource={resource} + labels={labels} 
resourceDefaultCredentials={resourceDefaultCredentials} + instanceGroups={[]} /> ); diff --git a/awx/ui/src/components/LaunchPrompt/LaunchPrompt.test.js b/awx/ui/src/components/LaunchPrompt/LaunchPrompt.test.js index 27263d479c..07563e1a2b 100644 --- a/awx/ui/src/components/LaunchPrompt/LaunchPrompt.test.js +++ b/awx/ui/src/components/LaunchPrompt/LaunchPrompt.test.js @@ -1,6 +1,8 @@ import React from 'react'; import { act, isElementOfType } from 'react-dom/test-utils'; import { + ExecutionEnvironmentsAPI, + InstanceGroupsAPI, InventoriesAPI, CredentialsAPI, CredentialTypesAPI, @@ -16,11 +18,16 @@ import CredentialsStep from './steps/CredentialsStep'; import CredentialPasswordsStep from './steps/CredentialPasswordsStep'; import OtherPromptsStep from './steps/OtherPromptsStep'; import PreviewStep from './steps/PreviewStep'; +import ExecutionEnvironmentStep from './steps/ExecutionEnvironmentStep'; +import InstanceGroupsStep from './steps/InstanceGroupsStep'; +import SurveyStep from './steps/SurveyStep'; jest.mock('../../api/models/Inventories'); +jest.mock('../../api/models/ExecutionEnvironments'); jest.mock('../../api/models/CredentialTypes'); jest.mock('../../api/models/Credentials'); jest.mock('../../api/models/JobTemplates'); +jest.mock('../../api/models/InstanceGroups'); let config; const resource = { @@ -62,6 +69,79 @@ describe('LaunchPrompt', () => { spec: [{ type: 'text', variable: 'foo' }], }, }); + InstanceGroupsAPI.read.mockResolvedValue({ + data: { + results: [ + { + id: 2, + type: 'instance_group', + url: '/api/v2/instance_groups/2/', + related: { + jobs: '/api/v2/instance_groups/2/jobs/', + instances: '/api/v2/instance_groups/2/instances/', + }, + name: 'default', + created: '2022-08-30T20:35:05.747132Z', + modified: '2022-08-30T20:35:05.756690Z', + capacity: 177, + consumed_capacity: 0, + percent_capacity_remaining: 100.0, + jobs_running: 0, + jobs_total: 2, + instances: 3, + is_container_group: false, + credential: null, + policy_instance_percentage: 
100, + policy_instance_minimum: 0, + policy_instance_list: [], + pod_spec_override: '', + summary_fields: { + user_capabilities: { + edit: true, + delete: false, + }, + }, + }, + ], + count: 1, + }, + }); + ExecutionEnvironmentsAPI.read.mockResolvedValue({ + data: { + results: [ + { + id: 1, + type: 'execution_environment', + url: '/api/v2/execution_environments/1/', + related: { + activity_stream: + '/api/v2/execution_environments/1/activity_stream/', + unified_job_templates: + '/api/v2/execution_environments/1/unified_job_templates/', + copy: '/api/v2/execution_environments/1/copy/', + }, + summary_fields: { + execution_environment: {}, + user_capabilities: { + edit: true, + delete: true, + copy: true, + }, + }, + created: '2022-08-30T20:34:55.842997Z', + modified: '2022-08-30T20:34:55.859874Z', + name: 'AWX EE (latest)', + description: '', + organization: null, + image: 'quay.io/ansible/awx-ee:latest', + managed: false, + credential: null, + pull: '', + }, + ], + count: 1, + }, + }); config = { can_start_without_user_input: false, @@ -76,6 +156,12 @@ describe('LaunchPrompt', () => { ask_verbosity_on_launch: false, ask_inventory_on_launch: false, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -96,6 +182,8 @@ describe('LaunchPrompt', () => { ask_inventory_on_launch: true, ask_credential_on_launch: true, ask_scm_branch_on_launch: true, + ask_execution_environment_on_launch: true, + ask_instance_groups_on_launch: true, survey_enabled: true, passwords_needed_to_start: ['ssh_password'], defaults: { @@ -150,13 +238,15 @@ describe('LaunchPrompt', () => { const wizard = await waitForElement(wrapper, 'Wizard'); const steps = wizard.prop('steps'); - expect(steps).toHaveLength(6); + 
expect(steps).toHaveLength(8); expect(steps[0].name.props.children).toEqual('Inventory'); expect(steps[1].name.props.children).toEqual('Credentials'); expect(steps[2].name.props.children).toEqual('Credential passwords'); - expect(steps[3].name.props.children).toEqual('Other prompts'); - expect(steps[4].name.props.children).toEqual('Survey'); - expect(steps[5].name.props.children).toEqual('Preview'); + expect(steps[3].name.props.children).toEqual('Execution Environment'); + expect(steps[4].name.props.children).toEqual('Instance Groups'); + expect(steps[5].name.props.children).toEqual('Other prompts'); + expect(steps[6].name.props.children).toEqual('Survey'); + expect(steps[7].name.props.children).toEqual('Preview'); expect(wizard.find('WizardHeader').prop('title')).toBe('Launch | Foobar'); expect(wizard.find('WizardHeader').prop('description')).toBe( 'Foo Description' @@ -214,6 +304,58 @@ describe('LaunchPrompt', () => { expect(isElementOfType(steps[2].component, PreviewStep)).toEqual(true); }); + test('should add execution environment step', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + ); + }); + const wizard = await waitForElement(wrapper, 'Wizard'); + const steps = wizard.prop('steps'); + + expect(steps).toHaveLength(2); + expect(steps[0].name.props.children).toEqual('Execution Environment'); + expect( + isElementOfType(steps[0].component, ExecutionEnvironmentStep) + ).toEqual(true); + expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true); + }); + + test('should add instance groups step', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + ); + }); + const wizard = await waitForElement(wrapper, 'Wizard'); + const steps = wizard.prop('steps'); + + expect(steps).toHaveLength(2); + expect(steps[0].name.props.children).toEqual('Instance Groups'); + expect(isElementOfType(steps[0].component, InstanceGroupsStep)).toEqual( + true + ); + 
expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true); + }); + test('should add other prompts step', async () => { let wrapper; await act(async () => { @@ -237,4 +379,46 @@ describe('LaunchPrompt', () => { expect(isElementOfType(steps[0].component, OtherPromptsStep)).toEqual(true); expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true); }); + + test('should add survey step', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + ); + }); + const wizard = await waitForElement(wrapper, 'Wizard'); + const steps = wizard.prop('steps'); + + expect(steps).toHaveLength(2); + expect(steps[0].name.props.children).toEqual('Survey'); + expect(isElementOfType(steps[0].component, SurveyStep)).toEqual(true); + expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true); + }); }); diff --git a/awx/ui/src/components/LaunchPrompt/steps/CredentialsStep.js b/awx/ui/src/components/LaunchPrompt/steps/CredentialsStep.js index 1e8c1cd92b..a79f04405c 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/CredentialsStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/CredentialsStep.js @@ -132,7 +132,7 @@ function CredentialsStep({ ); return ( - <> +
{meta.error && ( )} @@ -208,7 +208,7 @@ function CredentialsStep({ }} renderItemChip={renderChip} /> - +
); } diff --git a/awx/ui/src/components/LaunchPrompt/steps/ExecutionEnvironmentStep.js b/awx/ui/src/components/LaunchPrompt/steps/ExecutionEnvironmentStep.js new file mode 100644 index 0000000000..83adee2880 --- /dev/null +++ b/awx/ui/src/components/LaunchPrompt/steps/ExecutionEnvironmentStep.js @@ -0,0 +1,118 @@ +import React, { useCallback, useEffect } from 'react'; +import { useHistory } from 'react-router-dom'; +import { t } from '@lingui/macro'; +import { useField } from 'formik'; +import { ExecutionEnvironmentsAPI } from 'api'; +import { getSearchableKeys } from 'components/PaginatedTable'; +import { getQSConfig, parseQueryString } from 'util/qs'; +import useRequest from 'hooks/useRequest'; +import OptionsList from '../../OptionsList'; +import ContentLoading from '../../ContentLoading'; +import ContentError from '../../ContentError'; + +const QS_CONFIG = getQSConfig('execution_environment', { + page: 1, + page_size: 5, +}); + +function ExecutionEnvironmentStep() { + const [field, , helpers] = useField('execution_environment'); + + const history = useHistory(); + + const { + isLoading, + error, + result: { + execution_environments, + count, + relatedSearchableKeys, + searchableKeys, + }, + request: fetchExecutionEnvironments, + } = useRequest( + useCallback(async () => { + const params = parseQueryString(QS_CONFIG, history.location.search); + const [{ data }, actionsResponse] = await Promise.all([ + ExecutionEnvironmentsAPI.read(params), + ExecutionEnvironmentsAPI.readOptions(), + ]); + return { + execution_environments: data.results, + count: data.count, + relatedSearchableKeys: ( + actionsResponse?.data?.related_search_fields || [] + ).map((val) => val.slice(0, -8)), + searchableKeys: getSearchableKeys(actionsResponse.data.actions?.GET), + }; + }, [history.location]), + { + count: 0, + execution_environments: [], + relatedSearchableKeys: [], + searchableKeys: [], + } + ); + + useEffect(() => { + fetchExecutionEnvironments(); + }, 
[fetchExecutionEnvironments]); + + if (isLoading) { + return ; + } + if (error) { + return ; + } + + return ( +
+ helpers.setValue(null)} + /> +
+ ); +} + +export default ExecutionEnvironmentStep; diff --git a/awx/ui/src/components/LaunchPrompt/steps/ExecutionEnvironmentStep.test.js b/awx/ui/src/components/LaunchPrompt/steps/ExecutionEnvironmentStep.test.js new file mode 100644 index 0000000000..208195ffec --- /dev/null +++ b/awx/ui/src/components/LaunchPrompt/steps/ExecutionEnvironmentStep.test.js @@ -0,0 +1,52 @@ +import React from 'react'; +import { act } from 'react-dom/test-utils'; +import { Formik } from 'formik'; +import { ExecutionEnvironmentsAPI } from 'api'; +import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; +import ExecutionEnvironmentStep from './ExecutionEnvironmentStep'; + +jest.mock('../../../api/models/ExecutionEnvironments'); + +const execution_environments = [ + { id: 1, name: 'ee one', url: '/execution_environments/1' }, + { id: 2, name: 'ee two', url: '/execution_environments/2' }, + { id: 3, name: 'ee three', url: '/execution_environments/3' }, +]; + +describe('ExecutionEnvironmentStep', () => { + beforeEach(() => { + ExecutionEnvironmentsAPI.read.mockResolvedValue({ + data: { + results: execution_environments, + count: 3, + }, + }); + + ExecutionEnvironmentsAPI.readOptions.mockResolvedValue({ + data: { + actions: { + GET: {}, + POST: {}, + }, + related_search_fields: [], + }, + }); + }); + + test('should load execution environments', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + + + ); + }); + wrapper.update(); + + expect(ExecutionEnvironmentsAPI.read).toHaveBeenCalled(); + expect(wrapper.find('OptionsList').prop('options')).toEqual( + execution_environments + ); + }); +}); diff --git a/awx/ui/src/components/LaunchPrompt/steps/InstanceGroupsStep.js b/awx/ui/src/components/LaunchPrompt/steps/InstanceGroupsStep.js new file mode 100644 index 0000000000..dfdb922bcc --- /dev/null +++ b/awx/ui/src/components/LaunchPrompt/steps/InstanceGroupsStep.js @@ -0,0 +1,108 @@ +import React, { useCallback, useEffect } from 'react'; 
+import { useHistory } from 'react-router-dom'; +import { t } from '@lingui/macro'; +import { useField } from 'formik'; +import { InstanceGroupsAPI } from 'api'; +import { getSearchableKeys } from 'components/PaginatedTable'; +import { getQSConfig, parseQueryString } from 'util/qs'; +import useRequest from 'hooks/useRequest'; +import useSelected from 'hooks/useSelected'; +import OptionsList from '../../OptionsList'; +import ContentLoading from '../../ContentLoading'; +import ContentError from '../../ContentError'; + +const QS_CONFIG = getQSConfig('instance-groups', { + page: 1, + page_size: 5, + order_by: 'name', +}); + +function InstanceGroupsStep() { + const [field, , helpers] = useField('instance_groups'); + const { selected, handleSelect, setSelected } = useSelected([], field.value); + + const history = useHistory(); + + const { + result: { instance_groups, count, relatedSearchableKeys, searchableKeys }, + request: fetchInstanceGroups, + error, + isLoading, + } = useRequest( + useCallback(async () => { + const params = parseQueryString(QS_CONFIG, history.location.search); + const [{ data }, actionsResponse] = await Promise.all([ + InstanceGroupsAPI.read(params), + InstanceGroupsAPI.readOptions(), + ]); + return { + instance_groups: data.results, + count: data.count, + relatedSearchableKeys: ( + actionsResponse?.data?.related_search_fields || [] + ).map((val) => val.slice(0, -8)), + searchableKeys: getSearchableKeys(actionsResponse.data.actions?.GET), + }; + }, [history.location]), + { + instance_groups: [], + count: 0, + relatedSearchableKeys: [], + searchableKeys: [], + } + ); + + useEffect(() => { + fetchInstanceGroups(); + }, [fetchInstanceGroups]); + + useEffect(() => { + helpers.setValue(selected); + }, [selected]); // eslint-disable-line react-hooks/exhaustive-deps + + if (isLoading) { + return ; + } + if (error) { + return ; + } + + return ( +
+ setSelected(selectedItems)} + isSelectedDraggable + /> +
+ ); +} + +export default InstanceGroupsStep; diff --git a/awx/ui/src/components/LaunchPrompt/steps/InstanceGroupsStep.test.js b/awx/ui/src/components/LaunchPrompt/steps/InstanceGroupsStep.test.js new file mode 100644 index 0000000000..b260f1ff3c --- /dev/null +++ b/awx/ui/src/components/LaunchPrompt/steps/InstanceGroupsStep.test.js @@ -0,0 +1,52 @@ +import React from 'react'; +import { act } from 'react-dom/test-utils'; +import { Formik } from 'formik'; +import { InstanceGroupsAPI } from 'api'; +import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; +import InstanceGroupsStep from './InstanceGroupsStep'; + +jest.mock('../../../api/models/InstanceGroups'); + +const instance_groups = [ + { id: 1, name: 'ig one', url: '/instance_groups/1' }, + { id: 2, name: 'ig two', url: '/instance_groups/2' }, + { id: 3, name: 'ig three', url: '/instance_groups/3' }, +]; + +describe('InstanceGroupsStep', () => { + beforeEach(() => { + InstanceGroupsAPI.read.mockResolvedValue({ + data: { + results: instance_groups, + count: 3, + }, + }); + + InstanceGroupsAPI.readOptions.mockResolvedValue({ + data: { + actions: { + GET: {}, + POST: {}, + }, + related_search_fields: [], + }, + }); + }); + + test('should load instance groups', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + + + ); + }); + wrapper.update(); + + expect(InstanceGroupsAPI.read).toHaveBeenCalled(); + expect(wrapper.find('OptionsList').prop('options')).toEqual( + instance_groups + ); + }); +}); diff --git a/awx/ui/src/components/LaunchPrompt/steps/InventoryStep.js b/awx/ui/src/components/LaunchPrompt/steps/InventoryStep.js index 494e54e815..3882abd602 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/InventoryStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/InventoryStep.js @@ -70,7 +70,7 @@ function InventoryStep({ warningMessage = null }) { } return ( - <> +
{meta.touched && meta.error && ( )} @@ -109,7 +109,7 @@ function InventoryStep({ warningMessage = null }) { selectItem={helpers.setValue} deselectItem={() => field.onChange(null)} /> - +
); } diff --git a/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.js b/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.js index 2e771299b2..b4c97312fd 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.js @@ -1,15 +1,17 @@ import React from 'react'; - import { t } from '@lingui/macro'; import { useField } from 'formik'; import { Form, FormGroup, Switch } from '@patternfly/react-core'; import styled from 'styled-components'; +import LabelSelect from '../../LabelSelect'; import FormField from '../../FormField'; import { TagMultiSelect } from '../../MultiSelect'; import AnsibleSelect from '../../AnsibleSelect'; import { VariablesField } from '../../CodeEditor'; import Popover from '../../Popover'; import { VerbositySelectField } from '../../VerbositySelectField'; +import jobHelpText from '../../../screens/Job/Job.helptext'; +import workflowHelpText from '../../../screens/Template/shared/WorkflowJobTemplate.helptext'; const FieldHeader = styled.div` display: flex; @@ -22,72 +24,105 @@ const FieldHeader = styled.div` `; function OtherPromptsStep({ launchConfig, variablesMode, onVarModeChange }) { + const helpTextSource = launchConfig.job_template_data + ? jobHelpText + : workflowHelpText; return ( -
{ - e.preventDefault(); - }} - > - {launchConfig.ask_job_type_on_launch && } - {launchConfig.ask_limit_on_launch && ( - - )} - {launchConfig.ask_scm_branch_on_launch && ( - - )} - {launchConfig.ask_verbosity_on_launch && } - {launchConfig.ask_diff_mode_on_launch && } - {launchConfig.ask_tags_on_launch && ( - - )} - {launchConfig.ask_skip_tags_on_launch && ( - - )} - {launchConfig.ask_variables_on_launch && ( - - )} - +
+
{ + e.preventDefault(); + }} + > + {launchConfig.ask_job_type_on_launch && ( + + )} + {launchConfig.ask_scm_branch_on_launch && ( + + )} + {launchConfig.ask_labels_on_launch && ( + + )} + {launchConfig.ask_forks_on_launch && ( + + )} + {launchConfig.ask_limit_on_launch && ( + + )} + {launchConfig.ask_verbosity_on_launch && ( + + )} + {launchConfig.ask_job_slice_count_on_launch && ( + + )} + {launchConfig.ask_timeout_on_launch && ( + + )} + {launchConfig.ask_diff_mode_on_launch && } + {launchConfig.ask_tags_on_launch && ( + + )} + {launchConfig.ask_skip_tags_on_launch && ( + + )} + {launchConfig.ask_variables_on_launch && ( + + )} + +
); } -function JobTypeField() { +function JobTypeField({ helpTextSource }) { const [field, meta, helpers] = useField('job_type'); const options = [ { @@ -107,15 +142,9 @@ function JobTypeField() { const isValid = !(meta.touched && meta.error); return ( - } + labelIcon={} isRequired validated={isValid ? 'default' : 'error'} > @@ -129,15 +158,14 @@ function JobTypeField() { ); } -function VerbosityField() { +function VerbosityField({ helpTextSource }) { const [, meta] = useField('verbosity'); const isValid = !(meta.touched && meta.error); return ( ); @@ -186,4 +214,25 @@ function TagField({ id, name, label, tooltip }) { ); } +function LabelsField({ helpTextSource }) { + const [field, meta, helpers] = useField('labels'); + + return ( + } + validated={!meta.touched || !meta.error ? 'default' : 'error'} + helperTextInvalid={meta.error} + > + helpers.setValue(labels)} + createText={t`Create`} + onError={(err) => helpers.setError(err)} + /> + + ); +} + export default OtherPromptsStep; diff --git a/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.test.js b/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.test.js index 5b5e5eb19a..cdaeb41995 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.test.js +++ b/awx/ui/src/components/LaunchPrompt/steps/OtherPromptsStep.test.js @@ -13,6 +13,11 @@ describe('OtherPromptsStep', () => { @@ -36,6 +41,11 @@ describe('OtherPromptsStep', () => { @@ -48,6 +58,81 @@ describe('OtherPromptsStep', () => { ); }); + test('should render timeout field', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + + + ); + }); + + expect(wrapper.find('FormField#prompt-timeout')).toHaveLength(1); + expect(wrapper.find('FormField#prompt-timeout input').prop('name')).toEqual( + 'timeout' + ); + }); + + test('should render forks field', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + + + ); + }); + + 
expect(wrapper.find('FormField#prompt-forks')).toHaveLength(1); + expect(wrapper.find('FormField#prompt-forks input').prop('name')).toEqual( + 'forks' + ); + }); + + test('should render job slicing field', async () => { + let wrapper; + await act(async () => { + wrapper = mountWithContexts( + + + + ); + }); + + expect(wrapper.find('FormField#prompt-job-slicing')).toHaveLength(1); + expect( + wrapper.find('FormField#prompt-job-slicing input').prop('name') + ).toEqual('job_slice_count'); + }); + test('should render source control branch field', async () => { let wrapper; await act(async () => { @@ -56,6 +141,11 @@ describe('OtherPromptsStep', () => { @@ -76,6 +166,11 @@ describe('OtherPromptsStep', () => { @@ -96,6 +191,11 @@ describe('OtherPromptsStep', () => { @@ -119,6 +219,11 @@ describe('OtherPromptsStep', () => { onVarModeChange={onModeChange} launchConfig={{ ask_variables_on_launch: true, + job_template_data: { + name: 'Demo Job Template', + id: 1, + description: '', + }, }} /> diff --git a/awx/ui/src/components/LaunchPrompt/steps/PreviewStep.js b/awx/ui/src/components/LaunchPrompt/steps/PreviewStep.js index b2612a55fb..753133b109 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/PreviewStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/PreviewStep.js @@ -52,7 +52,7 @@ function PreviewStep({ resource, launchConfig, surveyConfig, formErrors }) { } return ( - <> +
{formErrors && ( {t`Some of the previous step(s) have errors`} @@ -70,7 +70,7 @@ function PreviewStep({ resource, launchConfig, surveyConfig, formErrors }) { launchConfig={launchConfig} overrides={overrides} /> - +
); } diff --git a/awx/ui/src/components/LaunchPrompt/steps/SurveyStep.js b/awx/ui/src/components/LaunchPrompt/steps/SurveyStep.js index 9e526c984b..770983f05e 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/SurveyStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/SurveyStep.js @@ -31,16 +31,18 @@ function SurveyStep({ surveyConfig }) { float: NumberField, }; return ( -
{ - e.preventDefault(); - }} - > - {surveyConfig.spec.map((question) => { - const Field = fieldTypes[question.type]; - return ; - })} - +
+
{ + e.preventDefault(); + }} + > + {surveyConfig.spec.map((question) => { + const Field = fieldTypes[question.type]; + return ; + })} + +
); } SurveyStep.propTypes = { diff --git a/awx/ui/src/components/LaunchPrompt/steps/useCredentialsStep.js b/awx/ui/src/components/LaunchPrompt/steps/useCredentialsStep.js index bd49a26c77..735ae21a4d 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/useCredentialsStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/useCredentialsStep.js @@ -10,7 +10,7 @@ const STEP_ID = 'credentials'; export default function useCredentialsStep( launchConfig, resource, - resourceDefaultCredentials, + resourceDefaultCredentials = [], allowCredentialsWithPasswords = false ) { const [field, meta, helpers] = useField('credentials'); @@ -78,6 +78,6 @@ function getInitialValues(launchConfig, resourceDefaultCredentials) { } return { - credentials: resourceDefaultCredentials || [], + credentials: resourceDefaultCredentials, }; } diff --git a/awx/ui/src/components/LaunchPrompt/steps/useExecutionEnvironmentStep.js b/awx/ui/src/components/LaunchPrompt/steps/useExecutionEnvironmentStep.js new file mode 100644 index 0000000000..611330ad55 --- /dev/null +++ b/awx/ui/src/components/LaunchPrompt/steps/useExecutionEnvironmentStep.js @@ -0,0 +1,46 @@ +import React from 'react'; +import { t } from '@lingui/macro'; +import ExecutionEnvironmentStep from './ExecutionEnvironmentStep'; +import StepName from './StepName'; + +const STEP_ID = 'executionEnvironment'; + +export default function useExecutionEnvironmentStep(launchConfig, resource) { + return { + step: getStep(launchConfig, resource), + initialValues: getInitialValues(launchConfig, resource), + isReady: true, + contentError: null, + hasError: false, + setTouched: (setFieldTouched) => { + setFieldTouched('execution_environment', true, false); + }, + validate: () => {}, + }; +} +function getStep(launchConfig) { + if (!launchConfig.ask_execution_environment_on_launch) { + return null; + } + return { + id: STEP_ID, + name: ( + + {t`Execution Environment`} + + ), + component: , + enableNext: true, + }; +} + +function 
getInitialValues(launchConfig, resource) { + if (!launchConfig.ask_execution_environment_on_launch) { + return {}; + } + + return { + execution_environment: + resource?.summary_fields?.execution_environment || null, + }; +} diff --git a/awx/ui/src/components/LaunchPrompt/steps/useInstanceGroupsStep.js b/awx/ui/src/components/LaunchPrompt/steps/useInstanceGroupsStep.js new file mode 100644 index 0000000000..a15b868b69 --- /dev/null +++ b/awx/ui/src/components/LaunchPrompt/steps/useInstanceGroupsStep.js @@ -0,0 +1,45 @@ +import React from 'react'; +import { t } from '@lingui/macro'; +import InstanceGroupsStep from './InstanceGroupsStep'; +import StepName from './StepName'; + +const STEP_ID = 'instanceGroups'; + +export default function useInstanceGroupsStep( + launchConfig, + resource, + instanceGroups +) { + return { + step: getStep(launchConfig, resource), + initialValues: getInitialValues(launchConfig, instanceGroups), + isReady: true, + contentError: null, + hasError: false, + setTouched: (setFieldTouched) => { + setFieldTouched('instance_groups', true, false); + }, + validate: () => {}, + }; +} +function getStep(launchConfig) { + if (!launchConfig.ask_instance_groups_on_launch) { + return null; + } + return { + id: STEP_ID, + name: {t`Instance Groups`}, + component: , + enableNext: true, + }; +} + +function getInitialValues(launchConfig, instanceGroups) { + if (!launchConfig.ask_instance_groups_on_launch) { + return {}; + } + + return { + instance_groups: instanceGroups || [], + }; +} diff --git a/awx/ui/src/components/LaunchPrompt/steps/useOtherPromptsStep.js b/awx/ui/src/components/LaunchPrompt/steps/useOtherPromptsStep.js index ede4ae8231..620fe8337c 100644 --- a/awx/ui/src/components/LaunchPrompt/steps/useOtherPromptsStep.js +++ b/awx/ui/src/components/LaunchPrompt/steps/useOtherPromptsStep.js @@ -27,9 +27,14 @@ const FIELD_NAMES = [ 'job_tags', 'skip_tags', 'extra_vars', + 'labels', + 'timeout', + 'job_slice_count', + 'forks', + 'labels', ]; -export default 
function useOtherPromptsStep(launchConfig, resource) { +export default function useOtherPromptsStep(launchConfig, resource, labels) { const [variablesField] = useField('extra_vars'); const [variablesMode, setVariablesMode] = useState(null); const [isTouched, setIsTouched] = useState(false); @@ -59,7 +64,7 @@ export default function useOtherPromptsStep(launchConfig, resource) { return { step: getStep(launchConfig, hasError, variablesMode, handleModeChange), - initialValues: getInitialValues(launchConfig, resource), + initialValues: getInitialValues(launchConfig, resource, labels), isReady: true, contentError: null, hasError, @@ -105,11 +110,15 @@ function shouldShowPrompt(launchConfig) { launchConfig.ask_skip_tags_on_launch || launchConfig.ask_variables_on_launch || launchConfig.ask_scm_branch_on_launch || - launchConfig.ask_diff_mode_on_launch + launchConfig.ask_diff_mode_on_launch || + launchConfig.ask_labels_on_launch || + launchConfig.ask_forks_on_launch || + launchConfig.ask_job_slice_count_on_launch || + launchConfig.ask_timeout_on_launch ); } -function getInitialValues(launchConfig, resource) { +function getInitialValues(launchConfig, resource, labels) { const initialValues = {}; if (!launchConfig) { @@ -140,5 +149,17 @@ function getInitialValues(launchConfig, resource) { if (launchConfig.ask_diff_mode_on_launch) { initialValues.diff_mode = resource?.diff_mode || false; } + if (launchConfig.ask_forks_on_launch) { + initialValues.forks = resource?.forks || 0; + } + if (launchConfig.ask_job_slice_count_on_launch) { + initialValues.job_slice_count = resource?.job_slice_count || 1; + } + if (launchConfig.ask_timeout_on_launch) { + initialValues.timeout = resource?.timeout || 0; + } + if (launchConfig.ask_labels_on_launch) { + initialValues.labels = labels || []; + } return initialValues; } diff --git a/awx/ui/src/components/LaunchPrompt/useLaunchSteps.js b/awx/ui/src/components/LaunchPrompt/useLaunchSteps.js index 3d993162f9..7cbba9be8a 100644 --- 
a/awx/ui/src/components/LaunchPrompt/useLaunchSteps.js +++ b/awx/ui/src/components/LaunchPrompt/useLaunchSteps.js @@ -3,9 +3,11 @@ import { useFormikContext } from 'formik'; import useInventoryStep from './steps/useInventoryStep'; import useCredentialsStep from './steps/useCredentialsStep'; import useCredentialPasswordsStep from './steps/useCredentialPasswordsStep'; +import useExecutionEnvironmentStep from './steps/useExecutionEnvironmentStep'; import useOtherPromptsStep from './steps/useOtherPromptsStep'; import useSurveyStep from './steps/useSurveyStep'; import usePreviewStep from './steps/usePreviewStep'; +import useInstanceGroupsStep from './steps/useInstanceGroupsStep'; function showCredentialPasswordsStep(launchConfig, credentials = []) { if ( @@ -39,7 +41,13 @@ function showCredentialPasswordsStep(launchConfig, credentials = []) { return credentialPasswordStepRequired; } -export default function useLaunchSteps(launchConfig, surveyConfig, resource) { +export default function useLaunchSteps( + launchConfig, + surveyConfig, + resource, + labels, + instanceGroups +) { const [visited, setVisited] = useState({}); const [isReady, setIsReady] = useState(false); const { touched, values: formikValues } = useFormikContext(); @@ -56,7 +64,9 @@ export default function useLaunchSteps(launchConfig, surveyConfig, resource) { showCredentialPasswordsStep(launchConfig, formikValues.credentials), visited ), - useOtherPromptsStep(launchConfig, resource), + useExecutionEnvironmentStep(launchConfig, resource), + useInstanceGroupsStep(launchConfig, resource, instanceGroups), + useOtherPromptsStep(launchConfig, resource, labels), useSurveyStep(launchConfig, surveyConfig, resource, visited), ]; const { resetForm } = useFormikContext(); @@ -143,6 +153,8 @@ export default function useLaunchSteps(launchConfig, surveyConfig, resource) { inventory: true, credentials: true, credentialPasswords: true, + executionEnvironment: true, + instanceGroups: true, other: true, survey: true, preview: 
true, diff --git a/awx/ui/src/components/Lookup/ExecutionEnvironmentLookup.js b/awx/ui/src/components/Lookup/ExecutionEnvironmentLookup.js index bfd47ea40a..42767dcd85 100644 --- a/awx/ui/src/components/Lookup/ExecutionEnvironmentLookup.js +++ b/awx/ui/src/components/Lookup/ExecutionEnvironmentLookup.js @@ -10,9 +10,9 @@ import { getQSConfig, parseQueryString, mergeParams } from 'util/qs'; import useRequest from 'hooks/useRequest'; import Popover from '../Popover'; import OptionsList from '../OptionsList'; - import Lookup from './Lookup'; import LookupErrorMessage from './shared/LookupErrorMessage'; +import FieldWithPrompt from '../FieldWithPrompt'; const QS_CONFIG = getQSConfig('execution_environments', { page: 1, @@ -36,6 +36,9 @@ function ExecutionEnvironmentLookup({ value, fieldName, overrideLabel, + isPromptableField, + promptId, + promptName, }) { const location = useLocation(); const { @@ -150,49 +153,52 @@ function ExecutionEnvironmentLookup({ }, [fetchExecutionEnvironments]); const renderLookup = () => ( - ( - dispatch({ type: 'SELECT_ITEM', item })} - deselectItem={(item) => dispatch({ type: 'DESELECT_ITEM', item })} - /> - )} - /> + <> + ( + dispatch({ type: 'SELECT_ITEM', item })} + deselectItem={(item) => dispatch({ type: 'DESELECT_ITEM', item })} + /> + )} + /> + + ); const renderLabel = () => { @@ -202,7 +208,21 @@ function ExecutionEnvironmentLookup({ return t`Execution Environment`; }; - return ( + return isPromptableField ? ( + + {tooltip && isDisabled ? 
( + {renderLookup()} + ) : ( + renderLookup() + )} + + ) : ( { expect( wrapper.find('FormGroup[label="Execution Environment"]').length ).toBe(1); + expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe( + 0 + ); }); test('should fetch execution environments', async () => { @@ -132,4 +135,25 @@ describe('ExecutionEnvironmentLookup', () => { page_size: 5, }); }); + + test('should render prompt on launch checkbox when necessary', async () => { + await act(async () => { + wrapper = mountWithContexts( + + {}} + projectId={12} + globallyAvailable + isPromptableField + promptId="ee-prompt" + promptName="ask_execution_environment_on_launch" + /> + + ); + }); + expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe( + 1 + ); + }); }); diff --git a/awx/ui/src/components/Lookup/InstanceGroupsLookup.js b/awx/ui/src/components/Lookup/InstanceGroupsLookup.js index 497c7b081a..848d54bcfd 100644 --- a/awx/ui/src/components/Lookup/InstanceGroupsLookup.js +++ b/awx/ui/src/components/Lookup/InstanceGroupsLookup.js @@ -1,7 +1,6 @@ import React, { useCallback, useEffect } from 'react'; import { arrayOf, string, func, bool } from 'prop-types'; import { withRouter } from 'react-router-dom'; - import { t, Trans } from '@lingui/macro'; import { FormGroup } from '@patternfly/react-core'; import { InstanceGroupsAPI } from 'api'; @@ -13,6 +12,7 @@ import Popover from '../Popover'; import OptionsList from '../OptionsList'; import Lookup from './Lookup'; import LookupErrorMessage from './shared/LookupErrorMessage'; +import FieldWithPrompt from '../FieldWithPrompt'; const QS_CONFIG = getQSConfig('instance-groups', { page: 1, @@ -21,6 +21,7 @@ const QS_CONFIG = getQSConfig('instance-groups', { }); function InstanceGroupsLookup({ + id, value, onChange, tooltip, @@ -29,6 +30,9 @@ function InstanceGroupsLookup({ history, fieldName, validate, + isPromptableField, + promptId, + promptName, }) { const { result: { instanceGroups, count, relatedSearchableKeys, 
searchableKeys }, @@ -63,13 +67,8 @@ function InstanceGroupsLookup({ fetchInstanceGroups(); }, [fetchInstanceGroups]); - return ( - } - fieldId="org-instance-groups" - > + const renderLookup = () => ( + <> + + ); + + return isPromptableField ? ( + + {renderLookup()} + + ) : ( + } + fieldId={id} + > + {renderLookup()} ); } InstanceGroupsLookup.propTypes = { + id: string, value: arrayOf(InstanceGroup).isRequired, tooltip: string, onChange: func.isRequired, @@ -148,6 +169,7 @@ InstanceGroupsLookup.propTypes = { }; InstanceGroupsLookup.defaultProps = { + id: 'org-instance-groups', tooltip: '', className: '', required: false, diff --git a/awx/ui/src/components/Lookup/InstanceGroupsLookup.test.js b/awx/ui/src/components/Lookup/InstanceGroupsLookup.test.js new file mode 100644 index 0000000000..b6acdb4ed9 --- /dev/null +++ b/awx/ui/src/components/Lookup/InstanceGroupsLookup.test.js @@ -0,0 +1,111 @@ +import React from 'react'; +import { act } from 'react-dom/test-utils'; +import { Formik } from 'formik'; +import { InstanceGroupsAPI } from 'api'; +import { mountWithContexts } from '../../../testUtils/enzymeHelpers'; +import InstanceGroupsLookup from './InstanceGroupsLookup'; + +jest.mock('../../api'); + +const mockedInstanceGroups = { + count: 1, + results: [ + { + id: 2, + name: 'Foo', + image: 'quay.io/ansible/awx-ee', + pull: 'missing', + }, + ], +}; + +const instanceGroups = [ + { + id: 1, + type: 'instance_group', + url: '/api/v2/instance_groups/1/', + related: { + jobs: '/api/v2/instance_groups/1/jobs/', + instances: '/api/v2/instance_groups/1/instances/', + }, + name: 'controlplane', + created: '2022-09-13T15:44:54.870579Z', + modified: '2022-09-13T15:44:54.886047Z', + capacity: 59, + consumed_capacity: 0, + percent_capacity_remaining: 100.0, + jobs_running: 0, + jobs_total: 40, + instances: 1, + is_container_group: false, + credential: null, + policy_instance_percentage: 100, + policy_instance_minimum: 0, + policy_instance_list: [], + pod_spec_override: '', + 
summary_fields: { + user_capabilities: { + edit: true, + delete: false, + }, + }, + }, +]; + +describe('InstanceGroupsLookup', () => { + let wrapper; + + beforeEach(() => { + InstanceGroupsAPI.read.mockResolvedValue({ + data: mockedInstanceGroups, + }); + }); + + afterEach(() => { + jest.clearAllMocks(); + }); + + test('should render successfully', async () => { + InstanceGroupsAPI.readOptions.mockReturnValue({ + data: { + actions: { + GET: {}, + POST: {}, + }, + related_search_fields: [], + }, + }); + await act(async () => { + wrapper = mountWithContexts( + + {}} /> + + ); + }); + wrapper.update(); + expect(InstanceGroupsAPI.read).toHaveBeenCalledTimes(1); + expect(wrapper.find('InstanceGroupsLookup')).toHaveLength(1); + expect(wrapper.find('FormGroup[label="Instance Groups"]').length).toBe(1); + expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe( + 0 + ); + }); + test('should render prompt on launch checkbox when necessary', async () => { + await act(async () => { + wrapper = mountWithContexts( + + {}} + isPromptableField + promptId="ig-prompt" + promptName="ask_instance_groups_on_launch" + /> + + ); + }); + expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe( + 1 + ); + }); +}); diff --git a/awx/ui/src/components/MultiSelect/useSyncedSelectValue.js b/awx/ui/src/components/MultiSelect/useSyncedSelectValue.js index 4af90fbb8b..38e226b895 100644 --- a/awx/ui/src/components/MultiSelect/useSyncedSelectValue.js +++ b/awx/ui/src/components/MultiSelect/useSyncedSelectValue.js @@ -17,13 +17,21 @@ export default function useSyncedSelectValue(value, onChange) { return; } const newOptions = []; - if (value !== selections && options.length) { + if (value && value !== selections && options.length) { const syncedValue = value.map((item) => { const match = options.find((i) => i.id === item.id); if (!match) { newOptions.push(item); } - return match || item; + + if (match) { + if (item.isReadOnly) { + match.isReadOnly = true; + } + 
return match; + } + + return item; }); setSelections(syncedValue); } diff --git a/awx/ui/src/components/PromptDetail/PromptDetail.js b/awx/ui/src/components/PromptDetail/PromptDetail.js index 742e4caf86..7122e6a63d 100644 --- a/awx/ui/src/components/PromptDetail/PromptDetail.js +++ b/awx/ui/src/components/PromptDetail/PromptDetail.js @@ -35,6 +35,9 @@ function formatTimeout(timeout) { if (typeof timeout === 'undefined' || timeout === null) { return null; } + if (typeof timeout === 'string') { + return timeout; + } const minutes = Math.floor(timeout / 60); const seconds = timeout - Math.floor(timeout / 60) * 60; return ( @@ -71,7 +74,13 @@ function hasPromptData(launchData) { launchData.ask_skip_tags_on_launch || launchData.ask_tags_on_launch || launchData.ask_variables_on_launch || - launchData.ask_verbosity_on_launch + launchData.ask_verbosity_on_launch || + launchData.ask_execution_environment_on_launch || + launchData.ask_labels_on_launch || + launchData.ask_forks_on_launch || + launchData.ask_job_slice_count_on_launch || + launchData.ask_timeout_on_launch || + launchData.ask_instance_groups_on_launch ); } @@ -206,6 +215,36 @@ function PromptDetail({ value={overrides.inventory?.name} /> )} + {launchConfig.ask_execution_environment_on_launch && ( + + )} + {launchConfig.ask_instance_groups_on_launch && ( + + {overrides.instance_groups.map((instance_group) => ( + + {instance_group.name} + + ))} + + } + /> + )} {launchConfig.ask_scm_branch_on_launch && ( )} + {launchConfig.ask_labels_on_launch && ( + + {overrides.labels.map((label) => ( + + {label.name} + + ))} + + } + isEmpty={overrides.labels.length === 0} + /> + )} + {launchConfig.ask_forks_on_launch && ( + + )} + {launchConfig.ask_job_slice_count_on_launch && ( + + )} + {launchConfig.ask_timeout_on_launch && ( + + )} {launchConfig.ask_diff_mode_on_launch && ( { assertDetail('Limit', 'localhost'); assertDetail('Verbosity', '3 (Debug)'); assertDetail('Show Changes', 'Off'); + assertDetail('Timeout', '1 min 40 
sec'); + assertDetail('Forks', '1'); + assertDetail('Job Slicing', '1'); expect(wrapper.find('VariablesDetail').prop('value')).toEqual( '---foo: bar' ); + expect( + wrapper + .find('Detail[label="Labels"]') + .containsAllMatchingElements([ + L_91o2, + L_91o3, + ]) + ).toEqual(true); expect( wrapper .find('Detail[label="Credentials"]') @@ -151,6 +172,19 @@ describe('PromptDetail', () => { job_type: 'check', scm_branch: 'Bar branch', diff_mode: true, + forks: 2, + job_slice_count: 2, + timeout: 160, + labels: [ + { name: 'foo', id: 1 }, + { name: 'bar', id: 2 }, + ], + instance_groups: [ + { + id: 1, + name: 'controlplane', + }, + ], }; beforeAll(() => { @@ -182,9 +216,17 @@ describe('PromptDetail', () => { assertDetail('Limit', 'otherlimit'); assertDetail('Verbosity', '0 (Normal)'); assertDetail('Show Changes', 'On'); + assertDetail('Timeout', '2 min 40 sec'); + assertDetail('Forks', '2'); + assertDetail('Job Slicing', '2'); expect(wrapper.find('VariablesDetail').prop('value')).toEqual( '---one: two\nbar: baz' ); + expect( + wrapper + .find('Detail[label="Labels"]') + .containsAllMatchingElements([foo, bar]) + ).toEqual(true); expect( wrapper .find('Detail[label="Credentials"]') diff --git a/awx/ui/src/components/PromptDetail/PromptJobTemplateDetail.js b/awx/ui/src/components/PromptDetail/PromptJobTemplateDetail.js index 6e690337e1..adc82c3256 100644 --- a/awx/ui/src/components/PromptDetail/PromptJobTemplateDetail.js +++ b/awx/ui/src/components/PromptDetail/PromptJobTemplateDetail.js @@ -146,7 +146,10 @@ function PromptJobTemplateDetail({ resource }) { /> - + {typeof diff_mode === 'boolean' && ( diff --git a/awx/ui/src/components/PromptDetail/data.job_template.json b/awx/ui/src/components/PromptDetail/data.job_template.json index 5bbc5b26a8..620bbb9f96 100644 --- a/awx/ui/src/components/PromptDetail/data.job_template.json +++ b/awx/ui/src/components/PromptDetail/data.job_template.json @@ -3,159 +3,163 @@ "type": "job_template", "url": "/api/v2/job_templates/7/", 
"related": { - "named_url": "/api/v2/job_templates/MockJT/", - "created_by": "/api/v2/users/1/", - "modified_by": "/api/v2/users/1/", - "labels": "/api/v2/job_templates/7/labels/", - "inventory": "/api/v2/inventories/1/", - "project": "/api/v2/projects/6/", - "credentials": "/api/v2/job_templates/7/credentials/", - "last_job": "/api/v2/jobs/12/", - "jobs": "/api/v2/job_templates/7/jobs/", - "schedules": "/api/v2/job_templates/7/schedules/", - "activity_stream": "/api/v2/job_templates/7/activity_stream/", - "launch": "/api/v2/job_templates/7/launch/", - "webhook_key": "/api/v2/job_templates/7/webhook_key/", - "webhook_receiver": "/api/v2/job_templates/7/github/", - "notification_templates_started": "/api/v2/job_templates/7/notification_templates_started/", - "notification_templates_success": "/api/v2/job_templates/7/notification_templates_success/", - "notification_templates_error": "/api/v2/job_templates/7/notification_templates_error/", - "access_list": "/api/v2/job_templates/7/access_list/", - "survey_spec": "/api/v2/job_templates/7/survey_spec/", - "object_roles": "/api/v2/job_templates/7/object_roles/", - "instance_groups": "/api/v2/job_templates/7/instance_groups/", - "slice_workflow_jobs": "/api/v2/job_templates/7/slice_workflow_jobs/", - "copy": "/api/v2/job_templates/7/copy/", - "callback": "/api/v2/job_templates/7/callback/", - "webhook_credential": "/api/v2/credentials/8/" + "named_url": "/api/v2/job_templates/MockJT/", + "created_by": "/api/v2/users/1/", + "modified_by": "/api/v2/users/1/", + "labels": "/api/v2/job_templates/7/labels/", + "inventory": "/api/v2/inventories/1/", + "project": "/api/v2/projects/6/", + "credentials": "/api/v2/job_templates/7/credentials/", + "last_job": "/api/v2/jobs/12/", + "jobs": "/api/v2/job_templates/7/jobs/", + "schedules": "/api/v2/job_templates/7/schedules/", + "activity_stream": "/api/v2/job_templates/7/activity_stream/", + "launch": "/api/v2/job_templates/7/launch/", + "webhook_key": 
"/api/v2/job_templates/7/webhook_key/", + "webhook_receiver": "/api/v2/job_templates/7/github/", + "notification_templates_started": "/api/v2/job_templates/7/notification_templates_started/", + "notification_templates_success": "/api/v2/job_templates/7/notification_templates_success/", + "notification_templates_error": "/api/v2/job_templates/7/notification_templates_error/", + "access_list": "/api/v2/job_templates/7/access_list/", + "survey_spec": "/api/v2/job_templates/7/survey_spec/", + "object_roles": "/api/v2/job_templates/7/object_roles/", + "instance_groups": "/api/v2/job_templates/7/instance_groups/", + "slice_workflow_jobs": "/api/v2/job_templates/7/slice_workflow_jobs/", + "copy": "/api/v2/job_templates/7/copy/", + "callback": "/api/v2/job_templates/7/callback/", + "webhook_credential": "/api/v2/credentials/8/" }, "summary_fields": { - "inventory": { - "id": 1, - "name": "Demo Inventory", - "description": "", - "has_active_failures": false, - "total_hosts": 1, - "hosts_with_active_failures": 0, - "total_groups": 0, - "groups_with_active_failures": 0, - "has_inventory_sources": false, - "total_inventory_sources": 0, - "inventory_sources_with_failures": 0, - "organization_id": 1, - "kind": "" - }, - "execution_environment": { - "id": 1, - "name": "Default EE", - "description": "", - "image": "quay.io/ansible/awx-ee" - }, - "project": { - "id": 6, - "name": "Mock Project", - "description": "", - "status": "successful", - "scm_type": "git" - }, - "last_job": { - "id": 12, - "name": "Mock JT", - "description": "", - "finished": "2019-10-01T14:34:35.142483Z", - "status": "successful", - "failed": false - }, - "last_update": { - "id": 12, - "name": "Mock JT", - "description": "", - "status": "successful", - "failed": false - }, - "webhook_credential": { - "id": 8, - "name": "GitHub Cred", - "description": "", - "kind": "github_token", - "cloud": false, - "credential_type_id": 12 - }, - "created_by": { - "id": 1, - "username": "admin", - "first_name": "", - 
"last_name": "" - }, - "modified_by": { - "id": 1, - "username": "admin", - "first_name": "", - "last_name": "" - }, - "object_roles": { - "admin_role": { - "description": "Can manage all aspects of the job template", - "name": "Admin", - "id": 24 - }, - "execute_role": { - "description": "May run the job template", - "name": "Execute", - "id": 25 - }, - "read_role": { - "description": "May view settings for the job template", - "name": "Read", - "id": 26 - } - }, - "user_capabilities": { - "edit": true, - "delete": true, - "start": true, - "schedule": true, - "copy": true - }, - "labels": { - "count": 1, - "results": [ - { - "id": 91, - "name": "L_91o2" - }, - { - "id": 92, - "name": "L_91o3" - } - ] + "inventory": { + "id": 1, + "name": "Demo Inventory", + "description": "", + "has_active_failures": false, + "total_hosts": 1, + "hosts_with_active_failures": 0, + "total_groups": 0, + "groups_with_active_failures": 0, + "has_inventory_sources": false, + "total_inventory_sources": 0, + "inventory_sources_with_failures": 0, + "organization_id": 1, + "kind": "" }, - "survey": { - "title": "", - "description": "" + "execution_environment": { + "id": 1, + "name": "Default EE", + "description": "", + "image": "quay.io/ansible/awx-ee" + }, + "project": { + "id": 6, + "name": "Mock Project", + "description": "", + "status": "successful", + "scm_type": "git" + }, + "last_job": { + "id": 12, + "name": "Mock JT", + "description": "", + "finished": "2019-10-01T14:34:35.142483Z", + "status": "successful", + "failed": false + }, + "last_update": { + "id": 12, + "name": "Mock JT", + "description": "", + "status": "successful", + "failed": false + }, + "webhook_credential": { + "id": 8, + "name": "GitHub Cred", + "description": "", + "kind": "github_token", + "cloud": false, + "credential_type_id": 12 + }, + "created_by": { + "id": 1, + "username": "admin", + "first_name": "", + "last_name": "" + }, + "modified_by": { + "id": 1, + "username": "admin", + "first_name": "", + 
"last_name": "" + }, + "object_roles": { + "admin_role": { + "description": "Can manage all aspects of the job template", + "name": "Admin", + "id": 24 }, - "recent_jobs": [ - { - "id": 12, - "status": "successful", - "finished": "2019-10-01T14:34:35.142483Z", - "type": "job" - }, - { - "id": 13, - "status": "successful", - "finished": "2019-10-01T14:34:35.142483Z", - "type": "job" - } - ], - "credentials": [ + "execute_role": { + "description": "May run the job template", + "name": "Execute", + "id": 25 + }, + "read_role": { + "description": "May view settings for the job template", + "name": "Read", + "id": 26 + } + }, + "user_capabilities": { + "edit": true, + "delete": true, + "start": true, + "schedule": true, + "copy": true + }, + "labels": { + "count": 1, + "results": [ { - "id": 1, "kind": "ssh" , "name": "Credential 1" + "id": 91, + "name": "L_91o2" }, { - "id": 2, "kind": "awx" , "name": "Credential 2" + "id": 92, + "name": "L_91o3" } ] + }, + "survey": { + "title": "", + "description": "" + }, + "recent_jobs": [ + { + "id": 12, + "status": "successful", + "finished": "2019-10-01T14:34:35.142483Z", + "type": "job" + }, + { + "id": 13, + "status": "successful", + "finished": "2019-10-01T14:34:35.142483Z", + "type": "job" + } + ], + "credentials": [ + { + "id": 1, + "kind": "ssh", + "name": "Credential 1" + }, + { + "id": 2, + "kind": "awx", + "name": "Credential 2" + } + ] }, "created": "2019-09-30T16:18:34.564820Z", "modified": "2019-10-01T14:47:31.818431Z", diff --git a/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.js b/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.js index 61416e3a82..13d61345f7 100644 --- a/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.js +++ b/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.js @@ -1,12 +1,10 @@ import React, { useState } from 'react'; import { func, shape } from 'prop-types'; - import { useHistory, useLocation } from 'react-router-dom'; import { Card } from '@patternfly/react-core'; 
import yaml from 'js-yaml'; import { parseVariableField } from 'util/yaml'; - -import { SchedulesAPI } from 'api'; +import { OrganizationsAPI, SchedulesAPI } from 'api'; import mergeExtraVars from 'util/prompt/mergeExtraVars'; import getSurveyValues from 'util/prompt/getSurveyValues'; import { getAddedAndRemoved } from 'util/lists'; @@ -34,6 +32,8 @@ function ScheduleAdd({ surveyConfiguration ) => { const { + execution_environment, + instance_groups, inventory, frequency, frequencyOptions, @@ -41,6 +41,7 @@ function ScheduleAdd({ exceptionOptions, timezone, credentials, + labels, ...submitValues } = values; const { added } = getAddedAndRemoved( @@ -72,6 +73,10 @@ function ScheduleAdd({ submitValues.inventory = inventory.id; } + if (execution_environment) { + submitValues.execution_environment = execution_environment.id; + } + try { const ruleSet = buildRuleSet(values); const requestData = { @@ -94,13 +99,46 @@ function ScheduleAdd({ const { data: { id: scheduleId }, } = await apiModel.createSchedule(resource.id, requestData); - if (credentials?.length > 0) { - await Promise.all( - added.map(({ id: credentialId }) => - SchedulesAPI.associateCredential(scheduleId, credentialId) - ) + + let labelsPromises = []; + let credentialsPromises = []; + + if (launchConfiguration?.ask_labels_on_launch && labels) { + let organizationId = resource.organization; + if (!organizationId) { + // eslint-disable-next-line no-useless-catch + try { + const { + data: { results }, + } = await OrganizationsAPI.read(); + organizationId = results[0].id; + } catch (err) { + throw err; + } + } + + labelsPromises = labels.map((label) => + SchedulesAPI.associateLabel(scheduleId, label, organizationId) ); } + + if (launchConfiguration?.ask_credential_on_launch && added?.length > 0) { + credentialsPromises = added.map(({ id: credentialId }) => + SchedulesAPI.associateCredential(scheduleId, credentialId) + ); + } + await Promise.all([labelsPromises, credentialsPromises]); + + if ( + 
launchConfiguration?.ask_instance_groups_on_launch && + instance_groups + ) { + /* eslint-disable no-await-in-loop, no-restricted-syntax */ + for (const group of instance_groups) { + await SchedulesAPI.associateInstanceGroup(scheduleId, group.id); + } + } + history.push(`${pathRoot}schedules/${scheduleId}`); } catch (err) { setFormSubmitError(err); diff --git a/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.test.js b/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.test.js index 870fa15edf..57def9f64e 100644 --- a/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.test.js +++ b/awx/ui/src/components/Schedule/ScheduleAdd/ScheduleAdd.test.js @@ -1,11 +1,21 @@ import React from 'react'; import { act } from 'react-dom/test-utils'; import { RRule } from 'rrule'; -import { SchedulesAPI, JobTemplatesAPI, InventoriesAPI } from 'api'; +import { + CredentialsAPI, + CredentialTypesAPI, + SchedulesAPI, + JobTemplatesAPI, + InventoriesAPI, +} from 'api'; import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; import ScheduleAdd from './ScheduleAdd'; -jest.mock('../../../api'); +jest.mock('../../../api/models/Credentials'); +jest.mock('../../../api/models/CredentialTypes'); +jest.mock('../../../api/models/Schedules'); +jest.mock('../../../api/models/JobTemplates'); +jest.mock('../../../api/models/Inventories'); const launchConfig = { can_start_without_user_input: false, @@ -19,7 +29,7 @@ const launchConfig = { ask_limit_on_launch: false, ask_verbosity_on_launch: false, ask_inventory_on_launch: true, - ask_credential_on_launch: false, + ask_credential_on_launch: true, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -57,6 +67,33 @@ describe('', () => { ], }); JobTemplatesAPI.createSchedule.mockResolvedValue({ data: { id: 3 } }); + + CredentialTypesAPI.loadAllTypes.mockResolvedValue([ + { id: 1, name: 'ssh', kind: 'ssh' }, + ]); + + CredentialsAPI.read.mockResolvedValue({ + data: { + count: 1, + results: 
[ + { + id: 10, + name: 'cred 1', + kind: 'ssh', + url: '', + credential_type: 1, + }, + ], + }, + }); + + CredentialsAPI.readOptions.mockResolvedValue({ + data: { + related_search_fields: [], + actions: { GET: { filterabled: true } }, + }, + }); + await act(async () => { wrapper = mountWithContexts( ', () => { description: '', }} launchConfig={launchConfig} + surveyConfig={{}} /> ); }); @@ -390,6 +428,7 @@ describe('', () => { wrapper.find('Button[aria-label="Prompt"]').prop('onClick')() ); wrapper.update(); + // Inventory step expect(wrapper.find('WizardNavItem').at(0).prop('isCurrent')).toBe(true); await act(async () => { wrapper.find('td#check-action-item-1').find('input').simulate('click'); @@ -402,7 +441,21 @@ describe('', () => { wrapper.find('WizardFooterInternal').prop('onNext')() ); wrapper.update(); + // Credential step expect(wrapper.find('WizardNavItem').at(1).prop('isCurrent')).toBe(true); + await act(async () => { + wrapper.find('td#check-action-item-10').find('input').simulate('click'); + }); + wrapper.update(); + expect( + wrapper.find('td#check-action-item-10').find('input').prop('checked') + ).toBe(true); + await act(async () => + wrapper.find('WizardFooterInternal').prop('onNext')() + ); + wrapper.update(); + // Preview step + expect(wrapper.find('WizardNavItem').at(2).prop('isCurrent')).toBe(true); await act(async () => wrapper.find('WizardFooterInternal').prop('onNext')() ); @@ -414,10 +467,7 @@ describe('', () => { frequency: [], skip_tags: '', inventory: { name: 'inventory', id: 45 }, - credentials: [ - { name: 'cred 1', id: 10 }, - { name: 'cred 2', id: 20 }, - ], + credentials: [{ name: 'cred 1', id: 10 }], startDate: '2021-01-28', startTime: '2:15 PM', timezone: 'America/New_York', @@ -434,7 +484,6 @@ describe('', () => { skip_tags: '', }); expect(SchedulesAPI.associateCredential).toBeCalledWith(3, 10); - expect(SchedulesAPI.associateCredential).toBeCalledWith(3, 20); }); test('should submit survey with default values properly, without 
opening prompt wizard', async () => { diff --git a/awx/ui/src/components/Schedule/ScheduleDetail/ScheduleDetail.js b/awx/ui/src/components/Schedule/ScheduleDetail/ScheduleDetail.js index b7516ea213..a5650ac238 100644 --- a/awx/ui/src/components/Schedule/ScheduleDetail/ScheduleDetail.js +++ b/awx/ui/src/components/Schedule/ScheduleDetail/ScheduleDetail.js @@ -27,6 +27,11 @@ import { VariablesDetail } from '../../CodeEditor'; import { VERBOSITY } from '../../VerbositySelectField'; import getHelpText from '../../../screens/Template/shared/JobTemplate.helptext'; +const buildLinkURL = (instance) => + instance.is_container_group + ? '/instance_groups/container_group/' + : '/instance_groups/'; + const PromptDivider = styled(Divider)` margin-top: var(--pf-global--spacer--lg); margin-bottom: var(--pf-global--spacer--lg); @@ -73,8 +78,11 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { diff_mode, dtend, dtstart, + execution_environment, extra_data, + forks, inventory, + job_slice_count, job_tags, job_type, limit, @@ -85,6 +93,7 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { scm_branch, skip_tags, summary_fields, + timeout, timezone, verbosity, } = schedule; @@ -108,7 +117,7 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { const { error, dismissError } = useDismissableError(deleteError); const { - result: [credentials, preview, launchData], + result: [credentials, preview, launchData, labels, instanceGroups], isLoading, error: readContentError, request: fetchCredentialsAndPreview, @@ -128,7 +137,9 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { promises.push( JobTemplatesAPI.readLaunch( schedule.summary_fields.unified_job_template.id - ) + ), + SchedulesAPI.readAllLabels(id), + SchedulesAPI.readInstanceGroups(id) ); } else if ( schedule?.summary_fields?.unified_job_template?.unified_job_type === @@ -137,17 +148,28 @@ function ScheduleDetail({ hasDaysToKeepField, 
schedule, surveyConfig }) { promises.push( WorkflowJobTemplatesAPI.readLaunch( schedule.summary_fields.unified_job_template.id - ) + ), + SchedulesAPI.readAllLabels(id) ); } else { promises.push(Promise.resolve()); } - const [{ data }, { data: schedulePreview }, launch] = await Promise.all( - promises - ); + const [ + { data }, + { data: schedulePreview }, + launch, + allLabelsResults, + instanceGroupsResults, + ] = await Promise.all(promises); - return [data.results, schedulePreview, launch?.data]; + return [ + data.results, + schedulePreview, + launch?.data, + allLabelsResults?.data?.results, + instanceGroupsResults?.data?.results, + ]; }, [id, schedule, rrule]), [] ); @@ -185,6 +207,12 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { ask_tags_on_launch, ask_variables_on_launch, ask_verbosity_on_launch, + ask_execution_environment_on_launch, + ask_labels_on_launch, + ask_forks_on_launch, + ask_job_slice_count_on_launch, + ask_timeout_on_launch, + ask_instance_groups_on_launch, survey_enabled, } = launchData || {}; @@ -239,6 +267,16 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { const showJobTypeDetail = ask_job_type_on_launch && job_type; const showSCMBranchDetail = ask_scm_branch_on_launch && scm_branch; const showVerbosityDetail = ask_verbosity_on_launch && VERBOSITY()[verbosity]; + const showExecutionEnvironmentDetail = + ask_execution_environment_on_launch && execution_environment; + const showLabelsDetail = ask_labels_on_launch && labels && labels.length > 0; + const showForksDetail = ask_forks_on_launch && typeof forks === 'number'; + const showJobSlicingDetail = + ask_job_slice_count_on_launch && typeof job_slice_count === 'number'; + const showTimeoutDetail = + ask_timeout_on_launch && typeof timeout === 'number'; + const showInstanceGroupsDetail = + ask_instance_groups_on_launch && instanceGroups.length > 0; const showPromptedFields = showCredentialsDetail || @@ -250,7 +288,13 @@ function 
ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { showSkipTagsDetail || showTagsDetail || showVerbosityDetail || - showVariablesDetail; + showVariablesDetail || + showExecutionEnvironmentDetail || + showLabelsDetail || + showForksDetail || + showJobSlicingDetail || + showTimeoutDetail || + showInstanceGroupsDetail; if (isLoading) { return ; @@ -402,11 +446,20 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { dataCy="schedule-inventory" /> )} - {ask_verbosity_on_launch && ( + {showExecutionEnvironmentDetail && ( + {summary_fields?.execution_environment?.name} + + ) : ( + ' ' + ) + } /> )} {ask_scm_branch_on_launch && ( @@ -419,6 +472,17 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) { {ask_limit_on_launch && ( )} + {ask_forks_on_launch && } + {ask_verbosity_on_launch && ( + + )} + {ask_timeout_on_launch && ( + + )} {showDiffModeDetail && ( )} + {ask_job_slice_count_on_launch && ( + + )} + {showInstanceGroupsDetail && ( + + {instanceGroups.map((ig) => ( + + + {ig.name} + + + ))} + + } + isEmpty={instanceGroups.length === 0} + /> + )} {showCredentialsDetail && ( )} + {showLabelsDetail && ( + + {labels.map((l) => ( + + {l.name} + + ))} + + } + isEmpty={labels.length === 0} + /> + )} {showTagsDetail && ( ', () => { @@ -182,6 +202,14 @@ describe('', () => { expect(wrapper.find('Detail[label="Credentials"]').length).toBe(0); expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(0); expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(0); + expect(wrapper.find('Detail[label="Timeout"]').length).toBe(0); + expect(wrapper.find('Detail[label="Job Slicing"]').length).toBe(0); + expect(wrapper.find('Detail[label="Forks"]').length).toBe(0); + expect(wrapper.find('Detail[label="Labels"]').length).toBe(0); + expect(wrapper.find('Detail[label="Instance Groups"]').length).toBe(0); + expect(wrapper.find('Detail[label="Execution Environment"]').length).toBe( + 0 + ); 
expect(wrapper.find('VariablesDetail').length).toBe(0); }); test('details should render with the proper values with prompts', async () => { @@ -200,6 +228,28 @@ describe('', () => { ], }, }); + SchedulesAPI.readInstanceGroups.mockResolvedValue({ + data: { + count: 1, + results: [ + { + id: 1, + name: 'IG 1', + }, + ], + }, + }); + SchedulesAPI.readAllLabels.mockResolvedValue({ + data: { + count: 1, + results: [ + { + id: 1, + name: 'Label 1', + }, + ], + }, + }); JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts); await act(async () => { wrapper = mountWithContexts( @@ -254,6 +304,14 @@ describe('', () => { expect(wrapper.find('Detail[label="Credentials"]').length).toBe(1); expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(1); expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(1); + expect(wrapper.find('Detail[label="Timeout"]').length).toBe(1); + expect(wrapper.find('Detail[label="Job Slicing"]').length).toBe(1); + expect(wrapper.find('Detail[label="Forks"]').length).toBe(1); + expect(wrapper.find('Detail[label="Labels"]').length).toBe(1); + expect(wrapper.find('Detail[label="Instance Groups"]').length).toBe(1); + expect(wrapper.find('Detail[label="Execution Environment"]').length).toBe( + 1 + ); expect(wrapper.find('VariablesDetail').length).toBe(1); }); test('prompt values section should be hidden if no overrides are present on the schedule but ask_ options are all true', async () => { @@ -263,6 +321,18 @@ describe('', () => { results: [], }, }); + SchedulesAPI.readInstanceGroups.mockResolvedValue({ + data: { + count: 0, + results: [], + }, + }); + SchedulesAPI.readAllLabels.mockResolvedValue({ + data: { + count: 0, + results: [], + }, + }); JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts); await act(async () => { wrapper = mountWithContexts( @@ -296,6 +366,14 @@ describe('', () => { expect(wrapper.find('Detail[label="Credentials"]').length).toBe(0); expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(0); 
expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(0); + expect(wrapper.find('Detail[label="Timeout"]').length).toBe(0); + expect(wrapper.find('Detail[label="Job Slicing"]').length).toBe(0); + expect(wrapper.find('Detail[label="Forks"]').length).toBe(0); + expect(wrapper.find('Detail[label="Labels"]').length).toBe(0); + expect(wrapper.find('Detail[label="Instance Groups"]').length).toBe(0); + expect(wrapper.find('Detail[label="Execution Environment"]').length).toBe( + 0 + ); expect(wrapper.find('VariablesDetail').length).toBe(0); }); test('prompt values section should be hidden if overrides are present on the schedule but ask_ options are all false', async () => { @@ -469,6 +547,18 @@ describe('', () => { results: [], }, }); + SchedulesAPI.readInstanceGroups.mockResolvedValue({ + data: { + count: 0, + results: [], + }, + }); + SchedulesAPI.readAllLabels.mockResolvedValue({ + data: { + count: 0, + results: [], + }, + }); JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts); await act(async () => { wrapper = mountWithContexts( diff --git a/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.js b/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.js index 143a428de0..adc5ff1b97 100644 --- a/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.js +++ b/awx/ui/src/components/Schedule/ScheduleEdit/ScheduleEdit.js @@ -1,15 +1,14 @@ import React, { useState } from 'react'; - import { useHistory, useLocation } from 'react-router-dom'; import { shape } from 'prop-types'; import { Card } from '@patternfly/react-core'; import yaml from 'js-yaml'; -import { SchedulesAPI } from 'api'; +import { OrganizationsAPI, SchedulesAPI } from 'api'; import { getAddedAndRemoved } from 'util/lists'; - import { parseVariableField } from 'util/yaml'; import mergeExtraVars from 'util/prompt/mergeExtraVars'; import getSurveyValues from 'util/prompt/getSurveyValues'; +import createNewLabels from 'util/labels'; import ScheduleForm from '../shared/ScheduleForm'; 
import buildRuleSet from '../shared/buildRuleSet'; import { CardBody } from '../../Card'; @@ -32,9 +31,13 @@ function ScheduleEdit({ values, launchConfiguration, surveyConfiguration, + originalInstanceGroups, + originalLabels, scheduleCredentials = [] ) => { const { + execution_environment, + instance_groups, inventory, credentials = [], frequency, @@ -42,13 +45,9 @@ function ScheduleEdit({ exceptionFrequency, exceptionOptions, timezone, + labels, ...submitValues } = values; - const { added, removed } = getAddedAndRemoved( - [...(resource?.summary_fields.credentials || []), ...scheduleCredentials], - credentials - ); - let extraVars; const surveyValues = getSurveyValues(values); @@ -82,7 +81,24 @@ function ScheduleEdit({ submitValues.inventory = inventory.id; } + if (execution_environment) { + submitValues.execution_environment = execution_environment.id; + } + try { + if (launchConfiguration?.ask_labels_on_launch) { + const { labelIds, error } = createNewLabels( + values.labels, + resource.organization + ); + + if (error) { + setFormSubmitError(error); + } else { + submitValues.labels = labelIds; + } + } + const ruleSet = buildRuleSet(values); const requestData = { ...submitValues, @@ -104,17 +120,52 @@ function ScheduleEdit({ const { data: { id: scheduleId }, } = await SchedulesAPI.update(schedule.id, requestData); - if (values.credentials?.length > 0) { - await Promise.all([ - ...removed.map(({ id }) => - SchedulesAPI.disassociateCredential(scheduleId, id) - ), - ...added.map(({ id }) => - SchedulesAPI.associateCredential(scheduleId, id) - ), - ]); + + const { added: addedCredentials, removed: removedCredentials } = + getAddedAndRemoved( + [ + ...(resource?.summary_fields.credentials || []), + ...scheduleCredentials, + ], + credentials + ); + + const { added: addedLabels, removed: removedLabels } = getAddedAndRemoved( + originalLabels, + labels + ); + + let organizationId = resource.organization; + + if (addedLabels.length > 0) { + if (!organizationId) { + const 
{ + data: { results }, + } = await OrganizationsAPI.read(); + organizationId = results[0].id; + } } + await Promise.all([ + ...removedCredentials.map(({ id }) => + SchedulesAPI.disassociateCredential(scheduleId, id) + ), + ...addedCredentials.map(({ id }) => + SchedulesAPI.associateCredential(scheduleId, id) + ), + ...removedLabels.map((label) => + SchedulesAPI.disassociateLabel(scheduleId, label) + ), + ...addedLabels.map((label) => + SchedulesAPI.associateLabel(scheduleId, label, organizationId) + ), + SchedulesAPI.orderInstanceGroups( + scheduleId, + instance_groups || [], + originalInstanceGroups + ), + ]); + history.push(`${pathRoot}schedules/${scheduleId}/details`); } catch (err) { setFormSubmitError(err); diff --git a/awx/ui/src/components/Schedule/shared/ScheduleForm.js b/awx/ui/src/components/Schedule/shared/ScheduleForm.js index 143cb3b445..96e25ba154 100644 --- a/awx/ui/src/components/Schedule/shared/ScheduleForm.js +++ b/awx/ui/src/components/Schedule/shared/ScheduleForm.js @@ -1,13 +1,12 @@ -import React, { useEffect, useCallback, useState } from 'react'; +import React, { useEffect, useCallback, useState, useRef } from 'react'; import { shape, func } from 'prop-types'; - import { DateTime } from 'luxon'; import { t } from '@lingui/macro'; import { Formik } from 'formik'; import { RRule } from 'rrule'; import { Button, Form, ActionGroup } from '@patternfly/react-core'; import { Config } from 'contexts/Config'; -import { SchedulesAPI } from 'api'; +import { JobTemplatesAPI, SchedulesAPI, WorkflowJobTemplatesAPI } from 'api'; import { dateToInputDateTime } from 'util/dates'; import useRequest from 'hooks/useRequest'; import { parseVariableField } from 'util/yaml'; @@ -31,7 +30,7 @@ const NUM_DAYS_PER_FREQUENCY = { function ScheduleForm({ hasDaysToKeepField, handleCancel, - handleSubmit, + handleSubmit: submitSchedule, schedule, submitError, resource, @@ -41,6 +40,8 @@ function ScheduleForm({ }) { const [isWizardOpen, setIsWizardOpen] = useState(false); 
const [isSaveDisabled, setIsSaveDisabled] = useState(false); + const originalLabels = useRef([]); + const originalInstanceGroups = useRef([]); let rruleError; const now = DateTime.now(); @@ -60,12 +61,52 @@ function ScheduleForm({ useCallback(async () => { const { data } = await SchedulesAPI.readZoneInfo(); - let creds; + let creds = []; + let allLabels = []; + let allInstanceGroups = []; if (schedule.id) { - const { - data: { results }, - } = await SchedulesAPI.readCredentials(schedule.id); - creds = results; + if ( + resource.type === 'job_template' && + launchConfig.ask_credential_on_launch + ) { + const { + data: { results }, + } = await SchedulesAPI.readCredentials(schedule.id); + creds = results; + } + if (launchConfig.ask_labels_on_launch) { + const { + data: { results }, + } = await SchedulesAPI.readAllLabels(schedule.id); + allLabels = results; + } + if ( + resource.type === 'job_template' && + launchConfig.ask_instance_groups_on_launch + ) { + const { + data: { results }, + } = await SchedulesAPI.readInstanceGroups(schedule.id); + allInstanceGroups = results; + } + } else { + if (resource.type === 'job_template') { + if (launchConfig.ask_labels_on_launch) { + const { + data: { results }, + } = await JobTemplatesAPI.readAllLabels(resource.id); + allLabels = results; + } + } + if ( + resource.type === 'workflow_job_template' && + launchConfig.ask_labels_on_launch + ) { + const { + data: { results }, + } = await WorkflowJobTemplatesAPI.readAllLabels(resource.id); + allLabels = results; + } } const zones = (data.zones || []).map((zone) => ({ @@ -74,12 +115,22 @@ function ScheduleForm({ label: zone, })); + originalLabels.current = allLabels; + originalInstanceGroups.current = allInstanceGroups; + return { zoneOptions: zones, zoneLinks: data.links, - credentials: creds || [], + credentials: creds, }; - }, [schedule]), + }, [ + schedule, + resource.id, + resource.type, + launchConfig.ask_labels_on_launch, + launchConfig.ask_instance_groups_on_launch, + 
launchConfig.ask_credential_on_launch, + ]), { zonesOptions: [], zoneLinks: {}, @@ -225,6 +276,12 @@ function ScheduleForm({ launchConfig.ask_scm_branch_on_launch || launchConfig.ask_tags_on_launch || launchConfig.ask_skip_tags_on_launch || + launchConfig.ask_execution_environment_on_launch || + launchConfig.ask_labels_on_launch || + launchConfig.ask_forks_on_launch || + launchConfig.ask_job_slice_count_on_launch || + launchConfig.ask_timeout_on_launch || + launchConfig.ask_instance_groups_on_launch || launchConfig.survey_enabled || launchConfig.inventory_needed_to_start || launchConfig.variables_needed_to_start?.length > 0) @@ -301,19 +358,6 @@ function ScheduleForm({ startTime: time, timezone: schedule.timezone || now.zoneName, }; - const submitSchedule = ( - values, - launchConfiguration, - surveyConfiguration, - scheduleCredentials - ) => { - handleSubmit( - values, - launchConfiguration, - surveyConfiguration, - scheduleCredentials - ); - }; if (hasDaysToKeepField) { let initialDaysToKeep = 30; @@ -436,7 +480,14 @@ function ScheduleForm({ }, }} onSubmit={(values) => { - submitSchedule(values, launchConfig, surveyConfig, credentials); + submitSchedule( + values, + launchConfig, + surveyConfig, + originalInstanceGroups.current, + originalLabels.current, + credentials + ); }} validate={validate} > @@ -463,6 +514,8 @@ function ScheduleForm({ setIsSaveDisabled(false); }} resourceDefaultCredentials={resourceDefaultCredentials} + labels={originalLabels.current} + instanceGroups={originalInstanceGroups.current} /> )} diff --git a/awx/ui/src/components/Schedule/shared/ScheduleForm.test.js b/awx/ui/src/components/Schedule/shared/ScheduleForm.test.js index 47936fc314..5e1ea28b8d 100644 --- a/awx/ui/src/components/Schedule/shared/ScheduleForm.test.js +++ b/awx/ui/src/components/Schedule/shared/ScheduleForm.test.js @@ -17,11 +17,35 @@ jest.mock('../../../api/models/Inventories'); const credentials = { data: { results: [ - { id: 1, kind: 'cloud', name: 'Cred 1', url: 
'www.google.com' }, - { id: 2, kind: 'ssh', name: 'Cred 2', url: 'www.google.com' }, - { id: 3, kind: 'Ansible', name: 'Cred 3', url: 'www.google.com' }, - { id: 4, kind: 'Machine', name: 'Cred 4', url: 'www.google.com' }, - { id: 5, kind: 'Machine', name: 'Cred 5', url: 'www.google.com' }, + { + id: 1, + kind: 'cloud', + name: 'Cred 1', + url: 'www.google.com', + inputs: {}, + }, + { id: 2, kind: 'ssh', name: 'Cred 2', url: 'www.google.com', inputs: {} }, + { + id: 3, + kind: 'Ansible', + name: 'Cred 3', + url: 'www.google.com', + inputs: {}, + }, + { + id: 4, + kind: 'Machine', + name: 'Cred 4', + url: 'www.google.com', + inputs: {}, + }, + { + id: 5, + kind: 'Machine', + name: 'Cred 5', + url: 'www.google.com', + inputs: {}, + }, ], }, }; @@ -39,6 +63,12 @@ const launchData = { ask_verbosity_on_launch: false, ask_inventory_on_launch: true, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -153,6 +183,12 @@ describe('', () => { ask_verbosity_on_launch: false, ask_inventory_on_launch: true, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -208,6 +244,12 @@ describe('', () => { ask_verbosity_on_launch: false, ask_inventory_on_launch: true, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, 
survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -275,6 +317,12 @@ describe('', () => { ask_verbosity_on_launch: false, ask_inventory_on_launch: true, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -406,6 +454,12 @@ describe('', () => { ask_verbosity_on_launch: false, ask_inventory_on_launch: true, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -465,6 +519,12 @@ describe('', () => { ask_verbosity_on_launch: false, ask_inventory_on_launch: false, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -894,7 +954,7 @@ describe('', () => { jest.clearAllMocks(); }); - test('should make API calls to fetch credentials, launch configuration, and survey configuration', async () => { + test('should make API calls to fetch credentials, labels, and zone info', async () => { await act(async () => { wrapper = mountWithContexts( ', () => { type: 'job_template', name: 'Foo Job Template', description: '', + summary_fields: { + credentials: [], + }, }} launchConfig={{ can_start_without_user_input: true, @@ -919,7 +982,13 @@ describe('', () => { ask_limit_on_launch: false, 
ask_verbosity_on_launch: false, ask_inventory_on_launch: false, - ask_credential_on_launch: false, + ask_credential_on_launch: true, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: true, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -933,7 +1002,9 @@ describe('', () => { /> ); }); + expect(SchedulesAPI.readZoneInfo).toBeCalled(); expect(SchedulesAPI.readCredentials).toBeCalledWith(27); + expect(SchedulesAPI.readAllLabels).toBeCalledWith(27); }); test('should not call API to get credentials ', async () => { @@ -961,6 +1032,12 @@ describe('', () => { ask_verbosity_on_launch: false, ask_inventory_on_launch: false, ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, survey_enabled: false, variables_needed_to_start: [], credential_needed_to_start: false, @@ -991,6 +1068,30 @@ describe('', () => { name: 'Foo Project', description: '', }} + launchConfig={{ + can_start_without_user_input: true, + passwords_needed_to_start: [], + ask_scm_branch_on_launch: false, + ask_variables_on_launch: false, + ask_tags_on_launch: false, + ask_diff_mode_on_launch: false, + ask_skip_tags_on_launch: false, + ask_job_type_on_launch: false, + ask_limit_on_launch: false, + ask_verbosity_on_launch: false, + ask_inventory_on_launch: false, + ask_credential_on_launch: false, + ask_execution_environment_on_launch: false, + ask_labels_on_launch: false, + ask_forks_on_launch: false, + ask_job_slice_count_on_launch: false, + ask_timeout_on_launch: false, + ask_instance_groups_on_launch: false, + survey_enabled: false, + variables_needed_to_start: [], + credential_needed_to_start: false, + 
inventory_needed_to_start: false, + }} /> ); }); diff --git a/awx/ui/src/components/Schedule/shared/SchedulePromptableFields.js b/awx/ui/src/components/Schedule/shared/SchedulePromptableFields.js index 406398806b..d0faf3248d 100644 --- a/awx/ui/src/components/Schedule/shared/SchedulePromptableFields.js +++ b/awx/ui/src/components/Schedule/shared/SchedulePromptableFields.js @@ -17,6 +17,8 @@ function SchedulePromptableFields({ credentials, resource, resourceDefaultCredentials, + labels, + instanceGroups, }) { const { setFieldTouched, values, initialValues, resetForm } = useFormikContext(); @@ -33,7 +35,9 @@ function SchedulePromptableFields({ schedule, resource, credentials, - resourceDefaultCredentials + resourceDefaultCredentials, + labels, + instanceGroups ); const [showDescription, setShowDescription] = useState(false); const { error, dismissError } = useDismissableError(contentError); diff --git a/awx/ui/src/components/Schedule/shared/useSchedulePromptSteps.js b/awx/ui/src/components/Schedule/shared/useSchedulePromptSteps.js index ef31e14d23..630cc119ba 100644 --- a/awx/ui/src/components/Schedule/shared/useSchedulePromptSteps.js +++ b/awx/ui/src/components/Schedule/shared/useSchedulePromptSteps.js @@ -3,6 +3,8 @@ import { useFormikContext } from 'formik'; import { t } from '@lingui/macro'; import useInventoryStep from '../../LaunchPrompt/steps/useInventoryStep'; import useCredentialsStep from '../../LaunchPrompt/steps/useCredentialsStep'; +import useExecutionEnvironmentStep from '../../LaunchPrompt/steps/useExecutionEnvironmentStep'; +import useInstanceGroupsStep from '../../LaunchPrompt/steps/useInstanceGroupsStep'; import useOtherPromptsStep from '../../LaunchPrompt/steps/useOtherPromptsStep'; import useSurveyStep from '../../LaunchPrompt/steps/useSurveyStep'; import usePreviewStep from '../../LaunchPrompt/steps/usePreviewStep'; @@ -12,9 +14,10 @@ export default function useSchedulePromptSteps( launchConfig, schedule, resource, - scheduleCredentials, - 
resourceDefaultCredentials + resourceDefaultCredentials, + labels, + instanceGroups ) { const sourceOfValues = (Object.keys(schedule).length > 0 && schedule) || resource; @@ -28,7 +31,9 @@ export default function useSchedulePromptSteps( sourceOfValues, resourceDefaultCredentials ), - useOtherPromptsStep(launchConfig, sourceOfValues), + useExecutionEnvironmentStep(launchConfig, resource), + useInstanceGroupsStep(launchConfig, resource, instanceGroups), + useOtherPromptsStep(launchConfig, sourceOfValues, labels), useSurveyStep(launchConfig, surveyConfig, sourceOfValues, visited), ]; @@ -37,7 +42,6 @@ export default function useSchedulePromptSteps( steps.push( usePreviewStep( launchConfig, - resource, surveyConfig, hasErrors, @@ -130,6 +134,8 @@ export default function useSchedulePromptSteps( setVisited({ inventory: true, credentials: true, + executionEnvironment: true, + instanceGroups: true, other: true, survey: true, preview: true, diff --git a/awx/ui/src/components/Workflow/workflowReducer.js b/awx/ui/src/components/Workflow/workflowReducer.js index e1bd2dfc8e..be77528136 100644 --- a/awx/ui/src/components/Workflow/workflowReducer.js +++ b/awx/ui/src/components/Workflow/workflowReducer.js @@ -8,6 +8,7 @@ export function initReducer() { addNodeTarget: null, addingLink: false, contentError: null, + defaultOrganization: null, isLoading: true, linkToDelete: null, linkToEdit: null, @@ -64,6 +65,11 @@ export default function visualizerReducer(state, action) { ...state, contentError: action.value, }; + case 'SET_DEFAULT_ORGANIZATION': + return { + ...state, + defaultOrganization: action.value, + }; case 'SET_IS_LOADING': return { ...state, diff --git a/awx/ui/src/components/Workflow/workflowReducer.test.js b/awx/ui/src/components/Workflow/workflowReducer.test.js index 3570f701dd..e241d76bff 100644 --- a/awx/ui/src/components/Workflow/workflowReducer.test.js +++ b/awx/ui/src/components/Workflow/workflowReducer.test.js @@ -7,6 +7,7 @@ const defaultState = { addNodeTarget: 
null, addingLink: false, contentError: null, + defaultOrganization: null, isLoading: true, linkToDelete: null, linkToEdit: null, @@ -1281,6 +1282,18 @@ describe('Workflow reducer', () => { }); }); }); + describe('SET_DEFAULT_ORGANIZATION', () => { + it('should set the state variable', () => { + const result = workflowReducer(defaultState, { + type: 'SET_DEFAULT_ORGANIZATION', + value: 1, + }); + expect(result).toEqual({ + ...defaultState, + defaultOrganization: 1, + }); + }); + }); describe('SET_IS_LOADING', () => { it('should set the state variable', () => { const result = workflowReducer(defaultState, { diff --git a/awx/ui/src/hooks/useSelected.js b/awx/ui/src/hooks/useSelected.js index 3587a2efe2..f596f5ca7f 100644 --- a/awx/ui/src/hooks/useSelected.js +++ b/awx/ui/src/hooks/useSelected.js @@ -12,8 +12,8 @@ import { useState, useCallback } from 'react'; * } */ -export default function useSelected(list = []) { - const [selected, setSelected] = useState([]); +export default function useSelected(list = [], defaultSelected = []) { + const [selected, setSelected] = useState(defaultSelected); const isAllSelected = selected.length > 0 && selected.length === list.length; const handleSelect = (row) => { diff --git a/awx/ui/src/screens/Job/JobDetail/JobDetail.js b/awx/ui/src/screens/Job/JobDetail/JobDetail.js index 2ffd6b6f75..d3435307cf 100644 --- a/awx/ui/src/screens/Job/JobDetail/JobDetail.js +++ b/awx/ui/src/screens/Job/JobDetail/JobDetail.js @@ -391,6 +391,16 @@ function JobDetail({ job, inventorySourceLabels }) { helpText={jobHelpText.forks} /> )} + {typeof job.timeout === 'number' && ( + + )} {credential && ( ', () => { test('calls workflowJobTemplatesAPI with correct information on submit', async () => { await act(async () => { wrapper.find('input#wfjt-name').simulate('change', { - target: { value: 'Alex', name: 'name' }, + target: { value: 'Alex Singh', name: 'name' }, }); wrapper.find('LabelSelect').find('SelectToggle').simulate('click'); @@ -104,18 +104,23 @@ 
describe('', () => { wrapper.find('form').simulate('submit'); }); await expect(WorkflowJobTemplatesAPI.create).toHaveBeenCalledWith({ - name: 'Alex', + name: 'Alex Singh', allow_simultaneous: false, ask_inventory_on_launch: false, + ask_labels_on_launch: false, ask_limit_on_launch: false, ask_scm_branch_on_launch: false, + ask_skip_tags_on_launch: false, + ask_tags_on_launch: false, ask_variables_on_launch: false, description: '', extra_vars: '---', inventory: undefined, + job_tags: null, limit: null, organization: undefined, scm_branch: '', + skip_tags: null, webhook_credential: undefined, webhook_service: '', webhook_url: '', diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.js b/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.js index 88565297a4..6357f4adf2 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.js +++ b/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.js @@ -23,12 +23,16 @@ function WorkflowJobTemplateEdit({ template }) { webhook_credential, webhook_key, limit, + job_tags, + skip_tags, ...templatePayload } = values; templatePayload.inventory = inventory?.id || null; templatePayload.organization = organization?.id || null; templatePayload.webhook_credential = webhook_credential?.id || null; templatePayload.limit = limit === '' ? null : limit; + templatePayload.job_tags = job_tags === '' ? null : job_tags; + templatePayload.skip_tags = skip_tags === '' ? 
null : skip_tags; const formOrgId = organization?.id || inventory?.summary_fields?.organization.id || null; diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.test.js b/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.test.js index cb56e82ef0..4ef1a6cce0 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.test.js +++ b/awx/ui/src/screens/Template/WorkflowJobTemplateEdit/WorkflowJobTemplateEdit.test.js @@ -161,6 +161,7 @@ describe('', () => { expect(WorkflowJobTemplatesAPI.update).toHaveBeenCalledWith(6, { name: 'Alex', description: 'Apollo and Athena', + skip_tags: '', inventory: 1, organization: 1, scm_branch: 'main', @@ -174,6 +175,11 @@ describe('', () => { ask_limit_on_launch: false, ask_scm_branch_on_launch: false, ask_variables_on_launch: false, + ask_labels_on_launch: false, + ask_skip_tags_on_launch: false, + ask_tags_on_launch: false, + job_tags: null, + skip_tags: null, }); wrapper.update(); await expect(WorkflowJobTemplatesAPI.disassociateLabel).toBeCalledWith(6, { @@ -273,16 +279,21 @@ describe('', () => { expect(WorkflowJobTemplatesAPI.update).toBeCalledWith(6, { allow_simultaneous: false, ask_inventory_on_launch: false, + ask_labels_on_launch: false, ask_limit_on_launch: false, ask_scm_branch_on_launch: false, + ask_skip_tags_on_launch: false, + ask_tags_on_launch: false, ask_variables_on_launch: false, description: 'bar', extra_vars: '---', inventory: 1, + job_tags: null, limit: '5000', name: 'Foo', organization: 1, scm_branch: 'devel', + skip_tags: null, webhook_credential: null, webhook_service: '', webhook_url: '', diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeModal.js b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeModal.js index 7482db48c2..572291bf76 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeModal.js +++ 
b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeModal.js @@ -38,6 +38,8 @@ function NodeModalForm({ surveyConfig, isLaunchLoading, resourceDefaultCredentials, + labels, + instanceGroups, }) { const history = useHistory(); const dispatch = useContext(WorkflowDispatchContext); @@ -66,7 +68,9 @@ function NodeModalForm({ surveyConfig, values.nodeResource, askLinkType, - resourceDefaultCredentials + resourceDefaultCredentials, + labels, + instanceGroups ); const handleSaveNode = () => { @@ -241,7 +245,7 @@ const NodeModalInner = ({ title, ...rest }) => { const { request: readLaunchConfigs, error: launchConfigError, - result: { launchConfig, surveyConfig, resourceDefaultCredentials }, + result: { launchConfig, surveyConfig, resourceDefaultCredentials, labels }, isLoading, } = useRequest( useCallback(async () => { @@ -260,9 +264,15 @@ const NodeModalInner = ({ title, ...rest }) => { launchConfig: {}, surveyConfig: {}, resourceDefaultCredentials: [], + labels: [], }; } + const readLabels = + values.nodeType === 'workflow_job_template' + ? 
WorkflowJobTemplatesAPI.readAllLabels(values.nodeResource.id) + : JobTemplatesAPI.readAllLabels(values.nodeResource.id); + const { data: launch } = await readLaunch( values.nodeType, values?.nodeResource?.id @@ -291,10 +301,21 @@ const NodeModalInner = ({ title, ...rest }) => { defaultCredentials = results; } + let defaultLabels = []; + + if (launch.ask_labels_on_launch) { + const { + data: { results }, + } = await readLabels; + + defaultLabels = results; + } + return { launchConfig: launch, surveyConfig: survey, resourceDefaultCredentials: defaultCredentials, + labels: defaultLabels, }; // eslint-disable-next-line react-hooks/exhaustive-deps @@ -347,6 +368,8 @@ const NodeModalInner = ({ title, ...rest }) => { resourceDefaultCredentials={resourceDefaultCredentials} isLaunchLoading={isLoading} title={wizardTitle} + labels={labels} + instanceGroups={[]} /> ); }; diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeViewModal.js b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeViewModal.js index da77f8d14c..61dd6d477a 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeViewModal.js +++ b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/NodeViewModal.js @@ -1,12 +1,10 @@ import React, { useContext, useEffect, useCallback } from 'react'; - import { t } from '@lingui/macro'; import { Button, Modal } from '@patternfly/react-core'; import { WorkflowDispatchContext, WorkflowStateContext, } from 'contexts/Workflow'; - import ContentError from 'components/ContentError'; import ContentLoading from 'components/ContentLoading'; import PromptDetail from 'components/PromptDetail'; @@ -21,6 +19,8 @@ function NodeViewModal({ readOnly }) { const { fullUnifiedJobTemplate, originalNodeCredentials, + originalNodeInstanceGroups, + originalNodeLabels, originalNodeObject, promptValues, } = nodeToView; @@ -157,6 +157,22 @@ function NodeViewModal({ 
readOnly }) { if (launchConfig.ask_inventory_on_launch) { overrides.inventory = originalNodeObject.summary_fields.inventory; } + if (launchConfig.ask_execution_environment_on_launch) { + overrides.execution_environment = + originalNodeObject.summary_fields.execution_environment; + } + if (launchConfig.ask_labels_on_launch) { + overrides.labels = originalNodeLabels || []; + } + if (launchConfig.ask_forks_on_launch) { + overrides.forks = originalNodeObject.forks; + } + if (launchConfig.ask_job_slice_count_on_launch) { + overrides.job_slice_count = originalNodeObject.job_slice_count; + } + if (launchConfig.ask_timeout_on_launch) { + overrides.timeout = originalNodeObject.timeout; + } if (launchConfig.ask_scm_branch_on_launch) { overrides.scm_branch = originalNodeObject.scm_branch; } @@ -190,6 +206,9 @@ function NodeViewModal({ readOnly }) { if (launchConfig.ask_credential_on_launch) { overrides.credentials = originalNodeCredentials || []; } + if (launchConfig.ask_instance_groups_on_launch) { + overrides.instance_groups = originalNodeInstanceGroups || []; + } } let nodeUpdatedConvergence = {}; diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/useWorkflowNodeSteps.js b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/useWorkflowNodeSteps.js index c7baafa3ed..853c23d31a 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/useWorkflowNodeSteps.js +++ b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Modals/NodeModals/useWorkflowNodeSteps.js @@ -3,9 +3,11 @@ import { useFormikContext } from 'formik'; import { t } from '@lingui/macro'; import useInventoryStep from 'components/LaunchPrompt/steps/useInventoryStep'; import useCredentialsStep from 'components/LaunchPrompt/steps/useCredentialsStep'; +import useExecutionEnvironmentStep from 'components/LaunchPrompt/steps/useExecutionEnvironmentStep'; import useOtherPromptsStep from 
'components/LaunchPrompt/steps/useOtherPromptsStep'; import useSurveyStep from 'components/LaunchPrompt/steps/useSurveyStep'; import usePreviewStep from 'components/LaunchPrompt/steps/usePreviewStep'; +import useInstanceGroupsStep from 'components/LaunchPrompt/steps/useInstanceGroupsStep'; import { WorkflowStateContext } from 'contexts/Workflow'; import { jsonToYaml } from 'util/yaml'; import { stringIsUUID } from 'util/strings'; @@ -26,6 +28,12 @@ function showPreviewStep(nodeType, launchConfig) { launchConfig.ask_variables_on_launch || launchConfig.ask_limit_on_launch || launchConfig.ask_scm_branch_on_launch || + launchConfig.ask_execution_environment_on_launch || + launchConfig.ask_labels_on_launch || + launchConfig.ask_forks_on_launch || + launchConfig.ask_job_slice_count_on_launch || + launchConfig.ask_timeout_on_launch || + launchConfig.ask_instance_groups_on_launch || launchConfig.survey_enabled || (launchConfig.variables_needed_to_start && launchConfig.variables_needed_to_start.length > 0) @@ -129,6 +137,20 @@ const getNodeToEditDefaultValues = ( } } + if (launchConfig.ask_execution_environment_on_launch) { + if (nodeToEdit?.promptValues) { + initialValues.execution_environment = + nodeToEdit?.promptValues?.execution_environment; + } else if ( + nodeToEdit?.originalNodeObject?.summary_fields?.execution_environment + ) { + initialValues.execution_environment = + nodeToEdit?.originalNodeObject?.summary_fields?.execution_environment; + } else { + initialValues.execution_environment = null; + } + } + if (launchConfig.ask_credential_on_launch) { if (nodeToEdit?.promptValues?.credentials) { initialValues.credentials = nodeToEdit?.promptValues?.credentials; @@ -197,6 +219,21 @@ const getNodeToEditDefaultValues = ( if (launchConfig.ask_diff_mode_on_launch) { initialValues.diff_mode = sourceOfValues?.diff_mode || false; } + if (launchConfig.ask_forks_on_launch) { + initialValues.forks = sourceOfValues?.forks || 0; + } + if 
(launchConfig.ask_job_slice_count_on_launch) { + initialValues.job_slice_count = sourceOfValues?.job_slice_count || 1; + } + if (launchConfig.ask_timeout_on_launch) { + initialValues.timeout = sourceOfValues?.timeout || 0; + } + if (launchConfig.ask_labels_on_launch) { + initialValues.labels = sourceOfValues?.labels || []; + } + if (launchConfig.ask_instance_groups_on_launch) { + initialValues.instance_groups = sourceOfValues?.instance_groups || []; + } if (launchConfig.ask_variables_on_launch) { const newExtraData = { ...sourceOfValues.extra_data }; @@ -242,7 +279,9 @@ export default function useWorkflowNodeSteps( surveyConfig, resource, askLinkType, - resourceDefaultCredentials + resourceDefaultCredentials, + labels, + instanceGroups ) { const { nodeToEdit } = useContext(WorkflowStateContext); const { @@ -258,7 +297,9 @@ export default function useWorkflowNodeSteps( useDaysToKeepStep(), useInventoryStep(launchConfig, resource, visited), useCredentialsStep(launchConfig, resource, resourceDefaultCredentials), - useOtherPromptsStep(launchConfig, resource), + useExecutionEnvironmentStep(launchConfig, resource), + useInstanceGroupsStep(launchConfig, resource, instanceGroups), + useOtherPromptsStep(launchConfig, resource, labels), useSurveyStep(launchConfig, surveyConfig, resource, visited), ]; @@ -348,6 +389,8 @@ export default function useWorkflowNodeSteps( setVisited({ inventory: true, credentials: true, + executionEnvironment: true, + instanceGroups: true, other: true, survey: true, preview: true, diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.js b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.js index 170dc59ad1..f0ae5bcfb3 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.js +++ b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.js @@ -1,6 +1,5 @@ import React, { useCallback, useEffect, useReducer } from 'react'; import { useHistory } from 
'react-router-dom'; - import styled from 'styled-components'; import { shape } from 'prop-types'; import { t } from '@lingui/macro'; @@ -18,6 +17,7 @@ import ContentLoading from 'components/ContentLoading'; import workflowReducer from 'components/Workflow/workflowReducer'; import useRequest, { useDismissableError } from 'hooks/useRequest'; import { + OrganizationsAPI, WorkflowApprovalTemplatesAPI, WorkflowJobTemplateNodesAPI, WorkflowJobTemplatesAPI, @@ -53,7 +53,18 @@ const Wrapper = styled.div` `; const replaceIdentifier = (node) => { - if (stringIsUUID(node.originalNodeObject.identifier) || node.identifier) { + if ( + stringIsUUID(node.originalNodeObject.identifier) && + typeof node.identifier === 'string' && + node.identifier !== '' + ) { + return true; + } + + if ( + !stringIsUUID(node.originalNodeObject.identifier) && + node.originalNodeObject.identifier !== node.identifier + ) { return true; } @@ -126,6 +137,7 @@ function Visualizer({ template }) { addNodeTarget: null, addingLink: false, contentError: null, + defaultOrganization: null, isLoading: true, linkToDelete: null, linkToEdit: null, @@ -148,6 +160,7 @@ function Visualizer({ template }) { addLinkTargetNode, addNodeSource, contentError, + defaultOrganization, isLoading, linkToDelete, linkToEdit, @@ -261,6 +274,14 @@ function Visualizer({ template }) { useEffect(() => { async function fetchData() { try { + const { + data: { results }, + } = await OrganizationsAPI.read({ page_size: 1, page: 1 }); + dispatch({ + type: 'SET_DEFAULT_ORGANIZATION', + value: results[0]?.id, + }); + const workflowNodes = await fetchWorkflowNodes(template.id); dispatch({ type: 'GENERATE_NODES_AND_LINKS', @@ -302,6 +323,9 @@ function Visualizer({ template }) { const deletedNodeIds = []; const associateCredentialRequests = []; const disassociateCredentialRequests = []; + const associateLabelRequests = []; + const disassociateLabelRequests = []; + const instanceGroupRequests = []; const generateLinkMapAndNewLinks = () => { const 
linkMap = {}; @@ -400,6 +424,8 @@ function Visualizer({ template }) { nodeRequests.push( WorkflowJobTemplatesAPI.createNode(template.id, { ...node.promptValues, + execution_environment: + node.promptValues?.execution_environment?.id || null, inventory: node.promptValues?.inventory?.id || null, unified_job_template: node.fullUnifiedJobTemplate.id, all_parents_must_converge: node.all_parents_must_converge, @@ -423,6 +449,29 @@ function Visualizer({ template }) { ); }); } + + if (node.promptValues?.labels?.length > 0) { + node.promptValues.labels.forEach((label) => { + associateLabelRequests.push( + WorkflowJobTemplateNodesAPI.associateLabel( + data.id, + label, + node.fullUnifiedJobTemplate.organization || + defaultOrganization + ) + ); + }); + } + if (node.promptValues?.instance_groups?.length > 0) + /* eslint-disable no-await-in-loop, no-restricted-syntax */ + for (const group of node.promptValues.instance_groups) { + instanceGroupRequests.push( + WorkflowJobTemplateNodesAPI.associateInstanceGroup( + data.id, + group.id + ) + ); + } }) ); } @@ -487,6 +536,8 @@ function Visualizer({ template }) { nodeRequests.push( WorkflowJobTemplateNodesAPI.replace(node.originalNodeObject.id, { ...node.promptValues, + execution_environment: + node.promptValues?.execution_environment?.id || null, inventory: node.promptValues?.inventory?.id || null, unified_job_template: node.fullUnifiedJobTemplate.id, all_parents_must_converge: node.all_parents_must_converge, @@ -503,6 +554,12 @@ function Visualizer({ template }) { node.promptValues?.credentials ); + const { added: addedLabels, removed: removedLabels } = + getAddedAndRemoved( + node?.originalNodeLabels, + node.promptValues?.labels + ); + if (addedCredentials.length > 0) { addedCredentials.forEach((cred) => { associateCredentialRequests.push( @@ -523,6 +580,41 @@ function Visualizer({ template }) { ) ); } + + if (addedLabels.length > 0) { + addedLabels.forEach((label) => { + associateLabelRequests.push( + 
WorkflowJobTemplateNodesAPI.associateLabel( + node.originalNodeObject.id, + label, + node.fullUnifiedJobTemplate.organization || + defaultOrganization + ) + ); + }); + } + if (removedLabels?.length > 0) { + removedLabels.forEach((label) => + disassociateLabelRequests.push( + WorkflowJobTemplateNodesAPI.disassociateLabel( + node.originalNodeObject.id, + label, + node.fullUnifiedJobTemplate.organization || + defaultOrganization + ) + ) + ); + } + + if (node.promptValues?.instance_groups) { + instanceGroupRequests.push( + WorkflowJobTemplateNodesAPI.orderInstanceGroups( + node.originalNodeObject.id, + node.promptValues?.instance_groups, + node?.originalNodeInstanceGroups || [] + ) + ); + } }) ); } @@ -539,11 +631,18 @@ function Visualizer({ template }) { ); await Promise.all(associateNodes(newLinks, originalLinkMap)); - await Promise.all(disassociateCredentialRequests); - await Promise.all(associateCredentialRequests); + await Promise.all([ + ...disassociateCredentialRequests, + ...disassociateLabelRequests, + ]); + await Promise.all([ + ...associateCredentialRequests, + ...associateLabelRequests, + ...instanceGroupRequests, + ]); history.push(`/templates/workflow_job_template/${template.id}/details`); - }, [links, nodes, history, template.id]), + }, [links, nodes, history, defaultOrganization, template.id]), {} ); diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.test.js b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.test.js index 1be1ae3bdd..28b250dca3 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.test.js +++ b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/Visualizer.test.js @@ -1,6 +1,7 @@ import React from 'react'; import { act } from 'react-dom/test-utils'; import { + OrganizationsAPI, WorkflowApprovalTemplatesAPI, WorkflowJobTemplateNodesAPI, WorkflowJobTemplatesAPI, @@ -104,6 +105,12 @@ const mockWorkflowNodes = [ describe('Visualizer', () => { let wrapper; 
beforeEach(() => { + OrganizationsAPI.read.mockResolvedValue({ + data: { + count: 1, + results: [{ id: 1, name: 'Default' }], + }, + }); WorkflowJobTemplatesAPI.readNodes.mockResolvedValue({ data: { count: mockWorkflowNodes.length, diff --git a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/VisualizerNode.js b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/VisualizerNode.js index 88f6346f2b..9b42148346 100644 --- a/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/VisualizerNode.js +++ b/awx/ui/src/screens/Template/WorkflowJobTemplateVisualizer/VisualizerNode.js @@ -64,7 +64,6 @@ function VisualizerNode({ }) { const ref = useRef(null); const [hovering, setHovering] = useState(false); - const [credentialsError, setCredentialsError] = useState(null); const [detailError, setDetailError] = useState(null); const dispatch = useContext(WorkflowDispatchContext); const { addingLink, addLinkSourceNode, nodePositions, nodes } = @@ -72,7 +71,6 @@ function VisualizerNode({ const isAddLinkSourceNode = addLinkSourceNode && addLinkSourceNode.id === node.id; - const handleCredentialsErrorClose = () => setCredentialsError(null); const handleDetailErrorClose = () => setDetailError(null); const updateNode = async () => { @@ -98,18 +96,47 @@ function VisualizerNode({ if ( node?.originalNodeObject?.summary_fields?.unified_job_template - ?.unified_job_type === 'job' && - !node?.originalNodeCredentials + ?.unified_job_type === 'job' || + node?.originalNodeObject?.summary_fields?.unified_job_template + ?.unified_job_type === 'workflow_job' ) { try { - const { - data: { results }, - } = await WorkflowJobTemplateNodesAPI.readCredentials( - node.originalNodeObject.id - ); - updatedNode.originalNodeCredentials = results; + if ( + node?.originalNodeObject?.summary_fields?.unified_job_template + ?.unified_job_type === 'job' && + !node?.originalNodeCredentials + ) { + const { + data: { results }, + } = await WorkflowJobTemplateNodesAPI.readCredentials( + 
node.originalNodeObject.id + ); + updatedNode.originalNodeCredentials = results; + } + if ( + node?.originalNodeObject?.summary_fields?.unified_job_template + ?.unified_job_type === 'job' && + !node.originalNodeLabels + ) { + const { + data: { results }, + } = await WorkflowJobTemplateNodesAPI.readAllLabels( + node.originalNodeObject.id + ); + updatedNode.originalNodeLabels = results; + updatedNode.originalNodeObject.labels = results; + } + if (!node.originalNodeInstanceGroups) { + const { + data: { results }, + } = await WorkflowJobTemplateNodesAPI.readInstanceGroups( + node.originalNodeObject.id + ); + updatedNode.originalNodeInstanceGroups = results; + updatedNode.originalNodeObject.instance_groups = results; + } } catch (err) { - setCredentialsError(err); + setDetailError(err); return null; } } @@ -350,17 +377,6 @@ function VisualizerNode({ )} - {credentialsError && ( - - {t`Failed to retrieve node credentials.`} - - - )} ); } diff --git a/awx/ui/src/screens/Template/shared/JobTemplate.helptext.js b/awx/ui/src/screens/Template/shared/JobTemplate.helptext.js index 4d7ce59c47..e1b6e589ff 100644 --- a/awx/ui/src/screens/Template/shared/JobTemplate.helptext.js +++ b/awx/ui/src/screens/Template/shared/JobTemplate.helptext.js @@ -6,7 +6,7 @@ const jtHelpTextStrings = () => ({ jobType: t`For job templates, select run to execute the playbook. Select check to only check playbook syntax, test environment setup, and report problems without executing the playbook.`, inventory: t`Select the inventory containing the hosts you want this job to manage.`, project: t`Select the project containing the playbook you want this job to execute.`, - executionEnvironmentForm: t`Select the execution environment for this job template.`, + executionEnvironmentForm: t`The container image to be used for execution.`, executionEnvironmentDetail: t`The execution environment that will be used when launching this job template. 
The resolved execution environment can be overridden by explicitly assigning a different one to this job template.`, playbook: t`Select the playbook to be executed by this job.`, credentials: t`Select credentials for accessing the nodes this job will be ran against. You can only select one credential of each type. For machine credentials (SSH), checking "Prompt on launch" without selecting credentials will require you to select a machine credential at run time. If you select credentials and check "Prompt on launch", the selected credential(s) become the defaults that can be updated at run time.`, @@ -24,7 +24,7 @@ const jtHelpTextStrings = () => ({ webhookURL: t`Webhook services can launch jobs with this workflow job template by making a POST request to this URL.`, webhookKey: t`Webhook services can use this as a shared secret.`, webhookCredential: t`Optionally select the credential to use to send status updates back to the webhook service.`, - sourceControlBranch: t`Select a branch for the workflow. This branch is applied to all job template nodes that prompt for a branch.`, + sourceControlBranch: t`Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true.`, provisioningCallbacks: (brandName = '') => t`Enables creation of a provisioning callback URL. 
Using the URL a host can contact ${brandName} and request a configuration update using this job template.`, privilegeEscalation: t`If enabled, run this playbook as an administrator.`, diff --git a/awx/ui/src/screens/Template/shared/JobTemplateForm.js b/awx/ui/src/screens/Template/shared/JobTemplateForm.js index 2bf799c8d7..a82aaa8ad3 100644 --- a/awx/ui/src/screens/Template/shared/JobTemplateForm.js +++ b/awx/ui/src/screens/Template/shared/JobTemplateForm.js @@ -1,6 +1,5 @@ import React, { useState, useEffect, useCallback } from 'react'; import PropTypes from 'prop-types'; - import { t } from '@lingui/macro'; import { withFormik, useField } from 'formik'; import { @@ -87,6 +86,10 @@ function JobTemplateForm({ const [credentialField, , credentialHelpers] = useField('credentials'); const [labelsField, , labelsHelpers] = useField('labels'); const [limitField, limitMeta, limitHelpers] = useField('limit'); + const [forksField, forksMeta, forksHelpers] = useField('forks'); + const [jobSliceCountField, jobSliceCountMeta, jobSliceCountHelpers] = + useField('job_slice_count'); + const [timeoutField, timeoutMeta, timeoutHelpers] = useField('timeout'); const [diffModeField, , diffModeHelpers] = useField('diff_mode'); const [instanceGroupsField, , instanceGroupsHelpers] = useField('instanceGroups'); @@ -321,6 +324,9 @@ function JobTemplateForm({ globallyAvailable isDisabled={!projectField.value?.id} projectId={projectField.value?.id} + promptId="template-ask-execution-environment-on-launch" + promptName="ask_execution_environment_on_launch" + isPromptableField /> {projectField.value?.allow_override && ( @@ -376,10 +382,12 @@ function JobTemplateForm({ onError={setContentError} /> - } + - + - + > + { + forksHelpers.setValue(value); + }} + type="number" + min="0" + /> + - - + { + jobSliceCountHelpers.setValue(value); + }} + type="number" + min="1" + /> + + + > + { + timeoutHelpers.setValue(value); + }} + type="number" + min="0" + /> + instanceGroupsHelpers.setValue(value)} 
tooltip={helpText.instanceGroups} fieldName="instanceGroups" + promptId="template-ask-instance-groups-on-launch" + promptName="ask_instance_groups_on_launch" + isPromptableField /> ({ playbook. Multiple patterns are allowed. Refer to Ansible documentation for more information and examples on patterns.`, sourceControlBranch: t`Select a branch for the workflow. This branch is applied to all job template nodes that prompt for a branch.`, - labels: t`Optional labels that describe this job template, + labels: t`Optional labels that describe this workflow job template, such as 'dev' or 'test'. Labels can be used to group and filter - job templates and completed jobs.`, + workflow job templates and completed jobs.`, variables: t`Pass extra command line variables to the playbook. This is the -e or --extra-vars command line parameter for ansible-playbook. Provide key/value pairs using either YAML or JSON. Refer to the Ansible Controller documentation for example syntax.`, enableWebhook: t`Enable Webhook for this workflow job template.`, enableConcurrentJobs: t`If enabled, simultaneous runs of this workflow job template will be allowed.`, @@ -18,6 +18,8 @@ const wfHelpTextStrings = () => ({ webhookKey: t`Webhook services can use this as a shared secret.`, webhookCredential: t`Optionally select the credential to use to send status updates back to the webhook service.`, webhookService: t`Select a webhook service.`, + jobTags: t`Tags are useful when you have a large playbook, and you want to run a specific part of a play or task. Use commas to separate multiple tags. Refer to the documentation for details on the usage of tags.`, + skipTags: t`Skip tags are useful when you have a large playbook, and you want to skip specific parts of a play or task. Use commas to separate multiple tags. Refer to the documentation for details on the usage of tags.`, enabledOptions: ( <>

{t`Concurrent jobs: If enabled, simultaneous runs of this workflow job template will be allowed.`}

diff --git a/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.js b/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.js index 49ddb8892e..9d974f3105 100644 --- a/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.js +++ b/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.js @@ -27,6 +27,7 @@ import CheckboxField from 'components/FormField/CheckboxField'; import Popover from 'components/Popover'; import { WorkFlowJobTemplate } from 'types'; import LabelSelect from 'components/LabelSelect'; +import { TagMultiSelect } from 'components/MultiSelect'; import WebhookSubForm from './WebhookSubForm'; import getHelpText from './WorkflowJobTemplate.helptext'; @@ -59,6 +60,8 @@ function WorkflowJobTemplateForm({ const [, webhookKeyMeta, webhookKeyHelpers] = useField('webhook_key'); const [, webhookCredentialMeta, webhookCredentialHelpers] = useField('webhook_credential'); + const [skipTagsField, , skipTagsHelpers] = useField('skip_tags'); + const [jobTagsField, , jobTagsHelpers] = useField('job_tags'); useEffect(() => { if (enableWebhooks) { @@ -167,7 +170,6 @@ function WorkflowJobTemplateForm({ }} />
-
- } + - + + + jobTagsHelpers.setValue(value)} + /> + + + skipTagsHelpers.setValue(value)} + /> + @@ -280,13 +308,18 @@ const FormikApp = withFormik({ extra_vars: template.extra_vars || '---', limit: template.limit || '', scm_branch: template.scm_branch || '', + skip_tags: template.skip_tags || '', + job_tags: template.job_tags || '', allow_simultaneous: template.allow_simultaneous || false, webhook_credential: template?.summary_fields?.webhook_credential || null, webhook_service: template.webhook_service || '', + ask_labels_on_launch: template.ask_labels_on_launch || false, ask_limit_on_launch: template.ask_limit_on_launch || false, ask_inventory_on_launch: template.ask_inventory_on_launch || false, ask_variables_on_launch: template.ask_variables_on_launch || false, ask_scm_branch_on_launch: template.ask_scm_branch_on_launch || false, + ask_skip_tags_on_launch: template.ask_skip_tags_on_launch || false, + ask_tags_on_launch: template.ask_tags_on_launch || false, webhook_url: template?.related?.webhook_receiver ? 
`${urlOrigin}${template.related.webhook_receiver}` : '', diff --git a/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.test.js b/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.test.js index 17234df88c..65b5f59522 100644 --- a/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.test.js +++ b/awx/ui/src/screens/Template/shared/WorkflowJobTemplateForm.test.js @@ -189,7 +189,9 @@ describe('', () => { 'FieldWithPrompt[label="Inventory"]', 'FieldWithPrompt[label="Limit"]', 'FieldWithPrompt[label="Source control branch"]', - 'FormGroup[label="Labels"]', + 'FieldWithPrompt[label="Labels"]', + 'FieldWithPrompt[label="Skip Tags"]', + 'FieldWithPrompt[label="Job Tags"]', 'VariablesField', ]; diff --git a/awx/ui/src/util/labels.js b/awx/ui/src/util/labels.js new file mode 100644 index 0000000000..8e973d5836 --- /dev/null +++ b/awx/ui/src/util/labels.js @@ -0,0 +1,57 @@ +import { LabelsAPI, OrganizationsAPI } from '../api'; + +async function createNewLabels(labels = [], organization = null) { + let error = null; + const labelIds = []; + + try { + const newLabels = []; + const labelRequests = []; + let organizationId = organization; + if (labels) { + labels.forEach((label) => { + if (typeof label.id !== 'number') { + newLabels.push(label); + } else { + labelIds.push(label.id); + } + }); + } + + if (newLabels.length > 0) { + if (!organizationId) { + // eslint-disable-next-line no-useless-catch + try { + const { + data: { results }, + } = await OrganizationsAPI.read(); + organizationId = results[0].id; + } catch (err) { + throw err; + } + } + } + + newLabels.forEach((label) => { + labelRequests.push( + LabelsAPI.create({ + name: label.name, + organization: organizationId, + }).then(({ data }) => { + labelIds.push(data.id); + }) + ); + }); + + await Promise.all(labelRequests); + } catch (err) { + error = err; + } + + return { + labelIds, + error, + }; +} + +export default createNewLabels; diff --git a/awx_collection/plugins/modules/job_launch.py 
b/awx_collection/plugins/modules/job_launch.py index 4305988964..fc60aea995 100644 --- a/awx_collection/plugins/modules/job_launch.py +++ b/awx_collection/plugins/modules/job_launch.py @@ -86,6 +86,33 @@ options: description: - Passwords for credentials which are set to prompt on launch type: dict + execution_environment: + description: + - Execution environment to use for the job, only used if prompt for execution environment is set. + type: str + forks: + description: + - Forks to use for the job, only used if prompt for forks is set. + type: int + instance_groups: + description: + - Instance groups to use for the job, only used if prompt for instance groups is set. + type: list + elements: str + job_slice_count: + description: + - Job slice count to use for the job, only used if prompt for job slice count is set. + type: int + labels: + description: + - Labels to use for the job, only used if prompt for labels is set. + type: list + elements: str + job_timeout: + description: + - Timeout to use for the job, only used if prompt for timeout is set. + - This parameter is sent through the API to the job. + type: int wait: description: - Wait for the job to complete. @@ -100,7 +127,7 @@ options: timeout: description: - If waiting for the job to complete this will abort after this - amount of seconds + amount of seconds. This happens on the module side. 
type: int extends_documentation_fragment: awx.awx.auth ''' @@ -165,6 +192,12 @@ def main(): verbosity=dict(type='int', choices=[0, 1, 2, 3, 4, 5]), diff_mode=dict(type='bool'), credential_passwords=dict(type='dict', no_log=False), + execution_environment=dict(), + forks=dict(type='int'), + instance_groups=dict(type='list', elements='str'), + job_slice_count=dict(type='int'), + labels=dict(type='list', elements='str'), + job_timeout=dict(type='int'), wait=dict(default=False, type='bool'), interval=dict(default=2.0, type='float'), timeout=dict(default=None, type='int'), @@ -179,6 +212,9 @@ def main(): inventory = module.params.get('inventory') organization = module.params.get('organization') credentials = module.params.get('credentials') + execution_environment = module.params.get('execution_environment') + instance_groups = module.params.get('instance_groups') + labels = module.params.get('labels') wait = module.params.get('wait') interval = module.params.get('interval') timeout = module.params.get('timeout') @@ -191,6 +227,9 @@ def main(): 'verbosity', 'diff_mode', 'credential_passwords', + 'forks', + 'job_slice_count', + 'job_timeout', ): field_val = module.params.get(field_name) if field_val is not None: @@ -204,6 +243,11 @@ def main(): if skip_tags is not None: optional_args['skip_tags'] = ",".join(skip_tags) + # job_timeout is special because its actually timeout but we already had a timeout variable + job_timeout = module.params.get('job_timeout') + if job_timeout is not None: + optional_args['timeout'] = job_timeout + # Create a datastructure to pass into our job launch post_data = {} for arg_name, arg_value in optional_args.items(): @@ -213,11 +257,21 @@ def main(): # Attempt to look up the related items the user specified (these will fail the module if not found) if inventory: post_data['inventory'] = module.resolve_name_to_id('inventories', inventory) + if execution_environment: + post_data['execution_environment'] = 
module.resolve_name_to_id('execution_environments', execution_environment) if credentials: post_data['credentials'] = [] for credential in credentials: post_data['credentials'].append(module.resolve_name_to_id('credentials', credential)) + if labels: + post_data['labels'] = [] + for label in labels: + post_data['labels'].append(module.resolve_name_to_id('labels', label)) + if instance_groups: + post_data['instance_groups'] = [] + for instance_group in instance_groups: + post_data['instance_groups'].append(module.resolve_name_to_id('instance_groups', instance_group)) # Attempt to look up job_template based on the provided name lookup_data = {} diff --git a/awx_collection/plugins/modules/job_template.py b/awx_collection/plugins/modules/job_template.py index 7b412166e3..5a7e9b6e25 100644 --- a/awx_collection/plugins/modules/job_template.py +++ b/awx_collection/plugins/modules/job_template.py @@ -208,6 +208,42 @@ options: type: bool aliases: - ask_credential + ask_execution_environment_on_launch: + description: + - Prompt user for execution environment on launch. + type: bool + aliases: + - ask_execution_environment + ask_forks_on_launch: + description: + - Prompt user for forks on launch. + type: bool + aliases: + - ask_forks + ask_instance_groups_on_launch: + description: + - Prompt user for instance groups on launch. + type: bool + aliases: + - ask_instance_groups + ask_job_slice_count_on_launch: + description: + - Prompt user for job slice count on launch. + type: bool + aliases: + - ask_job_slice_count + ask_labels_on_launch: + description: + - Prompt user for labels on launch. + type: bool + aliases: + - ask_labels + ask_timeout_on_launch: + description: + - Prompt user for timeout on launch. + type: bool + aliases: + - ask_timeout survey_enabled: description: - Enable a survey on the job template. 
@@ -385,6 +421,12 @@ def main(): ask_verbosity_on_launch=dict(type='bool', aliases=['ask_verbosity']), ask_inventory_on_launch=dict(type='bool', aliases=['ask_inventory']), ask_credential_on_launch=dict(type='bool', aliases=['ask_credential']), + ask_execution_environment_on_launch=dict(type='bool', aliases=['ask_execution_environment']), + ask_forks_on_launch=dict(type='bool', aliases=['ask_forks']), + ask_instance_groups_on_launch=dict(type='bool', aliases=['ask_instance_groups']), + ask_job_slice_count_on_launch=dict(type='bool', aliases=['ask_job_slice_count']), + ask_labels_on_launch=dict(type='bool', aliases=['ask_labels']), + ask_timeout_on_launch=dict(type='bool', aliases=['ask_timeout']), survey_enabled=dict(type='bool'), survey_spec=dict(type="dict"), become_enabled=dict(type='bool'), @@ -484,6 +526,12 @@ def main(): 'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', + 'ask_execution_environment_on_launch', + 'ask_forks_on_launch', + 'ask_instance_groups_on_launch', + 'ask_job_slice_count_on_launch', + 'ask_labels_on_launch', + 'ask_timeout_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode', diff --git a/awx_collection/plugins/modules/schedule.py b/awx_collection/plugins/modules/schedule.py index 4cbafaeed4..d0fac2384e 100644 --- a/awx_collection/plugins/modules/schedule.py +++ b/awx_collection/plugins/modules/schedule.py @@ -42,17 +42,39 @@ options: - Optional description of this schedule. required: False type: str + execution_environment: + description: + - Execution Environment applied as a prompt, assuming jot template prompts for execution environment + type: str extra_data: description: - Specify C(extra_vars) for the template. 
required: False type: dict default: {} + forks: + description: + - Forks applied as a prompt, assuming job template prompts for forks + type: int + instance_groups: + description: + - List of Instance Groups applied as a prompt, assuming job template prompts for instance groups + type: list + elements: str inventory: description: - Inventory applied as a prompt, assuming job template prompts for inventory required: False type: str + job_slice_count: + description: + - Job Slice Count applied as a prompt, assuming job template prompts for job slice count + type: int + labels: + description: + - List of labels applied as a prompt, assuming job template prompts for labels + type: list + elements: str credentials: description: - List of credentials applied as a prompt, assuming job template prompts for credentials @@ -63,6 +85,10 @@ options: - Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true. required: False type: str + timeout: + description: + - Timeout applied as a prompt, assuming job template prompts for timeout + type: int job_type: description: - The job type to use for the job template. 
@@ -176,8 +202,14 @@ def main(): name=dict(required=True), new_name=dict(), description=dict(), + execution_environment=dict(type='str'), extra_data=dict(type='dict'), + forks=dict(type='int'), + instance_groups=dict(type='list', elements='str'), inventory=dict(), + job_slice_count=dict(type='int'), + labels=dict(type='list', elements='str'), + timeout=dict(type='int'), credentials=dict(type='list', elements='str'), scm_branch=dict(), job_type=dict(choices=['run', 'check']), @@ -200,8 +232,14 @@ def main(): name = module.params.get('name') new_name = module.params.get("new_name") description = module.params.get('description') + execution_environment = module.params.get('execution_environment') extra_data = module.params.get('extra_data') + forks = module.params.get('forks') + instance_groups = module.params.get('instance_groups') inventory = module.params.get('inventory') + job_slice_count = module.params.get('job_slice_count') + labels = module.params.get('labels') + timeout = module.params.get('timeout') credentials = module.params.get('credentials') scm_branch = module.params.get('scm_branch') job_type = module.params.get('job_type') @@ -238,6 +276,28 @@ def main(): for item in credentials: association_fields['credentials'].append(module.resolve_name_to_id('credentials', item)) + # We need to clear out the name from the search fields so we can use name_or_id in the following searches + if 'name' in search_fields: + del search_fields['name'] + + if labels is not None: + association_fields['labels'] = [] + for item in labels: + label_id = module.get_one('labels', name_or_id=item, **{'data': search_fields}) + if label_id is None: + module.fail_json(msg='Could not find label entry with name {0}'.format(item)) + else: + association_fields['labels'].append(label_id['id']) + + if instance_groups is not None: + association_fields['instance_groups'] = [] + for item in instance_groups: + instance_group_id = module.get_one('instance_groups', name_or_id=item, **{'data': 
search_fields}) + if instance_group_id is None: + module.fail_json(msg='Could not find instance_group entry with name {0}'.format(item)) + else: + association_fields['instance_groups'].append(instance_group_id['id']) + # Create the data that gets sent for create and update new_fields = {} if rrule is not None: @@ -267,6 +327,22 @@ def main(): new_fields['unified_job_template'] = unified_job_template_id if enabled is not None: new_fields['enabled'] = enabled + if forks is not None: + new_fields['forks'] = forks + if job_slice_count is not None: + new_fields['job_slice_count'] = job_slice_count + if timeout is not None: + new_fields['timeout'] = timeout + + if execution_environment is not None: + if execution_environment == '': + new_fields['execution_environment'] = '' + else: + ee = module.get_one('execution_environments', name_or_id=execution_environment, **{'data': search_fields}) + if ee is None: + module.fail_json(msg='could not find execution_environment entry with name {0}'.format(execution_environment)) + else: + new_fields['execution_environment'] = ee['id'] if state == 'absent': # If the state was absent we can let the module delete it if needed, the module will handle exiting from this diff --git a/awx_collection/plugins/modules/workflow_job_template.py b/awx_collection/plugins/modules/workflow_job_template.py index afc792e1f1..93eb451503 100644 --- a/awx_collection/plugins/modules/workflow_job_template.py +++ b/awx_collection/plugins/modules/workflow_job_template.py @@ -47,6 +47,16 @@ options: description: - Variables which will be made available to jobs ran inside the workflow. type: dict + job_tags: + description: + - Comma separated list of the tags to use for the job template. + type: str + ask_tags_on_launch: + description: + - Prompt user for job tags on launch. + type: bool + aliases: + - ask_tags organization: description: - Organization the workflow job template exists in. 
@@ -85,6 +95,22 @@ options: description: - Prompt user for limit on launch of this workflow job template type: bool + ask_labels_on_launch: + description: + - Prompt user for labels on launch. + type: bool + aliases: + - ask_labels + ask_skip_tags_on_launch: + description: + - Prompt user for job tags to skip on launch. + type: bool + aliases: + - ask_skip_tags + skip_tags: + description: + - Comma separated list of the tags to skip for the job template. + type: str webhook_service: description: - Service that webhook requests will be accepted from @@ -665,11 +691,15 @@ def main(): copy_from=dict(), description=dict(), extra_vars=dict(type='dict'), + job_tags=dict(), + skip_tags=dict(), organization=dict(), survey_spec=dict(type='dict', aliases=['survey']), survey_enabled=dict(type='bool'), allow_simultaneous=dict(type='bool'), ask_variables_on_launch=dict(type='bool'), + ask_labels_on_launch=dict(type='bool', aliases=['ask_labels']), + ask_skip_tags_on_launch=dict(type='bool', aliases=['ask_skip_tags']), inventory=dict(), limit=dict(), scm_branch=dict(), @@ -752,7 +782,11 @@ def main(): 'ask_scm_branch_on_launch', 'ask_limit_on_launch', 'ask_variables_on_launch', + 'ask_labels_on_launch', + 'ask_skip_tags_on_launch', 'webhook_service', + 'job_tags', + 'skip_tags', ): field_val = module.params.get(field_name) if field_val is not None: diff --git a/awx_collection/plugins/modules/workflow_job_template_node.py b/awx_collection/plugins/modules/workflow_job_template_node.py index d4732b12e0..f91d308282 100644 --- a/awx_collection/plugins/modules/workflow_job_template_node.py +++ b/awx_collection/plugins/modules/workflow_job_template_node.py @@ -152,6 +152,32 @@ options: - Uniqueness is not handled rigorously. 
type: list elements: str + execution_environment: + description: + - Execution Environment applied as a prompt, assuming jot template prompts for execution environment + type: str + forks: + description: + - Forks applied as a prompt, assuming job template prompts for forks + type: int + instance_groups: + description: + - List of Instance Groups applied as a prompt, assuming job template prompts for instance groups + type: list + elements: str + job_slice_count: + description: + - Job Slice Count applied as a prompt, assuming job template prompts for job slice count + type: int + labels: + description: + - List of labels applied as a prompt, assuming job template prompts for labels + type: list + elements: str + timeout: + description: + - Timeout applied as a prompt, assuming job template prompts for timeout + type: int state: description: - Desired state of the resource. @@ -255,6 +281,12 @@ def main(): always_nodes=dict(type='list', elements='str'), failure_nodes=dict(type='list', elements='str'), credentials=dict(type='list', elements='str'), + execution_environment=dict(type='str'), + forks=dict(type='int'), + instance_groups=dict(type='list', elements='str'), + job_slice_count=dict(type='int'), + labels=dict(type='list', elements='str'), + timeout=dict(type='int'), state=dict(choices=['present', 'absent'], default='present'), ) mutually_exclusive = [("unified_job_template", "approval_node")] @@ -327,32 +359,44 @@ def main(): 'diff_mode', 'verbosity', 'all_parents_must_converge', + 'forks', + 'job_slice_count', + 'timeout', ): field_val = module.params.get(field_name) if field_val: new_fields[field_name] = field_val association_fields = {} - for association in ('always_nodes', 'success_nodes', 'failure_nodes', 'credentials'): + for association in ('always_nodes', 'success_nodes', 'failure_nodes', 'credentials', 'instance_groups', 'labels'): name_list = module.params.get(association) if name_list is None: continue id_list = [] for sub_name in name_list: - if 
association == 'credentials': - endpoint = 'credentials' - lookup_data = {'name': sub_name} + if association in ['credentials', 'instance_groups', 'labels']: + sub_obj = module.get_one(association, name_or_id=sub_name) else: endpoint = 'workflow_job_template_nodes' lookup_data = {'identifier': sub_name} if workflow_job_template_id: lookup_data['workflow_job_template'] = workflow_job_template_id - sub_obj = module.get_one(endpoint, **{'data': lookup_data}) + sub_obj = module.get_one(endpoint, **{'data': lookup_data}) if sub_obj is None: module.fail_json(msg='Could not find {0} entry with name {1}'.format(association, sub_name)) id_list.append(sub_obj['id']) - if id_list: - association_fields[association] = id_list + association_fields[association] = id_list + + execution_environment = module.params.get('execution_environment') + if execution_environment is not None: + if execution_environment == '': + new_fields['execution_environment'] = '' + else: + ee = module.get_one('execution_environments', name_or_id=execution_environment) + if ee is None: + module.fail_json(msg='could not find execution_environment entry with name {0}'.format(execution_environment)) + else: + new_fields['execution_environment'] = ee['id'] # In the case of a new object, the utils need to know it is a node new_fields['type'] = 'workflow_job_template_node' diff --git a/awx_collection/test/awx/test_job_template.py b/awx_collection/test/awx/test_job_template.py index e213d1b7d5..e785a63a34 100644 --- a/awx_collection/test/awx/test_job_template.py +++ b/awx_collection/test/awx/test_job_template.py @@ -46,6 +46,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory): 'timeout': 50, 'allow_simultaneous': True, 'ask_limit_on_launch': True, + 'ask_execution_environment_on_launch': True, + 'ask_forks_on_launch': True, + 'ask_instance_groups_on_launch': True, + 'ask_job_slice_count_on_launch': True, + 'ask_labels_on_launch': True, + 'ask_timeout_on_launch': True, } result = 
run_module('job_template', module_args, admin_user) @@ -55,6 +61,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory): assert jt.timeout == 50 assert jt.allow_simultaneous assert jt.ask_limit_on_launch + assert jt.ask_execution_environment_on_launch + assert jt.ask_forks_on_launch + assert jt.ask_instance_groups_on_launch + assert jt.ask_job_slice_count_on_launch + assert jt.ask_labels_on_launch + assert jt.ask_timeout_on_launch module_args = { 'name': 'foo', @@ -68,6 +80,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory): 'timeout': 0, 'allow_simultaneous': False, 'ask_limit_on_launch': False, + 'ask_execution_environment_on_launch': False, + 'ask_forks_on_launch': False, + 'ask_instance_groups_on_launch': False, + 'ask_job_slice_count_on_launch': False, + 'ask_labels_on_launch': False, + 'ask_timeout_on_launch': False, } result = run_module('job_template', module_args, admin_user) @@ -78,6 +96,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory): assert jt.timeout == 0 assert not jt.allow_simultaneous assert not jt.ask_limit_on_launch + assert not jt.ask_execution_environment_on_launch + assert not jt.ask_forks_on_launch + assert not jt.ask_instance_groups_on_launch + assert not jt.ask_job_slice_count_on_launch + assert not jt.ask_labels_on_launch + assert not jt.ask_timeout_on_launch @pytest.mark.django_db diff --git a/awx_collection/test/awx/test_workflow_job_template.py b/awx_collection/test/awx/test_workflow_job_template.py index c5448b23aa..60a4fff7cf 100644 --- a/awx_collection/test/awx/test_workflow_job_template.py +++ b/awx_collection/test/awx/test_workflow_job_template.py @@ -18,6 +18,8 @@ def test_create_workflow_job_template(run_module, admin_user, organization, surv 'survey_spec': survey_spec, 'survey_enabled': True, 'state': 'present', + 'job_tags': '', + 'skip_tags': '', }, admin_user, ) @@ -35,7 +37,16 @@ def 
test_create_workflow_job_template(run_module, admin_user, organization, surv @pytest.mark.django_db def test_create_modify_no_survey(run_module, admin_user, organization, survey_spec): - result = run_module('workflow_job_template', {'name': 'foo-workflow', 'organization': organization.name}, admin_user) + result = run_module( + 'workflow_job_template', + { + 'name': 'foo-workflow', + 'organization': organization.name, + 'job_tags': '', + 'skip_tags': '', + }, + admin_user, + ) assert not result.get('failed', False), result.get('msg', result) assert result.get('changed', False), result diff --git a/awx_collection/tests/integration/targets/lookup_api_plugin/tasks/main.yml b/awx_collection/tests/integration/targets/lookup_api_plugin/tasks/main.yml index 7ecfa84a8c..5abed9dcd4 100644 --- a/awx_collection/tests/integration/targets/lookup_api_plugin/tasks/main.yml +++ b/awx_collection/tests/integration/targets/lookup_api_plugin/tasks/main.yml @@ -40,7 +40,7 @@ - assert: that: - - "'DNE' in results.msg" + - "'dne' in (results.msg | lower)" - name: Create our hosts host: diff --git a/awx_collection/tests/integration/targets/schedule/tasks/main.yml b/awx_collection/tests/integration/targets/schedule/tasks/main.yml index ec6242a129..73343faf94 100644 --- a/awx_collection/tests/integration/targets/schedule/tasks/main.yml +++ b/awx_collection/tests/integration/targets/schedule/tasks/main.yml @@ -7,11 +7,20 @@ set_fact: org_name: "AWX-Collection-tests-organization-org-{{ test_id }}" sched1: "AWX-Collection-tests-schedule-sched1-{{ test_id }}" + sched2: "AWX-Collection-tests-schedule-sched2-{{ test_id }}" cred1: "AWX-Collection-tests-schedule-cred1-{{ test_id }}" proj1: "AWX-Collection-tests-schedule-proj1-{{ test_id }}" proj2: "AWX-Collection-tests-schedule-proj2-{{ test_id }}" jt1: "AWX-Collection-tests-schedule-jt1-{{ test_id }}" jt2: "AWX-Collection-tests-schedule-jt1-{{ test_id }}" + ee1: "AWX-Collection-tests-schedule-ee1-{{ test_id }}" + label1: 
"AWX-Collection-tests-schedule-l1-{{ test_id }}" + label2: "AWX-Collection-tests-schedule-l2-{{ test_id }}" + ig1: "AWX-Collection-tests-schedule-ig1-{{ test_id }}" + ig2: "AWX-Collection-tests-schedule-ig2-{{ test_id }}" + slice_inventory: "AWX-Collection-tests-schedule-slice-inv-{{ test_id }}" + host_name: "AWX-Collection-tests-schedule-host-{{ test_id }}" + slice_num: 10 - block: - name: Try to create without an rrule @@ -124,6 +133,12 @@ ask_limit_on_launch: true ask_diff_mode_on_launch: true ask_verbosity_on_launch: true + ask_execution_environment_on_launch: true + ask_forks_on_launch: true + ask_instance_groups_on_launch: true + ask_job_slice_count_on_launch: true + ask_labels_on_launch: true + ask_timeout_on_launch: true job_type: run state: present register: result @@ -132,17 +147,53 @@ that: - "result is changed" + - name: Create labels + label: + name: "{{ item }}" + organization: "{{ org_name }}" + loop: + - "{{ label1 }}" + - "{{ label2 }}" + + - name: Create an execution environment + execution_environment: + name: "{{ ee1 }}" + image: "junk" + + - name: Create instance groups + instance_group: + name: "{{ item }}" + loop: + - "{{ ig1 }}" + - "{{ ig2 }}" + + - name: Create proper inventory for slice count + inventory: + name: "{{ slice_inventory }}" + organization: "{{ org_name }}" + state: present + register: result + + - name: Create a Host + host: + name: "{{ host_name }}-{{ item }}" + inventory: "{{ slice_inventory }}" + state: present + variables: + ansible_connection: local + loop: "{{ range(slice_num)|list }}" + register: result - name: Create with options that the JT does support schedule: - name: "{{ sched1 }}" + name: "{{ sched2 }}" state: present unified_job_template: "{{ jt1 }}" rrule: "DTSTART:20191219T130551Z RRULE:FREQ=WEEKLY;INTERVAL=1;COUNT=1" - description: "This hopefully will not work" + description: "This hopefully will work" extra_data: some: var - inventory: Demo Inventory + inventory: "{{ slice_inventory }}" scm_branch: 
asdf1234 credentials: - "{{ cred1 }}" @@ -153,6 +204,33 @@ diff_mode: true verbosity: 4 enabled: true + execution_environment: "{{ ee1 }}" + forks: 10 + instance_groups: + - "{{ ig1 }}" + - "{{ ig2 }}" + job_slice_count: "{{ slice_num }}" + labels: + - "{{ label1 }}" + - "{{ label2 }}" + timeout: 10 + register: result + ignore_errors: true + + - assert: + that: + - "result is changed" + + - name: Reset some options + schedule: + name: "{{ sched2 }}" + state: present + execution_environment: "" + forks: 1 + instance_groups: [] + job_slice_count: 1 + labels: [] + timeout: 60 register: result ignore_errors: true @@ -163,7 +241,7 @@ - name: Disable a schedule schedule: name: "{{ sched1 }}" - unified_job_template: "{{ jt1 }}" + unified_job_template: "Demo Job Template" state: present enabled: "false" register: result @@ -213,42 +291,48 @@ - result is changed always: - - name: Delete the schedule + - name: Delete the schedules schedule: - name: "{{ sched1 }}" + name: "{{ item }}" state: absent + loop: + - "{{ sched1 }}" + - "{{ sched2 }}" + ignore_errors: True - - name: Delete the jt + - name: Delete the jt1 job_template: name: "{{ jt1 }}" project: "{{ proj1 }}" playbook: hello_world.yml state: absent + ignore_errors: True - - name: Delete the jt + - name: Delete the jt2 job_template: name: "{{ jt2 }}" project: "{{ proj2 }}" playbook: hello_world.yml state: absent + ignore_errors: True - - name: Delete the Project + - name: Delete the Project2 project: name: "{{ proj2 }}" organization: "{{ org_name }}" state: absent scm_type: git scm_url: https://github.com/ansible/ansible-tower-samples.git - register: result + ignore_errors: True - - name: Delete the Project + - name: Delete the Project1 project: name: "{{ proj1 }}" organization: Default state: absent scm_type: git scm_url: https://github.com/ansible/ansible-tower-samples.git - register: result + ignore_errors: True - name: Delete Credential1 credential: @@ -256,9 +340,43 @@ organization: Default credential_type: Red 
Hat Ansible Automation Platform state: absent + ignore_errors: True + + # Labels can not be deleted + + - name: Delete an execution environment + execution_environment: + name: "{{ ee1 }}" + image: "junk" + state: absent + ignore_errors: True + + - name: Delete instance groups + instance_group: + name: "{{ item }}" + state: absent + loop: + - "{{ ig1 }}" + - "{{ ig2 }}" + ignore_errors: True - name: "Remove the organization" organization: name: "{{ org_name }}" state: absent - register: result + ignore_errors: True + + - name: "Delete slice inventory" + inventory: + name: "{{ slice_inventory }}" + organization: "{{ org_name }}" + state: absent + ignore_errors: True + + - name: Delete slice hosts + host: + name: "{{ host_name }}-{{ item }}" + inventory: "{{ slice_inventory }}" + state: absent + loop: "{{ range(slice_num)|list }}" + ignore_errors: True diff --git a/awx_collection/tests/integration/targets/workflow_job_template/tasks/main.yml b/awx_collection/tests/integration/targets/workflow_job_template/tasks/main.yml index 38119ecfe8..cc44becf99 100644 --- a/awx_collection/tests/integration/targets/workflow_job_template/tasks/main.yml +++ b/awx_collection/tests/integration/targets/workflow_job_template/tasks/main.yml @@ -20,6 +20,12 @@ project_inv: "AWX-Collection-tests-inventory_source-inv-project-{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}" project_inv_source: "AWX-Collection-tests-inventory_source-inv-source-project-{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}" github_webhook_credential_name: "AWX-Collection-tests-credential-webhook-{{ test_id }}_github" + ee1: "AWX-Collection-tests-workflow_job_template-ee1-{{ test_id }}" + label1: "AWX-Collection-tests-workflow_job_template-l1-{{ test_id }}" + label2: "AWX-Collection-tests-workflow_job_template-l2-{{ test_id }}" + ig1: "AWX-Collection-tests-workflow_job_template-ig1-{{ test_id }}" + ig2: "AWX-Collection-tests-workflow_job_template-ig2-{{ test_id }}" + host1: 
"AWX-Collection-tests-workflow_job_template-h1-{{ test_id }}" - block: - name: "Create a new organization" @@ -145,6 +151,18 @@ that: - "project_inv_source_result is changed" + - name: Add a node to demo inventory so we can use a slice count properly + host: + name: "{{ host1 }}" + inventory: Demo Inventory + variables: + ansible_connection: local + register: results + + - assert: + that: + - "result is changed" + - name: Create a Job Template job_template: name: "{{ jt1_name }}" @@ -181,6 +199,12 @@ playbook: hello_world.yml job_type: run state: present + ask_execution_environment_on_launch: true + ask_forks_on_launch: true + ask_instance_groups_on_launch: true + ask_timeout_on_launch: true + ask_job_slice_count_on_launch: true + ask_labels_on_launch: true register: jt2_name_result - assert: @@ -198,6 +222,12 @@ state: present survey_enabled: true survey_spec: '{"spec": [{"index": 0, "question_name": "my question?", "default": "mydef", "variable": "myvar", "type": "text", "required": false}], "description": "test", "name": "test"}' + ask_execution_environment_on_launch: true + ask_forks_on_launch: true + ask_instance_groups_on_launch: true + ask_timeout_on_launch: true + ask_job_slice_count_on_launch: true + ask_labels_on_launch: true register: result - assert: @@ -255,6 +285,26 @@ that: - "result is changed" + - name: Create labels + label: + name: "{{ item }}" + organization: "{{ org_name }}" + loop: + - "{{ label1 }}" + - "{{ label2 }}" + + - name: Create an execution environment + execution_environment: + name: "{{ ee1 }}" + image: "junk" + + - name: Create instance groups + instance_group: + name: "{{ item }}" + loop: + - "{{ ig1 }}" + - "{{ ig2 }}" + # Node actions do what the schema command used to do - name: Create leaf node workflow_job_template_node: @@ -262,6 +312,39 @@ unified_job_template: "{{ jt2_name }}" lookup_organization: "{{ org_name }}" workflow: "{{ wfjt_name }}" + execution_environment: "{{ ee1 }}" + forks: 12 + instance_groups: + - "{{ ig1 
}}" + - "{{ ig2 }}" + job_slice_count: 2 + labels: + - "{{ label1 }}" + - "{{ label2 }}" + timeout: 23 + register: results + + - assert: + that: + - "results is changed" + + - name: Update prompts on leaf node + workflow_job_template_node: + identifier: leaf + unified_job_template: "{{ jt2_name }}" + lookup_organization: "{{ org_name }}" + workflow: "{{ wfjt_name }}" + execution_environment: "" + forks: 1 + instance_groups: [] + job_slice_count: 1 + labels: [] + timeout: 10 + register: results + + - assert: + that: + - "results is changed" - name: Create root node workflow_job_template_node: @@ -815,8 +898,33 @@ state: absent ignore_errors: True + # Labels can not be deleted + + - name: Delete an execution environment + execution_environment: + name: "{{ ee1 }}" + image: "junk" + state: absent + ignore_errors: True + + - name: Delete instance groups + instance_group: + name: "{{ item }}" + state: absent + loop: + - "{{ ig1 }}" + - "{{ ig2 }}" + ignore_errors: True + - name: "Remove the organization" organization: name: "{{ org_name }}" state: absent ignore_errors: True + + - name: Remove node + host: + name: "{{ host1 }}" + inventory: Demo Inventory + state: absent + ignore_errors: True diff --git a/awxkit/awxkit/api/pages/job_templates.py b/awxkit/awxkit/api/pages/job_templates.py index c93c88af97..46862d9f2b 100644 --- a/awxkit/awxkit/api/pages/job_templates.py +++ b/awxkit/awxkit/api/pages/job_templates.py @@ -54,6 +54,12 @@ class JobTemplate(HasCopy, HasCreate, HasInstanceGroups, HasNotifications, HasSu 'ask_tags_on_launch', 'ask_variables_on_launch', 'ask_verbosity_on_launch', + 'ask_execution_environment_on_launch', + 'ask_labels_on_launch', + 'ask_forks_on_launch', + 'ask_job_slice_count_on_launch', + 'ask_timeout_on_launch', + 'ask_instance_groups_on_launch', 'allow_simultaneous', 'become_enabled', 'diff_mode', diff --git a/awxkit/awxkit/api/pages/schedules.py b/awxkit/awxkit/api/pages/schedules.py index af58d59548..3ff9e1c0bb 100644 --- 
a/awxkit/awxkit/api/pages/schedules.py +++ b/awxkit/awxkit/api/pages/schedules.py @@ -47,6 +47,14 @@ class Schedules(page.PageList, Schedule): with suppress(exc.NoContent): self.related.credentials.post(dict(id=cred.id, disassociate=True)) + def add_label(self, label): + with suppress(exc.NoContent): + self.related.labels.post(dict(id=label.id)) + + def add_instance_group(self, instance_group): + with suppress(exc.NoContent): + self.related.instance_groups.post(dict(id=instance_group.id)) + page.register_page([resources.schedules, resources.related_schedules], Schedules) diff --git a/awxkit/awxkit/api/pages/unified_job_templates.py b/awxkit/awxkit/api/pages/unified_job_templates.py index 52e2b82c93..e7499e23e6 100644 --- a/awxkit/awxkit/api/pages/unified_job_templates.py +++ b/awxkit/awxkit/api/pages/unified_job_templates.py @@ -19,6 +19,10 @@ class UnifiedJobTemplate(HasStatus, base.Base): 'job_type', 'verbosity', 'inventory', + 'forks', + 'timeout', + 'job_slice_count', + 'execution_environment', ) def __str__(self): diff --git a/awxkit/awxkit/api/pages/workflow_job_template_nodes.py b/awxkit/awxkit/api/pages/workflow_job_template_nodes.py index bb568af9af..3177c24e50 100644 --- a/awxkit/awxkit/api/pages/workflow_job_template_nodes.py +++ b/awxkit/awxkit/api/pages/workflow_job_template_nodes.py @@ -32,6 +32,11 @@ class WorkflowJobTemplateNode(HasCreate, base.Base): 'extra_data', 'identifier', 'all_parents_must_converge', + # prompt fields for JTs + 'job_slice_count', + 'forks', + 'timeout', + 'execution_environment', ) update_payload(payload, optional_fields, kwargs) @@ -92,6 +97,14 @@ class WorkflowJobTemplateNode(HasCreate, base.Base): candidates = workflow_job.get_related('workflow_nodes', identifier=self.identifier) return candidates.results.pop() + def add_label(self, label): + with suppress(exc.NoContent): + self.related.labels.post(dict(id=label.id)) + + def add_instance_group(self, instance_group): + with suppress(exc.NoContent): + 
self.related.instance_groups.post(dict(id=instance_group.id)) + page.register_page( [resources.workflow_job_template_node, (resources.workflow_job_template_nodes, 'post'), (resources.workflow_job_template_workflow_nodes, 'post')], diff --git a/awxkit/awxkit/api/pages/workflow_job_templates.py b/awxkit/awxkit/api/pages/workflow_job_templates.py index 0eea75fd6f..42a691d5ea 100644 --- a/awxkit/awxkit/api/pages/workflow_job_templates.py +++ b/awxkit/awxkit/api/pages/workflow_job_templates.py @@ -42,6 +42,9 @@ class WorkflowJobTemplate(HasCopy, HasCreate, HasNotifications, HasSurvey, Unifi "ask_inventory_on_launch", "ask_scm_branch_on_launch", "ask_limit_on_launch", + "ask_labels_on_launch", + "ask_skip_tags_on_launch", + "ask_tags_on_launch", "limit", "scm_branch", "survey_enabled", diff --git a/awxkit/awxkit/cli/custom.py b/awxkit/awxkit/cli/custom.py index e65b20e852..b1bd6de93f 100644 --- a/awxkit/awxkit/cli/custom.py +++ b/awxkit/awxkit/cli/custom.py @@ -50,7 +50,7 @@ class Launchable(object): if with_pk: parser.choices[self.action].add_argument('id', type=functools.partial(pk_or_name, None, self.resource, page=self.page), help='') parser.choices[self.action].add_argument('--monitor', action='store_true', help='If set, prints stdout of the launched job until it finishes.') - parser.choices[self.action].add_argument('--timeout', type=int, help='If set with --monitor or --wait, time out waiting on job completion.') # noqa + parser.choices[self.action].add_argument('--action-timeout', type=int, help='If set with --monitor or --wait, time out waiting on job completion.') parser.choices[self.action].add_argument('--wait', action='store_true', help='If set, waits until the launched job finishes.') launch_time_options = self.page.connection.options(self.page.endpoint + '1/{}/'.format(self.action)) @@ -66,7 +66,7 @@ class Launchable(object): response, self.page.connection.session, print_stdout=not kwargs.get('wait'), - timeout=kwargs.get('timeout'), + 
action_timeout=kwargs.get('action_timeout'), ) if status: response.json['status'] = status @@ -78,7 +78,7 @@ class Launchable(object): monitor_kwargs = { 'monitor': kwargs.pop('monitor', False), 'wait': kwargs.pop('wait', False), - 'timeout': kwargs.pop('timeout', False), + 'action_timeout': kwargs.pop('action_timeout', False), } response = self.page.get().related.get(self.action).post(kwargs) self.monitor(response, **monitor_kwargs) diff --git a/awxkit/awxkit/cli/stdout.py b/awxkit/awxkit/cli/stdout.py index 5de134ccc4..ea64f72aa0 100644 --- a/awxkit/awxkit/cli/stdout.py +++ b/awxkit/awxkit/cli/stdout.py @@ -9,7 +9,7 @@ from .utils import cprint, color_enabled, STATUS_COLORS from awxkit.utils import to_str -def monitor_workflow(response, session, print_stdout=True, timeout=None, interval=0.25): +def monitor_workflow(response, session, print_stdout=True, action_timeout=None, interval=0.25): get = response.url.get payload = { 'order_by': 'finished', @@ -46,9 +46,9 @@ def monitor_workflow(response, session, print_stdout=True, timeout=None, interva started = time.time() seen = set() while True: - if timeout and time.time() - started > timeout: + if action_timeout and time.time() - started > action_timeout: if print_stdout: - cprint('Monitoring aborted due to timeout.', 'red') + cprint('Monitoring aborted due to action-timeout.', 'red') break if sys.stdout.isatty(): @@ -68,7 +68,7 @@ def monitor_workflow(response, session, print_stdout=True, timeout=None, interva return get().json.status -def monitor(response, session, print_stdout=True, timeout=None, interval=0.25): +def monitor(response, session, print_stdout=True, action_timeout=None, interval=0.25): get = response.url.get payload = {'order_by': 'start_line', 'no_truncate': True} if response.type == 'job': @@ -97,9 +97,9 @@ def monitor(response, session, print_stdout=True, timeout=None, interval=0.25): started = time.time() while True: - if timeout and time.time() - started > timeout: + if action_timeout and 
time.time() - started > action_timeout: if print_stdout: - cprint('Monitoring aborted due to timeout.', 'red') + cprint('Monitoring aborted due to action-timeout.', 'red') break next_line = fetch(next_line) if next_line: