Merge pull request #12875 from ansible/feature-prompt-on-launch-on-templates

Feature prompt on launch on templates
This commit is contained in:
Alan Rominger
2022-09-23 09:16:02 -04:00
committed by GitHub
122 changed files with 5034 additions and 943 deletions

View File

@@ -63,7 +63,6 @@ __all__ = [
'SubDetailAPIView', 'SubDetailAPIView',
'ResourceAccessList', 'ResourceAccessList',
'ParentMixin', 'ParentMixin',
'DeleteLastUnattachLabelMixin',
'SubListAttachDetachAPIView', 'SubListAttachDetachAPIView',
'CopyAPIView', 'CopyAPIView',
'BaseUsersList', 'BaseUsersList',
@@ -775,28 +774,6 @@ class SubListAttachDetachAPIView(SubListCreateAttachDetachAPIView):
return {'id': None} return {'id': None}
class DeleteLastUnattachLabelMixin(object):
"""
Models for which you want the last instance to be deleted from the database
when the last disassociate is called should inherit from this class. Further,
the model should implement is_detached()
"""
def unattach(self, request, *args, **kwargs):
(sub_id, res) = super(DeleteLastUnattachLabelMixin, self).unattach_validate(request)
if res:
return res
res = super(DeleteLastUnattachLabelMixin, self).unattach_by_id(request, sub_id)
obj = self.model.objects.get(id=sub_id)
if obj.is_detached():
obj.delete()
return res
class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView): class SubDetailAPIView(ParentMixin, generics.RetrieveAPIView, GenericAPIView):
pass pass

View File

@@ -2923,6 +2923,12 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled', 'survey_enabled',
'become_enabled', 'become_enabled',
'diff_mode', 'diff_mode',
@@ -3185,7 +3191,7 @@ class JobRelaunchSerializer(BaseSerializer):
return attrs return attrs
class JobCreateScheduleSerializer(BaseSerializer): class JobCreateScheduleSerializer(LabelsListMixin, BaseSerializer):
can_schedule = serializers.SerializerMethodField() can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField() prompts = serializers.SerializerMethodField()
@@ -3211,11 +3217,14 @@ class JobCreateScheduleSerializer(BaseSerializer):
try: try:
config = obj.launch_config config = obj.launch_config
ret = config.prompts_dict(display=True) ret = config.prompts_dict(display=True)
if 'inventory' in ret: for field_name in ('inventory', 'execution_environment'):
ret['inventory'] = self._summarize('inventory', ret['inventory']) if field_name in ret:
if 'credentials' in ret: ret[field_name] = self._summarize(field_name, ret[field_name])
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']] for field_name, singular in (('credentials', 'credential'), ('instance_groups', 'instance_group')):
ret['credentials'] = all_creds if field_name in ret:
ret[field_name] = [self._summarize(singular, obj) for obj in ret[field_name]]
if 'labels' in ret:
ret['labels'] = self._summary_field_labels(config)
return ret return ret
except JobLaunchConfig.DoesNotExist: except JobLaunchConfig.DoesNotExist:
return {'all': _('Unknown, job may have been ran before launch configurations were saved.')} return {'all': _('Unknown, job may have been ran before launch configurations were saved.')}
@@ -3388,6 +3397,9 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta: class Meta:
model = WorkflowJobTemplate model = WorkflowJobTemplate
fields = ( fields = (
@@ -3406,6 +3418,11 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'-execution_environment', '-execution_environment',
'ask_labels_on_launch',
'ask_skip_tags_on_launch',
'ask_tags_on_launch',
'skip_tags',
'job_tags',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -3449,7 +3466,7 @@ class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJo
# process char_prompts, these are not direct fields on the model # process char_prompts, these are not direct fields on the model
mock_obj = self.Meta.model() mock_obj = self.Meta.model()
for field_name in ('scm_branch', 'limit'): for field_name in ('scm_branch', 'limit', 'skip_tags', 'job_tags'):
if field_name in attrs: if field_name in attrs:
setattr(mock_obj, field_name, attrs[field_name]) setattr(mock_obj, field_name, attrs[field_name])
attrs.pop(field_name) attrs.pop(field_name)
@@ -3475,6 +3492,9 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) scm_branch = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
class Meta: class Meta:
model = WorkflowJob model = WorkflowJob
fields = ( fields = (
@@ -3494,6 +3514,8 @@ class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
'webhook_service', 'webhook_service',
'webhook_credential', 'webhook_credential',
'webhook_guid', 'webhook_guid',
'skip_tags',
'job_tags',
) )
def get_related(self, obj): def get_related(self, obj):
@@ -3610,6 +3632,9 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None) skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None) diff_mode = serializers.BooleanField(required=False, allow_null=True, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES) verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None, choices=VERBOSITY_CHOICES)
forks = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
job_slice_count = serializers.IntegerField(required=False, allow_null=True, min_value=0, default=None)
timeout = serializers.IntegerField(required=False, allow_null=True, default=None)
exclude_errors = () exclude_errors = ()
class Meta: class Meta:
@@ -3625,13 +3650,21 @@ class LaunchConfigurationBaseSerializer(BaseSerializer):
'skip_tags', 'skip_tags',
'diff_mode', 'diff_mode',
'verbosity', 'verbosity',
'execution_environment',
'forks',
'job_slice_count',
'timeout',
) )
def get_related(self, obj): def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj) res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id: if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id}) res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.execution_environment_id:
res['execution_environment'] = self.reverse('api:execution_environment_detail', kwargs={'pk': obj.execution_environment_id})
res['labels'] = self.reverse('api:{}_labels_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk}) res['credentials'] = self.reverse('api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:{}_instance_groups_list'.format(get_type_for_model(self.Meta.model)), kwargs={'pk': obj.pk})
return res return res
def _build_mock_obj(self, attrs): def _build_mock_obj(self, attrs):
@@ -4083,7 +4116,6 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):
class JobLaunchSerializer(BaseSerializer): class JobLaunchSerializer(BaseSerializer):
# Representational fields # Representational fields
passwords_needed_to_start = serializers.ReadOnlyField() passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True) can_start_without_user_input = serializers.BooleanField(read_only=True)
@@ -4106,6 +4138,12 @@ class JobLaunchSerializer(BaseSerializer):
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True) skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True) limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True) verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
forks = serializers.IntegerField(required=False, write_only=True, min_value=0)
job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0)
timeout = serializers.IntegerField(required=False, write_only=True)
instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True)
class Meta: class Meta:
model = JobTemplate model = JobTemplate
@@ -4133,6 +4171,12 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled', 'survey_enabled',
'variables_needed_to_start', 'variables_needed_to_start',
'credential_needed_to_start', 'credential_needed_to_start',
@@ -4140,6 +4184,12 @@ class JobLaunchSerializer(BaseSerializer):
'job_template_data', 'job_template_data',
'defaults', 'defaults',
'verbosity', 'verbosity',
'execution_environment',
'labels',
'forks',
'job_slice_count',
'timeout',
'instance_groups',
) )
read_only_fields = ( read_only_fields = (
'ask_scm_branch_on_launch', 'ask_scm_branch_on_launch',
@@ -4152,6 +4202,12 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
) )
def get_credential_needed_to_start(self, obj): def get_credential_needed_to_start(self, obj):
@@ -4176,6 +4232,17 @@ class JobLaunchSerializer(BaseSerializer):
if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields: if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None) cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict) defaults_dict.setdefault(field_name, []).append(cred_dict)
elif field_name == 'execution_environment':
if obj.execution_environment_id:
defaults_dict[field_name] = {'id': obj.execution_environment.id, 'name': obj.execution_environment.name}
else:
defaults_dict[field_name] = {}
elif field_name == 'labels':
for label in obj.labels.all():
label_dict = {'id': label.id, 'name': label.name}
defaults_dict.setdefault(field_name, []).append(label_dict)
elif field_name == 'instance_groups':
defaults_dict[field_name] = []
else: else:
defaults_dict[field_name] = getattr(obj, field_name) defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict return defaults_dict
@@ -4283,6 +4350,10 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True) scm_branch = serializers.CharField(required=False, write_only=True, allow_blank=True)
workflow_job_template_data = serializers.SerializerMethodField() workflow_job_template_data = serializers.SerializerMethodField()
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
class Meta: class Meta:
model = WorkflowJobTemplate model = WorkflowJobTemplate
fields = ( fields = (
@@ -4302,8 +4373,22 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
'workflow_job_template_data', 'workflow_job_template_data',
'survey_enabled', 'survey_enabled',
'ask_variables_on_launch', 'ask_variables_on_launch',
'ask_labels_on_launch',
'labels',
'ask_skip_tags_on_launch',
'ask_tags_on_launch',
'skip_tags',
'job_tags',
)
read_only_fields = (
'ask_inventory_on_launch',
'ask_variables_on_launch',
'ask_skip_tags_on_launch',
'ask_labels_on_launch',
'ask_limit_on_launch',
'ask_scm_branch_on_launch',
'ask_tags_on_launch',
) )
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj): def get_survey_enabled(self, obj):
if obj: if obj:
@@ -4311,10 +4396,15 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return False return False
def get_defaults(self, obj): def get_defaults(self, obj):
defaults_dict = {} defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys(): for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory': if field_name == 'inventory':
defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None)) defaults_dict[field_name] = dict(name=getattrd(obj, '%s.name' % field_name, None), id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'labels':
for label in obj.labels.all():
label_dict = {"id": label.id, "name": label.name}
defaults_dict.setdefault(field_name, []).append(label_dict)
else: else:
defaults_dict[field_name] = getattr(obj, field_name) defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict return defaults_dict
@@ -4323,6 +4413,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
return dict(name=obj.name, id=obj.id, description=obj.description) return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs): def validate(self, attrs):
template = self.instance template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs) accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
@@ -4340,6 +4431,7 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
WFJT_inventory = template.inventory WFJT_inventory = template.inventory
WFJT_limit = template.limit WFJT_limit = template.limit
WFJT_scm_branch = template.scm_branch WFJT_scm_branch = template.scm_branch
super(WorkflowJobLaunchSerializer, self).validate(attrs) super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory template.inventory = WFJT_inventory
@@ -4731,6 +4823,8 @@ class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSeria
if isinstance(obj.unified_job_template, SystemJobTemplate): if isinstance(obj.unified_job_template, SystemJobTemplate):
summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type summary_fields['unified_job_template']['job_type'] = obj.unified_job_template.job_type
# We are not showing instance groups on summary fields because JTs don't either
if 'inventory' in summary_fields: if 'inventory' in summary_fields:
return summary_fields return summary_fields

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views import LabelList, LabelDetail from awx.api.views.labels import LabelList, LabelDetail
urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')] urls = [re_path(r'^$', LabelList.as_view(), name='label_list'), re_path(r'^(?P<pk>[0-9]+)/$', LabelDetail.as_view(), name='label_detail')]

View File

@@ -3,7 +3,7 @@
from django.urls import re_path from django.urls import re_path
from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList from awx.api.views import ScheduleList, ScheduleDetail, ScheduleUnifiedJobsList, ScheduleCredentialsList, ScheduleLabelsList, ScheduleInstanceGroupList
urls = [ urls = [
@@ -11,6 +11,8 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'), re_path(r'^(?P<pk>[0-9]+)/$', ScheduleDetail.as_view(), name='schedule_detail'),
re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'), re_path(r'^(?P<pk>[0-9]+)/jobs/$', ScheduleUnifiedJobsList.as_view(), name='schedule_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', ScheduleCredentialsList.as_view(), name='schedule_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', ScheduleLabelsList.as_view(), name='schedule_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', ScheduleInstanceGroupList.as_view(), name='schedule_instance_groups_list'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -10,6 +10,8 @@ from awx.api.views import (
WorkflowJobNodeFailureNodesList, WorkflowJobNodeFailureNodesList,
WorkflowJobNodeAlwaysNodesList, WorkflowJobNodeAlwaysNodesList,
WorkflowJobNodeCredentialsList, WorkflowJobNodeCredentialsList,
WorkflowJobNodeLabelsList,
WorkflowJobNodeInstanceGroupsList,
) )
@@ -20,6 +22,8 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobNodeFailureNodesList.as_view(), name='workflow_job_node_failure_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobNodeAlwaysNodesList.as_view(), name='workflow_job_node_always_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobNodeCredentialsList.as_view(), name='workflow_job_node_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobNodeLabelsList.as_view(), name='workflow_job_node_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobNodeInstanceGroupsList.as_view(), name='workflow_job_node_instance_groups_list'),
] ]
__all__ = ['urls'] __all__ = ['urls']

View File

@@ -11,6 +11,8 @@ from awx.api.views import (
WorkflowJobTemplateNodeAlwaysNodesList, WorkflowJobTemplateNodeAlwaysNodesList,
WorkflowJobTemplateNodeCredentialsList, WorkflowJobTemplateNodeCredentialsList,
WorkflowJobTemplateNodeCreateApproval, WorkflowJobTemplateNodeCreateApproval,
WorkflowJobTemplateNodeLabelsList,
WorkflowJobTemplateNodeInstanceGroupsList,
) )
@@ -21,6 +23,8 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/failure_nodes/$', WorkflowJobTemplateNodeFailureNodesList.as_view(), name='workflow_job_template_node_failure_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'), re_path(r'^(?P<pk>[0-9]+)/always_nodes/$', WorkflowJobTemplateNodeAlwaysNodesList.as_view(), name='workflow_job_template_node_always_nodes_list'),
re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'), re_path(r'^(?P<pk>[0-9]+)/credentials/$', WorkflowJobTemplateNodeCredentialsList.as_view(), name='workflow_job_template_node_credentials_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', WorkflowJobTemplateNodeLabelsList.as_view(), name='workflow_job_template_node_labels_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', WorkflowJobTemplateNodeInstanceGroupsList.as_view(), name='workflow_job_template_node_instance_groups_list'),
re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'), re_path(r'^(?P<pk>[0-9]+)/create_approval_template/$', WorkflowJobTemplateNodeCreateApproval.as_view(), name='workflow_job_template_node_create_approval'),
] ]

View File

@@ -22,6 +22,7 @@ from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum from django.db.models import Q, Sum
from django.db import IntegrityError, ProgrammingError, transaction, connection from django.db import IntegrityError, ProgrammingError, transaction, connection
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.shortcuts import get_object_or_404 from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe from django.utils.safestring import mark_safe
from django.utils.timezone import now from django.utils.timezone import now
@@ -68,7 +69,6 @@ from awx.api.generics import (
APIView, APIView,
BaseUsersList, BaseUsersList,
CopyAPIView, CopyAPIView,
DeleteLastUnattachLabelMixin,
GenericAPIView, GenericAPIView,
ListAPIView, ListAPIView,
ListCreateAPIView, ListCreateAPIView,
@@ -85,6 +85,7 @@ from awx.api.generics import (
SubListCreateAttachDetachAPIView, SubListCreateAttachDetachAPIView,
SubListDestroyAPIView, SubListDestroyAPIView,
) )
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main import models from awx.main import models
from awx.main.utils import ( from awx.main.utils import (
@@ -617,6 +618,19 @@ class ScheduleCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.Schedule parent_model = models.Schedule
class ScheduleLabelsList(LabelSubListCreateAttachDetachView):
parent_model = models.Schedule
class ScheduleInstanceGroupList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.Schedule
relationship = 'instance_groups'
class ScheduleUnifiedJobsList(SubListAPIView): class ScheduleUnifiedJobsList(SubListAPIView):
model = models.UnifiedJob model = models.UnifiedJob
@@ -2381,10 +2395,13 @@ class JobTemplateLaunch(RetrieveAPIView):
for field, ask_field_name in modified_ask_mapping.items(): for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name): if not getattr(obj, ask_field_name):
data.pop(field, None) data.pop(field, None)
elif field == 'inventory': elif isinstance(getattr(obj.__class__, field).field, ForeignKey):
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None) data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
elif field == 'credentials': elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
data[field] = [cred.id for cred in obj.credentials.all()] if field == 'instance_groups':
data[field] = []
continue
data[field] = [item.id for item in getattr(obj, field).all()]
else: else:
data[field] = getattr(obj, field) data[field] = getattr(obj, field)
return data return data
@@ -2719,28 +2736,9 @@ class JobTemplateCredentialsList(SubListCreateAttachDetachAPIView):
return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created) return super(JobTemplateCredentialsList, self).is_valid_relation(parent, sub, created)
class JobTemplateLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView): class JobTemplateLabelList(LabelSubListCreateAttachDetachView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.JobTemplate parent_model = models.JobTemplate
relationship = 'labels'
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of erroring out
# that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = models.Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
if models.Label.objects.filter(unifiedjobtemplate_labels=self.kwargs['pk']).count() > 100:
return Response(
dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
)
return super(JobTemplateLabelList, self).post(request, *args, **kwargs)
class JobTemplateCallback(GenericAPIView): class JobTemplateCallback(GenericAPIView):
@@ -2966,6 +2964,22 @@ class WorkflowJobNodeCredentialsList(SubListAPIView):
relationship = 'credentials' relationship = 'credentials'
class WorkflowJobNodeLabelsList(SubListAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
parent_model = models.WorkflowJobNode
relationship = 'labels'
class WorkflowJobNodeInstanceGroupsList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.WorkflowJobNode
relationship = 'instance_groups'
class WorkflowJobTemplateNodeList(ListCreateAPIView): class WorkflowJobTemplateNodeList(ListCreateAPIView):
model = models.WorkflowJobTemplateNode model = models.WorkflowJobTemplateNode
@@ -2984,6 +2998,19 @@ class WorkflowJobTemplateNodeCredentialsList(LaunchConfigCredentialsBase):
parent_model = models.WorkflowJobTemplateNode parent_model = models.WorkflowJobTemplateNode
class WorkflowJobTemplateNodeLabelsList(LabelSubListCreateAttachDetachView):
parent_model = models.WorkflowJobTemplateNode
class WorkflowJobTemplateNodeInstanceGroupsList(SubListAttachDetachAPIView):
model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.WorkflowJobTemplateNode
relationship = 'instance_groups'
class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView): class WorkflowJobTemplateNodeChildrenBaseList(EnforceParentRelationshipMixin, SubListCreateAttachDetachAPIView):
model = models.WorkflowJobTemplateNode model = models.WorkflowJobTemplateNode
@@ -3196,13 +3223,17 @@ class WorkflowJobTemplateLaunch(RetrieveAPIView):
data['extra_vars'] = extra_vars data['extra_vars'] = extra_vars
modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping() modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping()
modified_ask_mapping.pop('extra_vars') modified_ask_mapping.pop('extra_vars')
for field_name, ask_field_name in obj.get_ask_mapping().items():
for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name): if not getattr(obj, ask_field_name):
data.pop(field_name, None) data.pop(field, None)
elif field_name == 'inventory': elif isinstance(getattr(obj.__class__, field).field, ForeignKey):
data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None) data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
data[field] = [item.id for item in getattr(obj, field).all()]
else: else:
data[field_name] = getattr(obj, field_name) data[field] = getattr(obj, field)
return data return data
def post(self, request, *args, **kwargs): def post(self, request, *args, **kwargs):
@@ -3689,15 +3720,21 @@ class JobCreateSchedule(RetrieveAPIView):
extra_data=config.extra_data, extra_data=config.extra_data,
survey_passwords=config.survey_passwords, survey_passwords=config.survey_passwords,
inventory=config.inventory, inventory=config.inventory,
execution_environment=config.execution_environment,
char_prompts=config.char_prompts, char_prompts=config.char_prompts,
credentials=set(config.credentials.all()), credentials=set(config.credentials.all()),
labels=set(config.labels.all()),
instance_groups=list(config.instance_groups.all()),
) )
if not request.user.can_access(models.Schedule, 'add', schedule_data): if not request.user.can_access(models.Schedule, 'add', schedule_data):
raise PermissionDenied() raise PermissionDenied()
creds_list = schedule_data.pop('credentials') related_fields = ('credentials', 'labels', 'instance_groups')
related = [schedule_data.pop(relationship) for relationship in related_fields]
schedule = models.Schedule.objects.create(**schedule_data) schedule = models.Schedule.objects.create(**schedule_data)
schedule.credentials.add(*creds_list) for relationship, items in zip(related_fields, related):
for item in items:
getattr(schedule, relationship).add(item)
data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data data = serializers.ScheduleSerializer(schedule, context=self.get_serializer_context()).data
data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model data.serializer.instance = None # hack to avoid permissions.py assuming this is Job model
@@ -4428,18 +4465,6 @@ class NotificationDetail(RetrieveAPIView):
serializer_class = serializers.NotificationSerializer serializer_class = serializers.NotificationSerializer
class LabelList(ListCreateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class LabelDetail(RetrieveUpdateAPIView):
model = models.Label
serializer_class = serializers.LabelSerializer
class ActivityStreamList(SimpleListAPIView): class ActivityStreamList(SimpleListAPIView):
model = models.ActivityStream model = models.ActivityStream

View File

@@ -18,8 +18,6 @@ from rest_framework import status
# AWX # AWX
from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate from awx.main.models import ActivityStream, Inventory, JobTemplate, Role, User, InstanceGroup, InventoryUpdateEvent, InventoryUpdate
from awx.main.models.label import Label
from awx.api.generics import ( from awx.api.generics import (
ListCreateAPIView, ListCreateAPIView,
RetrieveUpdateDestroyAPIView, RetrieveUpdateDestroyAPIView,
@@ -27,9 +25,8 @@ from awx.api.generics import (
SubListAttachDetachAPIView, SubListAttachDetachAPIView,
ResourceAccessList, ResourceAccessList,
CopyAPIView, CopyAPIView,
DeleteLastUnattachLabelMixin,
SubListCreateAttachDetachAPIView,
) )
from awx.api.views.labels import LabelSubListCreateAttachDetachView
from awx.api.serializers import ( from awx.api.serializers import (
@@ -39,7 +36,6 @@ from awx.api.serializers import (
InstanceGroupSerializer, InstanceGroupSerializer,
InventoryUpdateEventSerializer, InventoryUpdateEventSerializer,
JobTemplateSerializer, JobTemplateSerializer,
LabelSerializer,
) )
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin from awx.api.views.mixin import RelatedJobsPreventDeleteMixin
@@ -157,28 +153,9 @@ class InventoryJobTemplateList(SubListAPIView):
return qs.filter(inventory=parent) return qs.filter(inventory=parent)
class InventoryLabelList(DeleteLastUnattachLabelMixin, SubListCreateAttachDetachAPIView, SubListAPIView): class InventoryLabelList(LabelSubListCreateAttachDetachView):
model = Label
serializer_class = LabelSerializer
parent_model = Inventory parent_model = Inventory
relationship = 'labels'
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of erroring out
# that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
if Label.objects.filter(inventory_labels=self.kwargs['pk']).count() > 100:
return Response(
dict(msg=_('Maximum number of labels for {} reached.'.format(self.parent_model._meta.verbose_name_raw))), status=status.HTTP_400_BAD_REQUEST
)
return super(InventoryLabelList, self).post(request, *args, **kwargs)
class InventoryCopy(CopyAPIView): class InventoryCopy(CopyAPIView):

71
awx/api/views/labels.py Normal file
View File

@@ -0,0 +1,71 @@
# AWX
from awx.api.generics import SubListCreateAttachDetachAPIView, RetrieveUpdateAPIView, ListCreateAPIView
from awx.main.models import Label
from awx.api.serializers import LabelSerializer
# Django
from django.utils.translation import gettext_lazy as _
# Django REST Framework
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
class LabelSubListCreateAttachDetachView(SubListCreateAttachDetachAPIView):
"""
For related labels lists like /api/v2/inventories/N/labels/
We want want the last instance to be deleted from the database
when the last disassociate happens.
Subclasses need to define parent_model
"""
model = Label
serializer_class = LabelSerializer
relationship = 'labels'
def unattach(self, request, *args, **kwargs):
(sub_id, res) = super().unattach_validate(request)
if res:
return res
res = super().unattach_by_id(request, sub_id)
obj = self.model.objects.get(id=sub_id)
if obj.is_detached():
obj.delete()
return res
def post(self, request, *args, **kwargs):
# If a label already exists in the database, attach it instead of erroring out
# that it already exists
if 'id' not in request.data and 'name' in request.data and 'organization' in request.data:
existing = Label.objects.filter(name=request.data['name'], organization_id=request.data['organization'])
if existing.exists():
existing = existing[0]
request.data['id'] = existing.id
del request.data['name']
del request.data['organization']
# Give a 400 error if we have attached too many labels to this object
label_filter = self.parent_model._meta.get_field(self.relationship).remote_field.name
if Label.objects.filter(**{label_filter: self.kwargs['pk']}).count() > 100:
return Response(dict(msg=_(f'Maximum number of labels for {self.parent_model._meta.verbose_name_raw} reached.')), status=HTTP_400_BAD_REQUEST)
return super().post(request, *args, **kwargs)
class LabelDetail(RetrieveUpdateAPIView):
model = Label
serializer_class = LabelSerializer
class LabelList(ListCreateAPIView):
name = _("Labels")
model = Label
serializer_class = LabelSerializer

View File

@@ -12,7 +12,7 @@ from django.conf import settings
from django.db.models import Q, Prefetch from django.db.models import Q, Prefetch
from django.contrib.auth.models import User from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _ from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
# Django REST Framework # Django REST Framework
from rest_framework.exceptions import ParseError, PermissionDenied from rest_framework.exceptions import ParseError, PermissionDenied
@@ -281,13 +281,23 @@ class BaseAccess(object):
""" """
return True return True
def assure_relationship_exists(self, obj, relationship):
if '.' in relationship:
return # not attempting validation for complex relationships now
try:
obj._meta.get_field(relationship)
except FieldDoesNotExist:
raise NotImplementedError(f'The relationship {relationship} does not exist for model {type(obj)}')
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
self.assure_relationship_exists(obj, relationship)
if skip_sub_obj_read_check: if skip_sub_obj_read_check:
return self.can_change(obj, None) return self.can_change(obj, None)
else: else:
return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj)) return bool(self.can_change(obj, None) and self.user.can_access(type(sub_obj), 'read', sub_obj))
def can_unattach(self, obj, sub_obj, relationship, data=None): def can_unattach(self, obj, sub_obj, relationship, data=None):
self.assure_relationship_exists(obj, relationship)
return self.can_change(obj, data) return self.can_change(obj, data)
def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False): def check_related(self, field, Model, data, role_field='admin_role', obj=None, mandatory=False):
@@ -328,6 +338,8 @@ class BaseAccess(object):
role = getattr(resource, role_field, None) role = getattr(resource, role_field, None)
if role is None: if role is None:
# Handle special case where resource does not have direct roles # Handle special case where resource does not have direct roles
if role_field == 'read_role':
return self.user.can_access(type(resource), 'read', resource)
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field] access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
return self.user.can_access(type(resource), access_method_type, resource, None) return self.user.can_access(type(resource), access_method_type, resource, None)
return self.user in role return self.user in role
@@ -499,6 +511,21 @@ class BaseAccess(object):
return False return False
class UnifiedCredentialsMixin(BaseAccess):
"""
The credentials many-to-many is a standard relationship for JT, jobs, and others
Permission to attach is always use permission, and permission to unattach is admin to the parent object
"""
@check_superuser
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == 'credentials':
if not isinstance(sub_obj, Credential):
raise RuntimeError(f'Can only attach credentials to credentials relationship, got {type(sub_obj)}')
return self.can_change(obj, None) and (self.user in sub_obj.use_role)
return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
class NotificationAttachMixin(BaseAccess): class NotificationAttachMixin(BaseAccess):
"""For models that can have notifications attached """For models that can have notifications attached
@@ -1031,7 +1058,7 @@ class GroupAccess(BaseAccess):
return bool(obj and self.user in obj.inventory.admin_role) return bool(obj and self.user in obj.inventory.admin_role)
class InventorySourceAccess(NotificationAttachMixin, BaseAccess): class InventorySourceAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
""" """
I can see inventory sources whenever I can see their inventory. I can see inventory sources whenever I can see their inventory.
I can change inventory sources whenever I can change their inventory. I can change inventory sources whenever I can change their inventory.
@@ -1075,18 +1102,6 @@ class InventorySourceAccess(NotificationAttachMixin, BaseAccess):
return self.user in obj.inventory.update_role return self.user in obj.inventory.update_role
return False return False
@check_superuser
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return obj and obj.inventory and self.user in obj.inventory.admin_role and self.user in sub_obj.use_role
return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return obj and obj.inventory and self.user in obj.inventory.admin_role
return super(InventorySourceAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
class InventoryUpdateAccess(BaseAccess): class InventoryUpdateAccess(BaseAccess):
""" """
@@ -1485,7 +1500,7 @@ class ProjectUpdateAccess(BaseAccess):
return obj and self.user in obj.project.admin_role return obj and self.user in obj.project.admin_role
class JobTemplateAccess(NotificationAttachMixin, BaseAccess): class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAccess):
""" """
I can see job templates when: I can see job templates when:
- I have read role for the job template. - I have read role for the job template.
@@ -1549,8 +1564,7 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
if self.user not in inventory.use_role: if self.user not in inventory.use_role:
return False return False
ee = get_value(ExecutionEnvironment, 'execution_environment') if not self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role'):
if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False return False
project = get_value(Project, 'project') project = get_value(Project, 'project')
@@ -1600,10 +1614,8 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
if self.changes_are_non_sensitive(obj, data): if self.changes_are_non_sensitive(obj, data):
return True return True
if data.get('execution_environment'): if not self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'):
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data) return False
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False
for required_field, cls in (('inventory', Inventory), ('project', Project)): for required_field, cls in (('inventory', Inventory), ('project', Project)):
is_mandatory = True is_mandatory = True
@@ -1667,17 +1679,13 @@ class JobTemplateAccess(NotificationAttachMixin, BaseAccess):
if not obj.organization: if not obj.organization:
return False return False
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
if relationship == 'credentials' and isinstance(sub_obj, Credential):
return self.user in obj.admin_role and self.user in sub_obj.use_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check) return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
@check_superuser @check_superuser
def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs): def can_unattach(self, obj, sub_obj, relationship, *args, **kwargs):
if relationship == "instance_groups": if relationship == "instance_groups":
return self.can_attach(obj, sub_obj, relationship, *args, **kwargs) return self.can_attach(obj, sub_obj, relationship, *args, **kwargs)
if relationship == 'credentials' and isinstance(sub_obj, Credential): return super(JobTemplateAccess, self).can_unattach(obj, sub_obj, relationship, *args, **kwargs)
return self.user in obj.admin_role
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
class JobAccess(BaseAccess): class JobAccess(BaseAccess):
@@ -1824,7 +1832,7 @@ class SystemJobAccess(BaseAccess):
return False # no relaunching of system jobs return False # no relaunching of system jobs
class JobLaunchConfigAccess(BaseAccess): class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
""" """
Launch configs must have permissions checked for Launch configs must have permissions checked for
- relaunching - relaunching
@@ -1832,63 +1840,69 @@ class JobLaunchConfigAccess(BaseAccess):
In order to create a new object with a copy of this launch config, I need: In order to create a new object with a copy of this launch config, I need:
- use access to related inventory (if present) - use access to related inventory (if present)
- read access to Execution Environment (if present), unless the specified ee is already in the template
- use role to many-related credentials (if any present) - use role to many-related credentials (if any present)
- read access to many-related labels (if any present), unless the specified label is already in the template
- read access to many-related instance groups (if any present), unless the specified instance group is already in the template
""" """
model = JobLaunchConfig model = JobLaunchConfig
select_related = 'job' select_related = 'job'
prefetch_related = ('credentials', 'inventory') prefetch_related = ('credentials', 'inventory')
def _unusable_creds_exist(self, qs): M2M_CHECKS = {'credentials': Credential, 'labels': Label, 'instance_groups': InstanceGroup}
return qs.exclude(pk__in=Credential._accessible_pk_qs(Credential, self.user, 'use_role')).exists()
def has_credentials_access(self, obj): def _related_filtered_queryset(self, cls):
# user has access if no related credentials exist that the user lacks use role for if cls is Label:
return not self._unusable_creds_exist(obj.credentials) return LabelAccess(self.user).filtered_queryset()
elif cls is InstanceGroup:
return InstanceGroupAccess(self.user).filtered_queryset()
else:
return cls._accessible_pk_qs(cls, self.user, 'use_role')
def has_obj_m2m_access(self, obj):
for relationship, cls in self.M2M_CHECKS.items():
if getattr(obj, relationship).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
return False
return True
@check_superuser @check_superuser
def can_add(self, data, template=None): def can_add(self, data, template=None):
# This is a special case, we don't check related many-to-many elsewhere # This is a special case, we don't check related many-to-many elsewhere
# launch RBAC checks use this # launch RBAC checks use this
if 'credentials' in data and data['credentials'] or 'reference_obj' in data: if 'reference_obj' in data:
if 'reference_obj' in data: if not self.has_obj_m2m_access(data['reference_obj']):
prompted_cred_qs = data['reference_obj'].credentials.all()
else:
# If given model objects, only use the primary key from them
cred_pks = [cred.pk for cred in data['credentials']]
if template:
for cred in template.credentials.all():
if cred.pk in cred_pks:
cred_pks.remove(cred.pk)
prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks)
if self._unusable_creds_exist(prompted_cred_qs):
return False return False
return self.check_related('inventory', Inventory, data, role_field='use_role') else:
for relationship, cls in self.M2M_CHECKS.items():
if relationship in data and data[relationship]:
# If given model objects, only use the primary key from them
sub_obj_pks = [sub_obj.pk for sub_obj in data[relationship]]
if template:
for sub_obj in getattr(template, relationship).all():
if sub_obj.pk in sub_obj_pks:
sub_obj_pks.remove(sub_obj.pk)
if cls.objects.filter(pk__in=sub_obj_pks).exclude(pk__in=self._related_filtered_queryset(cls)).exists():
return False
return self.check_related('inventory', Inventory, data, role_field='use_role') and self.check_related(
'execution_environment', ExecutionEnvironment, data, role_field='read_role'
)
@check_superuser @check_superuser
def can_use(self, obj): def can_use(self, obj):
return self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and self.has_credentials_access(obj) return (
self.has_obj_m2m_access(obj)
and self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True)
and self.check_related('execution_environment', ExecutionEnvironment, {}, obj=obj, role_field='read_role')
)
def can_change(self, obj, data): def can_change(self, obj, data):
return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role') and self.check_related(
'execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role'
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): )
if isinstance(sub_obj, Credential) and relationship == 'credentials':
return self.user in sub_obj.use_role
else:
raise NotImplementedError('Only credentials can be attached to launch configurations.')
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if isinstance(sub_obj, Credential) and relationship == 'credentials':
if skip_sub_obj_read_check:
return True
else:
return self.user in sub_obj.read_role
else:
raise NotImplementedError('Only credentials can be attached to launch configurations.')
class WorkflowJobTemplateNodeAccess(BaseAccess): class WorkflowJobTemplateNodeAccess(UnifiedCredentialsMixin, BaseAccess):
""" """
I can see/use a WorkflowJobTemplateNode if I have read permission I can see/use a WorkflowJobTemplateNode if I have read permission
to associated Workflow Job Template to associated Workflow Job Template
@@ -1911,7 +1925,7 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
""" """
model = WorkflowJobTemplateNode model = WorkflowJobTemplateNode
prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'credentials', 'workflow_job_template') prefetch_related = ('success_nodes', 'failure_nodes', 'always_nodes', 'unified_job_template', 'workflow_job_template')
def filtered_queryset(self): def filtered_queryset(self):
return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role')) return self.model.objects.filter(workflow_job_template__in=WorkflowJobTemplate.accessible_objects(self.user, 'read_role'))
@@ -1923,7 +1937,8 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
return ( return (
self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True) self.check_related('workflow_job_template', WorkflowJobTemplate, data, mandatory=True)
and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role') and self.check_related('unified_job_template', UnifiedJobTemplate, data, role_field='execute_role')
and JobLaunchConfigAccess(self.user).can_add(data) and self.check_related('inventory', Inventory, data, role_field='use_role')
and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
) )
def wfjt_admin(self, obj): def wfjt_admin(self, obj):
@@ -1932,17 +1947,14 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
else: else:
return self.user in obj.workflow_job_template.admin_role return self.user in obj.workflow_job_template.admin_role
def ujt_execute(self, obj): def ujt_execute(self, obj, data=None):
if not obj.unified_job_template: if not obj.unified_job_template:
return True return True
return self.check_related('unified_job_template', UnifiedJobTemplate, {}, obj=obj, role_field='execute_role', mandatory=True) return self.check_related('unified_job_template', UnifiedJobTemplate, data, obj=obj, role_field='execute_role', mandatory=True)
def can_change(self, obj, data): def can_change(self, obj, data):
if not data:
return True
# should not be able to edit the prompts if lacking access to UJT or WFJT # should not be able to edit the prompts if lacking access to UJT or WFJT
return self.ujt_execute(obj) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data) return self.ujt_execute(obj, data=data) and self.wfjt_admin(obj) and JobLaunchConfigAccess(self.user).can_change(obj, data)
def can_delete(self, obj): def can_delete(self, obj):
return self.wfjt_admin(obj) return self.wfjt_admin(obj)
@@ -1955,29 +1967,14 @@ class WorkflowJobTemplateNodeAccess(BaseAccess):
return True return True
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
if not self.wfjt_admin(obj): if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return False return self.wfjt_admin(obj) and self.check_same_WFJT(obj, sub_obj)
if relationship == 'credentials': return super().can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
# Need permission to related template to attach a credential
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False): def can_unattach(self, obj, sub_obj, relationship, data=None):
if not self.wfjt_admin(obj): if relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return False return self.wfjt_admin(obj)
if relationship == 'credentials': return super().can_unattach(obj, sub_obj, relationship, data=None)
if not self.ujt_execute(obj):
return False
return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
elif relationship in ('success_nodes', 'failure_nodes', 'always_nodes'):
return self.check_same_WFJT(obj, sub_obj)
else:
raise NotImplementedError('Relationship {} not understood for WFJT nodes.'.format(relationship))
class WorkflowJobNodeAccess(BaseAccess): class WorkflowJobNodeAccess(BaseAccess):
@@ -2052,13 +2049,10 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if not data: # So the browseable API will work if not data: # So the browseable API will work
return Organization.accessible_objects(self.user, 'workflow_admin_role').exists() return Organization.accessible_objects(self.user, 'workflow_admin_role').exists()
if data.get('execution_environment'): return bool(
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data) self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True)
if not self.user.can_access(ExecutionEnvironment, 'read', ee): and self.check_related('inventory', Inventory, data, role_field='use_role')
return False and self.check_related('execution_environment', ExecutionEnvironment, data, role_field='read_role')
return self.check_related('organization', Organization, data, role_field='workflow_admin_role', mandatory=True) and self.check_related(
'inventory', Inventory, data, role_field='use_role'
) )
def can_copy(self, obj): def can_copy(self, obj):
@@ -2104,14 +2098,10 @@ class WorkflowJobTemplateAccess(NotificationAttachMixin, BaseAccess):
if self.user.is_superuser: if self.user.is_superuser:
return True return True
if data and data.get('execution_environment'):
ee = get_object_from_data('execution_environment', ExecutionEnvironment, data)
if not self.user.can_access(ExecutionEnvironment, 'read', ee):
return False
return ( return (
self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj) self.check_related('organization', Organization, data, role_field='workflow_admin_role', obj=obj)
and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj) and self.check_related('inventory', Inventory, data, role_field='use_role', obj=obj)
and self.check_related('execution_environment', ExecutionEnvironment, data, obj=obj, role_field='read_role')
and self.user in obj.admin_role and self.user in obj.admin_role
) )
@@ -2518,7 +2508,7 @@ class UnifiedJobAccess(BaseAccess):
return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True) return super(UnifiedJobAccess, self).get_queryset().filter(workflowapproval__isnull=True)
class ScheduleAccess(BaseAccess): class ScheduleAccess(UnifiedCredentialsMixin, BaseAccess):
""" """
I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access I can see a schedule if I can see it's related unified job, I can create them or update them if I have write access
""" """
@@ -2559,12 +2549,6 @@ class ScheduleAccess(BaseAccess):
def can_delete(self, obj): def can_delete(self, obj):
return self.can_change(obj, {}) return self.can_change(obj, {})
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return JobLaunchConfigAccess(self.user).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
def can_unattach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
return JobLaunchConfigAccess(self.user).can_unattach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
class NotificationTemplateAccess(BaseAccess): class NotificationTemplateAccess(BaseAccess):
""" """

View File

@@ -0,0 +1,225 @@
# Generated by Django 3.2.13 on 2022-09-15 14:07
import awx.main.fields
import awx.main.utils.polymorphic
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0168_inventoryupdate_scm_revision'),
]
operations = [
migrations.AddField(
model_name='joblaunchconfig',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='joblaunchconfig_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='joblaunchconfig',
name='labels',
field=models.ManyToManyField(related_name='joblaunchconfig_labels', to='main.Label'),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_execution_environment_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_forks_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_instance_groups_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_job_slice_count_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_labels_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_timeout_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='schedule',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='schedule_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='schedule',
name='labels',
field=models.ManyToManyField(related_name='schedule_labels', to='main.Label'),
),
migrations.AddField(
model_name='workflowjobnode',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='workflowjobnode_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='workflowjobnode',
name='labels',
field=models.ManyToManyField(related_name='workflowjobnode_labels', to='main.Label'),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='ask_labels_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='ask_skip_tags_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='workflowjobtemplate',
name='ask_tags_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='workflowjobtemplatenode',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
help_text='The container image to be used for execution.',
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='workflowjobtemplatenode_as_prompt',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='workflowjobtemplatenode',
name='labels',
field=models.ManyToManyField(related_name='workflowjobtemplatenode_labels', to='main.Label'),
),
migrations.CreateModel(
name='WorkflowJobTemplateNodeBaseInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('workflowjobtemplatenode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobtemplatenode')),
],
),
migrations.CreateModel(
name='WorkflowJobNodeBaseInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjobnode')),
],
),
migrations.CreateModel(
name='WorkflowJobInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('workflowjobnode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.workflowjob')),
],
),
migrations.CreateModel(
name='ScheduleInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.schedule')),
],
),
migrations.CreateModel(
name='JobLaunchConfigInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('joblaunchconfig', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.joblaunchconfig')),
],
),
migrations.AddField(
model_name='joblaunchconfig',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True, editable=False, related_name='joblaunchconfigs', through='main.JobLaunchConfigInstanceGroupMembership', to='main.InstanceGroup'
),
),
migrations.AddField(
model_name='schedule',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True, editable=False, related_name='schedule_instance_groups', through='main.ScheduleInstanceGroupMembership', to='main.InstanceGroup'
),
),
migrations.AddField(
model_name='workflowjob',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True,
editable=False,
related_name='workflow_job_instance_groups',
through='main.WorkflowJobInstanceGroupMembership',
to='main.InstanceGroup',
),
),
migrations.AddField(
model_name='workflowjobnode',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True,
editable=False,
related_name='workflow_job_node_instance_groups',
through='main.WorkflowJobNodeBaseInstanceGroupMembership',
to='main.InstanceGroup',
),
),
migrations.AddField(
model_name='workflowjobtemplatenode',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True,
editable=False,
related_name='workflow_job_template_node_instance_groups',
through='main.WorkflowJobTemplateNodeBaseInstanceGroupMembership',
to='main.InstanceGroup',
),
),
]

View File

@@ -434,3 +434,58 @@ class InventoryInstanceGroupMembership(models.Model):
default=None, default=None,
db_index=True, db_index=True,
) )
class JobLaunchConfigInstanceGroupMembership(models.Model):
    """Through model joining a JobLaunchConfig to its prompted InstanceGroups.

    Used by an ordered many-to-many relation, so each row carries an explicit
    position in addition to the two foreign keys.
    """

    joblaunchconfig = models.ForeignKey('JobLaunchConfig', on_delete=models.CASCADE)
    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
    # Sort position of this instance group within the config's ordered list;
    # nullable (default None) and indexed so ordered reads stay cheap.
    # presumably 0-based and assigned by the ordered M2M field — TODO confirm
    position = models.PositiveIntegerField(null=True, default=None, db_index=True)
class ScheduleInstanceGroupMembership(models.Model):
    """Through model joining a Schedule to its prompted InstanceGroups.

    Backs an ordered many-to-many relation; each row stores its place in the
    schedule's instance-group list alongside the two foreign keys.
    """

    schedule = models.ForeignKey('Schedule', on_delete=models.CASCADE)
    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
    # Sort position within the schedule's ordered instance-group list;
    # nullable (default None) and indexed for ordered lookups.
    position = models.PositiveIntegerField(null=True, default=None, db_index=True)
class WorkflowJobTemplateNodeBaseInstanceGroupMembership(models.Model):
    """Through model joining a WorkflowJobTemplateNode to its InstanceGroups.

    Backs an ordered many-to-many relation, so ordering is persisted per row.
    """

    workflowjobtemplatenode = models.ForeignKey('WorkflowJobTemplateNode', on_delete=models.CASCADE)
    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
    # Sort position within the node's ordered instance-group list;
    # nullable (default None) and indexed for ordered lookups.
    position = models.PositiveIntegerField(null=True, default=None, db_index=True)
class WorkflowJobNodeBaseInstanceGroupMembership(models.Model):
    """Through model joining a WorkflowJobNode to its InstanceGroups.

    Backs an ordered many-to-many relation, so ordering is persisted per row.
    """

    workflowjobnode = models.ForeignKey('WorkflowJobNode', on_delete=models.CASCADE)
    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
    # Sort position within the node's ordered instance-group list;
    # nullable (default None) and indexed for ordered lookups.
    position = models.PositiveIntegerField(null=True, default=None, db_index=True)
class WorkflowJobInstanceGroupMembership(models.Model):
    """Through model joining a WorkflowJob to its InstanceGroups.

    Backs an ordered many-to-many relation, so ordering is persisted per row.
    """

    # NOTE(review): attribute is named 'workflowjobnode' but the FK targets
    # 'WorkflowJob' (Django resolves the through-relation by FK target type, and
    # the DB column name derives from this attribute, so renaming it would
    # require a migration) — confirm the name mismatch is intentional upstream.
    workflowjobnode = models.ForeignKey('WorkflowJob', on_delete=models.CASCADE)
    instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
    # Sort position within the workflow job's ordered instance-group list;
    # nullable (default None) and indexed for ordered lookups.
    position = models.PositiveIntegerField(null=True, default=None, db_index=True)

View File

@@ -43,8 +43,8 @@ from awx.main.models.notifications import (
NotificationTemplate, NotificationTemplate,
JobNotificationMixin, JobNotificationMixin,
) )
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import ( from awx.main.models.mixins import (
ResourceMixin, ResourceMixin,
SurveyJobTemplateMixin, SurveyJobTemplateMixin,
@@ -227,15 +227,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
blank=True, blank=True,
default=False, default=False,
) )
ask_limit_on_launch = AskForField(
blank=True,
default=False,
)
ask_tags_on_launch = AskForField(blank=True, default=False, allows_field='job_tags')
ask_skip_tags_on_launch = AskForField(
blank=True,
default=False,
)
ask_job_type_on_launch = AskForField( ask_job_type_on_launch = AskForField(
blank=True, blank=True,
default=False, default=False,
@@ -244,12 +235,27 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
blank=True, blank=True,
default=False, default=False,
) )
ask_inventory_on_launch = AskForField( ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials')
ask_execution_environment_on_launch = AskForField(
blank=True,
default=False,
)
ask_forks_on_launch = AskForField(
blank=True,
default=False,
)
ask_job_slice_count_on_launch = AskForField(
blank=True,
default=False,
)
ask_timeout_on_launch = AskForField(
blank=True,
default=False,
)
ask_instance_groups_on_launch = AskForField(
blank=True, blank=True,
default=False, default=False,
) )
ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials')
ask_scm_branch_on_launch = AskForField(blank=True, default=False, allows_field='scm_branch')
job_slice_count = models.PositiveIntegerField( job_slice_count = models.PositiveIntegerField(
blank=True, blank=True,
default=1, default=1,
@@ -276,7 +282,17 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
@classmethod @classmethod
def _get_unified_job_field_names(cls): def _get_unified_job_field_names(cls):
return set(f.name for f in JobOptions._meta.fields) | set( return set(f.name for f in JobOptions._meta.fields) | set(
['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials', 'job_slice_number', 'job_slice_count', 'execution_environment'] [
'name',
'description',
'organization',
'survey_passwords',
'labels',
'credentials',
'job_slice_number',
'job_slice_count',
'execution_environment',
]
) )
@property @property
@@ -314,10 +330,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
actual_inventory = self.inventory actual_inventory = self.inventory
if self.ask_inventory_on_launch and 'inventory' in kwargs: if self.ask_inventory_on_launch and 'inventory' in kwargs:
actual_inventory = kwargs['inventory'] actual_inventory = kwargs['inventory']
actual_slice_count = self.job_slice_count
if self.ask_job_slice_count_on_launch and 'job_slice_count' in kwargs:
actual_slice_count = kwargs['job_slice_count']
if actual_inventory: if actual_inventory:
return min(self.job_slice_count, actual_inventory.hosts.count()) return min(actual_slice_count, actual_inventory.hosts.count())
else: else:
return self.job_slice_count return actual_slice_count
def save(self, *args, **kwargs): def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', []) update_fields = kwargs.get('update_fields', [])
@@ -425,10 +444,15 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
field = self._meta.get_field(field_name) field = self._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField): if isinstance(field, models.ManyToManyField):
old_value = set(old_value.all()) if field_name == 'instance_groups':
new_value = set(kwargs[field_name]) - old_value # Instance groups are ordered so we can't make a set out of them
if not new_value: old_value = old_value.all()
continue elif field_name == 'credentials':
# Credentials have a weird pattern because of how they are layered
old_value = set(old_value.all())
new_value = set(kwargs[field_name]) - old_value
if not new_value:
continue
if new_value == old_value: if new_value == old_value:
# no-op case: Fields the same as template's value # no-op case: Fields the same as template's value
@@ -449,6 +473,10 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
rejected_data[field_name] = new_value rejected_data[field_name] = new_value
errors_dict[field_name] = _('Project does not allow override of branch.') errors_dict[field_name] = _('Project does not allow override of branch.')
continue continue
elif field_name == 'job_slice_count' and (new_value > 1) and (self.get_effective_slice_ct(kwargs) <= 1):
rejected_data[field_name] = new_value
errors_dict[field_name] = _('Job inventory does not have enough hosts for slicing')
continue
# accepted prompt # accepted prompt
prompted_data[field_name] = new_value prompted_data[field_name] = new_value
else: else:
@@ -767,6 +795,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
@property @property
def preferred_instance_groups(self): def preferred_instance_groups(self):
# If the user specified instance groups those will be handled by the unified_job.create_unified_job
# This function handles only the defaults for a template w/o user specification
if self.organization is not None: if self.organization is not None:
organization_groups = [x for x in self.organization.instance_groups.all()] organization_groups = [x for x in self.organization.instance_groups.all()]
else: else:
@@ -906,10 +936,36 @@ class LaunchTimeConfigBase(BaseModel):
# This is a solution to the nullable CharField problem, specific to prompting # This is a solution to the nullable CharField problem, specific to prompting
char_prompts = JSONBlob(default=dict, blank=True) char_prompts = JSONBlob(default=dict, blank=True)
def prompts_dict(self, display=False): # Define fields that are not really fields, but alias to char_prompts lookups
limit = NullablePromptPseudoField('limit')
scm_branch = NullablePromptPseudoField('scm_branch')
job_tags = NullablePromptPseudoField('job_tags')
skip_tags = NullablePromptPseudoField('skip_tags')
diff_mode = NullablePromptPseudoField('diff_mode')
job_type = NullablePromptPseudoField('job_type')
verbosity = NullablePromptPseudoField('verbosity')
forks = NullablePromptPseudoField('forks')
job_slice_count = NullablePromptPseudoField('job_slice_count')
timeout = NullablePromptPseudoField('timeout')
# NOTE: additional fields are assumed to exist but must be defined in subclasses
# due to technical limitations
SUBCLASS_FIELDS = (
'instance_groups', # needs a through model defined
'extra_vars', # alternates between extra_vars and extra_data
'credentials', # already a unified job and unified JT field
'labels', # already a unified job and unified JT field
'execution_environment', # already a unified job and unified JT field
)
def prompts_dict(self, display=False, for_cls=None):
data = {} data = {}
if for_cls:
cls = for_cls
else:
cls = JobTemplate
# Some types may have different prompts, but always subset of JT prompts # Some types may have different prompts, but always subset of JT prompts
for prompt_name in JobTemplate.get_ask_mapping().keys(): for prompt_name in cls.get_ask_mapping().keys():
try: try:
field = self._meta.get_field(prompt_name) field = self._meta.get_field(prompt_name)
except FieldDoesNotExist: except FieldDoesNotExist:
@@ -917,18 +973,23 @@ class LaunchTimeConfigBase(BaseModel):
if isinstance(field, models.ManyToManyField): if isinstance(field, models.ManyToManyField):
if not self.pk: if not self.pk:
continue # unsaved object can't have related many-to-many continue # unsaved object can't have related many-to-many
prompt_val = set(getattr(self, prompt_name).all()) prompt_values = list(getattr(self, prompt_name).all())
if len(prompt_val) > 0: # Many to manys can't distinguish between None and []
data[prompt_name] = prompt_val # Because of this, from a config perspective, we assume [] is none and we don't save [] into the config
if len(prompt_values) > 0:
data[prompt_name] = prompt_values
elif prompt_name == 'extra_vars': elif prompt_name == 'extra_vars':
if self.extra_vars: if self.extra_vars:
extra_vars = {}
if display: if display:
data[prompt_name] = self.display_extra_vars() extra_vars = self.display_extra_vars()
else: else:
data[prompt_name] = self.extra_vars extra_vars = self.extra_vars
# Depending on model, field type may save and return as string # Depending on model, field type may save and return as string
if isinstance(data[prompt_name], str): if isinstance(extra_vars, str):
data[prompt_name] = parse_yaml_or_json(data[prompt_name]) extra_vars = parse_yaml_or_json(extra_vars)
if extra_vars:
data['extra_vars'] = extra_vars
if self.survey_passwords and not display: if self.survey_passwords and not display:
data['survey_passwords'] = self.survey_passwords data['survey_passwords'] = self.survey_passwords
else: else:
@@ -938,15 +999,6 @@ class LaunchTimeConfigBase(BaseModel):
return data return data
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'extra_vars':
continue
try:
LaunchTimeConfigBase._meta.get_field(field_name)
except FieldDoesNotExist:
setattr(LaunchTimeConfigBase, field_name, NullablePromptPseudoField(field_name))
class LaunchTimeConfig(LaunchTimeConfigBase): class LaunchTimeConfig(LaunchTimeConfigBase):
""" """
Common model for all objects that save details of a saved launch config Common model for all objects that save details of a saved launch config
@@ -965,8 +1017,18 @@ class LaunchTimeConfig(LaunchTimeConfigBase):
blank=True, blank=True,
) )
) )
# Credentials needed for non-unified job / unified JT models # Fields needed for non-unified job / unified JT models, because they are defined on unified models
credentials = models.ManyToManyField('Credential', related_name='%(class)ss') credentials = models.ManyToManyField('Credential', related_name='%(class)ss')
labels = models.ManyToManyField('Label', related_name='%(class)s_labels')
execution_environment = models.ForeignKey(
'ExecutionEnvironment',
null=True,
blank=True,
default=None,
on_delete=polymorphic.SET_NULL,
related_name='%(class)s_as_prompt',
help_text="The container image to be used for execution.",
)
@property @property
def extra_vars(self): def extra_vars(self):
@@ -1010,6 +1072,11 @@ class JobLaunchConfig(LaunchTimeConfig):
editable=False, editable=False,
) )
# Instance Groups needed for non-unified job / unified JT models
instance_groups = OrderedManyToManyField(
'InstanceGroup', related_name='%(class)ss', blank=True, editable=False, through='JobLaunchConfigInstanceGroupMembership'
)
def has_user_prompts(self, template): def has_user_prompts(self, template):
""" """
Returns True if any fields exist in the launch config that are Returns True if any fields exist in the launch config that are

View File

@@ -10,6 +10,8 @@ from awx.api.versioning import reverse
from awx.main.models.base import CommonModelNameNotUnique from awx.main.models.base import CommonModelNameNotUnique
from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob
from awx.main.models.inventory import Inventory from awx.main.models.inventory import Inventory
from awx.main.models.schedules import Schedule
from awx.main.models.workflow import WorkflowJobTemplateNode, WorkflowJobNode
__all__ = ('Label',) __all__ = ('Label',)
@@ -34,16 +36,22 @@ class Label(CommonModelNameNotUnique):
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:label_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:label_detail', kwargs={'pk': self.pk}, request=request)
@staticmethod
def get_orphaned_labels():
return Label.objects.filter(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)
def is_detached(self): def is_detached(self):
return Label.objects.filter(id=self.id, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True).exists() return Label.objects.filter(
id=self.id,
unifiedjob_labels__isnull=True,
unifiedjobtemplate_labels__isnull=True,
inventory_labels__isnull=True,
schedule_labels__isnull=True,
workflowjobtemplatenode_labels__isnull=True,
workflowjobnode_labels__isnull=True,
).exists()
def is_candidate_for_detach(self): def is_candidate_for_detach(self):
count = UnifiedJob.objects.filter(labels__in=[self.id]).count() # Both Jobs and WFJobs
c1 = UnifiedJob.objects.filter(labels__in=[self.id]).count() count += UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count() # Both JTs and WFJT
c2 = UnifiedJobTemplate.objects.filter(labels__in=[self.id]).count() count += Inventory.objects.filter(labels__in=[self.id]).count()
c3 = Inventory.objects.filter(labels__in=[self.id]).count() count += Schedule.objects.filter(labels__in=[self.id]).count()
return (c1 + c2 + c3 - 1) == 0 count += WorkflowJobTemplateNode.objects.filter(labels__in=[self.id]).count()
count += WorkflowJobNode.objects.filter(labels__in=[self.id]).count()
return (count - 1) == 0

View File

@@ -104,6 +104,33 @@ class SurveyJobTemplateMixin(models.Model):
default=False, default=False,
) )
survey_spec = prevent_search(JSONBlob(default=dict, blank=True)) survey_spec = prevent_search(JSONBlob(default=dict, blank=True))
ask_inventory_on_launch = AskForField(
blank=True,
default=False,
)
ask_limit_on_launch = AskForField(
blank=True,
default=False,
)
ask_scm_branch_on_launch = AskForField(
blank=True,
default=False,
allows_field='scm_branch',
)
ask_labels_on_launch = AskForField(
blank=True,
default=False,
)
ask_tags_on_launch = AskForField(
blank=True,
default=False,
allows_field='job_tags',
)
ask_skip_tags_on_launch = AskForField(
blank=True,
default=False,
)
ask_variables_on_launch = AskForField(blank=True, default=False, allows_field='extra_vars') ask_variables_on_launch = AskForField(blank=True, default=False, allows_field='extra_vars')
def survey_password_variables(self): def survey_password_variables(self):

View File

@@ -18,6 +18,7 @@ from django.utils.translation import gettext_lazy as _
# AWX # AWX
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.main.fields import OrderedManyToManyField
from awx.main.models.base import PrimordialModel from awx.main.models.base import PrimordialModel
from awx.main.models.jobs import LaunchTimeConfig from awx.main.models.jobs import LaunchTimeConfig
from awx.main.utils import ignore_inventory_computed_fields from awx.main.utils import ignore_inventory_computed_fields
@@ -83,6 +84,13 @@ class Schedule(PrimordialModel, LaunchTimeConfig):
) )
rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule.")) rrule = models.TextField(help_text=_("A value representing the schedules iCal recurrence rule."))
next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run.")) next_run = models.DateTimeField(null=True, default=None, editable=False, help_text=_("The next time that the scheduled action will run."))
instance_groups = OrderedManyToManyField(
'InstanceGroup',
related_name='schedule_instance_groups',
blank=True,
editable=False,
through='ScheduleInstanceGroupMembership',
)
@classmethod @classmethod
def get_zoneinfo(cls): def get_zoneinfo(cls):

View File

@@ -332,10 +332,11 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
return NotificationTemplate.objects.none() return NotificationTemplate.objects.none()
def create_unified_job(self, **kwargs): def create_unified_job(self, instance_groups=None, **kwargs):
""" """
Create a new unified job based on this unified job template. Create a new unified job based on this unified job template.
""" """
# TODO: rename kwargs to prompts, to set expectation that these are runtime values
new_job_passwords = kwargs.pop('survey_passwords', {}) new_job_passwords = kwargs.pop('survey_passwords', {})
eager_fields = kwargs.pop('_eager_fields', None) eager_fields = kwargs.pop('_eager_fields', None)
@@ -382,7 +383,10 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
unified_job.survey_passwords = new_job_passwords unified_job.survey_passwords = new_job_passwords
kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch kwargs['survey_passwords'] = new_job_passwords # saved in config object for relaunch
unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache() if instance_groups:
unified_job.preferred_instance_groups_cache = [ig.id for ig in instance_groups]
else:
unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache()
unified_job._set_default_dependencies_processed() unified_job._set_default_dependencies_processed()
unified_job.task_impact = unified_job._get_task_impact() unified_job.task_impact = unified_job._get_task_impact()
@@ -412,13 +416,17 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
unified_job.handle_extra_data(validated_kwargs['extra_vars']) unified_job.handle_extra_data(validated_kwargs['extra_vars'])
# Create record of provided prompts for relaunch and rescheduling # Create record of provided prompts for relaunch and rescheduling
unified_job.create_config_from_prompts(kwargs, parent=self) config = unified_job.create_config_from_prompts(kwargs, parent=self)
if instance_groups:
for ig in instance_groups:
config.instance_groups.add(ig)
# manually issue the create activity stream entry _after_ M2M relations # manually issue the create activity stream entry _after_ M2M relations
# have been associated to the UJ # have been associated to the UJ
if unified_job.__class__ in activity_stream_registrar.models: if unified_job.__class__ in activity_stream_registrar.models:
activity_stream_create(None, unified_job, True) activity_stream_create(None, unified_job, True)
unified_job.log_lifecycle("created") unified_job.log_lifecycle("created")
return unified_job return unified_job
@classmethod @classmethod
@@ -973,22 +981,38 @@ class UnifiedJob(
valid_fields.extend(['survey_passwords', 'extra_vars']) valid_fields.extend(['survey_passwords', 'extra_vars'])
else: else:
kwargs.pop('survey_passwords', None) kwargs.pop('survey_passwords', None)
many_to_many_fields = []
for field_name, value in kwargs.items(): for field_name, value in kwargs.items():
if field_name not in valid_fields: if field_name not in valid_fields:
raise Exception('Unrecognized launch config field {}.'.format(field_name)) raise Exception('Unrecognized launch config field {}.'.format(field_name))
if field_name == 'credentials': field = None
# may use extra_data as a proxy for extra_vars
if field_name in config.SUBCLASS_FIELDS and field_name != 'extra_vars':
field = config._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField):
many_to_many_fields.append(field_name)
continue continue
key = field_name if isinstance(field, (models.ForeignKey)) and (value is None):
if key == 'extra_vars': continue # the null value indicates not-provided for ForeignKey case
key = 'extra_data' setattr(config, field_name, value)
setattr(config, key, value)
config.save() config.save()
job_creds = set(kwargs.get('credentials', [])) for field_name in many_to_many_fields:
if 'credentials' in [field.name for field in parent._meta.get_fields()]: prompted_items = kwargs.get(field_name, [])
job_creds = job_creds - set(parent.credentials.all()) if not prompted_items:
if job_creds: continue
config.credentials.add(*job_creds) if field_name == 'instance_groups':
# Here we are doing a loop to make sure we preserve order for this Ordered field
# also do not merge IGs with parent, so this saves the literal list
for item in prompted_items:
getattr(config, field_name).add(item)
else:
# Assuming this field merges prompts with parent, save just the diff
if field_name in [field.name for field in parent._meta.get_fields()]:
prompted_items = set(prompted_items) - set(getattr(parent, field_name).all())
if prompted_items:
getattr(config, field_name).add(*prompted_items)
return config return config
@property @property

View File

@@ -29,7 +29,7 @@ from awx.main.models import prevent_search, accepts_json, UnifiedJobTemplate, Un
from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin from awx.main.models.notifications import NotificationTemplate, JobNotificationMixin
from awx.main.models.base import CreatedModifiedModel, VarsDictProperty from awx.main.models.base import CreatedModifiedModel, VarsDictProperty
from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR from awx.main.models.rbac import ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, ROLE_SINGLETON_SYSTEM_AUDITOR
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob from awx.main.fields import ImplicitRoleField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import ( from awx.main.models.mixins import (
ResourceMixin, ResourceMixin,
SurveyJobTemplateMixin, SurveyJobTemplateMixin,
@@ -114,6 +114,9 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
'credentials', 'credentials',
'char_prompts', 'char_prompts',
'all_parents_must_converge', 'all_parents_must_converge',
'labels',
'instance_groups',
'execution_environment',
] ]
def create_workflow_job_node(self, **kwargs): def create_workflow_job_node(self, **kwargs):
@@ -122,7 +125,7 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
""" """
create_kwargs = {} create_kwargs = {}
for field_name in self._get_workflow_job_field_names(): for field_name in self._get_workflow_job_field_names():
if field_name == 'credentials': if field_name in ['credentials', 'labels', 'instance_groups']:
continue continue
if field_name in kwargs: if field_name in kwargs:
create_kwargs[field_name] = kwargs[field_name] create_kwargs[field_name] = kwargs[field_name]
@@ -132,10 +135,20 @@ class WorkflowNodeBase(CreatedModifiedModel, LaunchTimeConfig):
new_node = WorkflowJobNode.objects.create(**create_kwargs) new_node = WorkflowJobNode.objects.create(**create_kwargs)
if self.pk: if self.pk:
allowed_creds = self.credentials.all() allowed_creds = self.credentials.all()
allowed_labels = self.labels.all()
allowed_instance_groups = self.instance_groups.all()
else: else:
allowed_creds = [] allowed_creds = []
allowed_labels = []
allowed_instance_groups = []
for cred in allowed_creds: for cred in allowed_creds:
new_node.credentials.add(cred) new_node.credentials.add(cred)
for label in allowed_labels:
new_node.labels.add(label)
for instance_group in allowed_instance_groups:
new_node.instance_groups.add(instance_group)
return new_node return new_node
@@ -153,6 +166,9 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
'char_prompts', 'char_prompts',
'all_parents_must_converge', 'all_parents_must_converge',
'identifier', 'identifier',
'labels',
'execution_environment',
'instance_groups',
] ]
REENCRYPTION_BLOCKLIST_AT_COPY = ['extra_data', 'survey_passwords'] REENCRYPTION_BLOCKLIST_AT_COPY = ['extra_data', 'survey_passwords']
@@ -167,6 +183,13 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
blank=False, blank=False,
help_text=_('An identifier for this node that is unique within its workflow. ' 'It is copied to workflow job nodes corresponding to this node.'), help_text=_('An identifier for this node that is unique within its workflow. ' 'It is copied to workflow job nodes corresponding to this node.'),
) )
instance_groups = OrderedManyToManyField(
'InstanceGroup',
related_name='workflow_job_template_node_instance_groups',
blank=True,
editable=False,
through='WorkflowJobTemplateNodeBaseInstanceGroupMembership',
)
class Meta: class Meta:
app_label = 'main' app_label = 'main'
@@ -211,7 +234,7 @@ class WorkflowJobTemplateNode(WorkflowNodeBase):
approval_template = WorkflowApprovalTemplate(**kwargs) approval_template = WorkflowApprovalTemplate(**kwargs)
approval_template.save() approval_template.save()
self.unified_job_template = approval_template self.unified_job_template = approval_template
self.save() self.save(update_fields=['unified_job_template'])
return approval_template return approval_template
@@ -250,6 +273,9 @@ class WorkflowJobNode(WorkflowNodeBase):
blank=True, # blank denotes pre-migration job nodes blank=True, # blank denotes pre-migration job nodes
help_text=_('An identifier coresponding to the workflow job template node that this node was created from.'), help_text=_('An identifier coresponding to the workflow job template node that this node was created from.'),
) )
instance_groups = OrderedManyToManyField(
'InstanceGroup', related_name='workflow_job_node_instance_groups', blank=True, editable=False, through='WorkflowJobNodeBaseInstanceGroupMembership'
)
class Meta: class Meta:
app_label = 'main' app_label = 'main'
@@ -265,19 +291,6 @@ class WorkflowJobNode(WorkflowNodeBase):
def get_absolute_url(self, request=None): def get_absolute_url(self, request=None):
return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request) return reverse('api:workflow_job_node_detail', kwargs={'pk': self.pk}, request=request)
def prompts_dict(self, *args, **kwargs):
r = super(WorkflowJobNode, self).prompts_dict(*args, **kwargs)
# Explanation - WFJT extra_vars still break pattern, so they are not
# put through prompts processing, but inventory and others are only accepted
# if JT prompts for it, so it goes through this mechanism
if self.workflow_job:
if self.workflow_job.inventory_id:
# workflow job inventory takes precedence
r['inventory'] = self.workflow_job.inventory
if self.workflow_job.char_prompts:
r.update(self.workflow_job.char_prompts)
return r
def get_job_kwargs(self): def get_job_kwargs(self):
""" """
In advance of creating a new unified job as part of a workflow, In advance of creating a new unified job as part of a workflow,
@@ -287,16 +300,38 @@ class WorkflowJobNode(WorkflowNodeBase):
""" """
# reject/accept prompted fields # reject/accept prompted fields
data = {} data = {}
wj_special_vars = {}
wj_special_passwords = {}
ujt_obj = self.unified_job_template ujt_obj = self.unified_job_template
if ujt_obj is not None: if ujt_obj is not None:
# MERGE note: move this to prompts_dict method on node when merging node_prompts_data = self.prompts_dict(for_cls=ujt_obj.__class__)
# with the workflow inventory branch wj_prompts_data = self.workflow_job.prompts_dict(for_cls=ujt_obj.__class__)
prompts_data = self.prompts_dict() # Explanation - special historical case
if isinstance(ujt_obj, WorkflowJobTemplate): # WFJT extra_vars ignored JobTemplate.ask_variables_on_launch, bypassing _accept_or_ignore_job_kwargs
if self.workflow_job.extra_vars: # inventory and others are only accepted if JT prompts for it with related ask_ field
prompts_data.setdefault('extra_vars', {}) # this is inconsistent, but maintained
prompts_data['extra_vars'].update(self.workflow_job.extra_vars_dict) if not isinstance(ujt_obj, WorkflowJobTemplate):
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**prompts_data) wj_special_vars = wj_prompts_data.pop('extra_vars', {})
wj_special_passwords = wj_prompts_data.pop('survey_passwords', {})
elif 'extra_vars' in node_prompts_data:
# Follow the vars combination rules
node_prompts_data['extra_vars'].update(wj_prompts_data.pop('extra_vars', {}))
elif 'survey_passwords' in node_prompts_data:
node_prompts_data['survey_passwords'].update(wj_prompts_data.pop('survey_passwords', {}))
# Follow the credential combination rules
if ('credentials' in wj_prompts_data) and ('credentials' in node_prompts_data):
wj_pivoted_creds = Credential.unique_dict(wj_prompts_data['credentials'])
node_pivoted_creds = Credential.unique_dict(node_prompts_data['credentials'])
node_pivoted_creds.update(wj_pivoted_creds)
wj_prompts_data['credentials'] = [cred for cred in node_pivoted_creds.values()]
# NOTE: no special rules for instance_groups, because they do not merge
# or labels, because they do not propogate WFJT-->node at all
# Combine WFJT prompts with node here, WFJT at higher level
node_prompts_data.update(wj_prompts_data)
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**node_prompts_data)
if errors: if errors:
logger.info( logger.info(
_('Bad launch configuration starting template {template_pk} as part of ' 'workflow {workflow_pk}. Errors:\n{error_text}').format( _('Bad launch configuration starting template {template_pk} as part of ' 'workflow {workflow_pk}. Errors:\n{error_text}').format(
@@ -304,15 +339,6 @@ class WorkflowJobNode(WorkflowNodeBase):
) )
) )
data.update(accepted_fields) # missing fields are handled in the scheduler data.update(accepted_fields) # missing fields are handled in the scheduler
try:
# config saved on the workflow job itself
wj_config = self.workflow_job.launch_config
except ObjectDoesNotExist:
wj_config = None
if wj_config:
accepted_fields, ignored_fields, errors = ujt_obj._accept_or_ignore_job_kwargs(**wj_config.prompts_dict())
accepted_fields.pop('extra_vars', None) # merge handled with other extra_vars later
data.update(accepted_fields)
# build ancestor artifacts, save them to node model for later # build ancestor artifacts, save them to node model for later
aa_dict = {} aa_dict = {}
is_root_node = True is_root_node = True
@@ -325,15 +351,12 @@ class WorkflowJobNode(WorkflowNodeBase):
self.ancestor_artifacts = aa_dict self.ancestor_artifacts = aa_dict
self.save(update_fields=['ancestor_artifacts']) self.save(update_fields=['ancestor_artifacts'])
# process password list # process password list
password_dict = {} password_dict = data.get('survey_passwords', {})
if '_ansible_no_log' in aa_dict: if '_ansible_no_log' in aa_dict:
for key in aa_dict: for key in aa_dict:
if key != '_ansible_no_log': if key != '_ansible_no_log':
password_dict[key] = REPLACE_STR password_dict[key] = REPLACE_STR
if self.workflow_job.survey_passwords: password_dict.update(wj_special_passwords)
password_dict.update(self.workflow_job.survey_passwords)
if self.survey_passwords:
password_dict.update(self.survey_passwords)
if password_dict: if password_dict:
data['survey_passwords'] = password_dict data['survey_passwords'] = password_dict
# process extra_vars # process extra_vars
@@ -343,12 +366,12 @@ class WorkflowJobNode(WorkflowNodeBase):
functional_aa_dict = copy(aa_dict) functional_aa_dict = copy(aa_dict)
functional_aa_dict.pop('_ansible_no_log', None) functional_aa_dict.pop('_ansible_no_log', None)
extra_vars.update(functional_aa_dict) extra_vars.update(functional_aa_dict)
if ujt_obj and isinstance(ujt_obj, JobTemplate):
# Workflow Job extra_vars higher precedence than ancestor artifacts # Workflow Job extra_vars higher precedence than ancestor artifacts
if self.workflow_job and self.workflow_job.extra_vars: extra_vars.update(wj_special_vars)
extra_vars.update(self.workflow_job.extra_vars_dict)
if extra_vars: if extra_vars:
data['extra_vars'] = extra_vars data['extra_vars'] = extra_vars
# ensure that unified jobs created by WorkflowJobs are marked # ensure that unified jobs created by WorkflowJobs are marked
data['_eager_fields'] = {'launch_type': 'workflow'} data['_eager_fields'] = {'launch_type': 'workflow'}
if self.workflow_job and self.workflow_job.created_by: if self.workflow_job and self.workflow_job.created_by:
@@ -374,6 +397,10 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
) )
) )
) )
# Workflow jobs are used for sliced jobs, and thus, must be a conduit for any JT prompts
instance_groups = OrderedManyToManyField(
'InstanceGroup', related_name='workflow_job_instance_groups', blank=True, editable=False, through='WorkflowJobInstanceGroupMembership'
)
allow_simultaneous = models.BooleanField(default=False) allow_simultaneous = models.BooleanField(default=False)
extra_vars_dict = VarsDictProperty('extra_vars', True) extra_vars_dict = VarsDictProperty('extra_vars', True)
@@ -385,7 +412,7 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
@classmethod @classmethod
def _get_unified_job_field_names(cls): def _get_unified_job_field_names(cls):
r = set(f.name for f in WorkflowJobOptions._meta.fields) | set( r = set(f.name for f in WorkflowJobOptions._meta.fields) | set(
['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch'] ['name', 'description', 'organization', 'survey_passwords', 'labels', 'limit', 'scm_branch', 'job_tags', 'skip_tags']
) )
r.remove('char_prompts') # needed due to copying launch config to launch config r.remove('char_prompts') # needed due to copying launch config to launch config
return r return r
@@ -425,26 +452,29 @@ class WorkflowJobOptions(LaunchTimeConfigBase):
class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin, RelatedJobsMixin, WebhookTemplateMixin): class WorkflowJobTemplate(UnifiedJobTemplate, WorkflowJobOptions, SurveyJobTemplateMixin, ResourceMixin, RelatedJobsMixin, WebhookTemplateMixin):
SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')] SOFT_UNIQUE_TOGETHER = [('polymorphic_ctype', 'name', 'organization')]
FIELDS_TO_PRESERVE_AT_COPY = ['labels', 'organization', 'instance_groups', 'workflow_job_template_nodes', 'credentials', 'survey_spec'] FIELDS_TO_PRESERVE_AT_COPY = [
'labels',
'organization',
'instance_groups',
'workflow_job_template_nodes',
'credentials',
'survey_spec',
'skip_tags',
'job_tags',
'execution_environment',
]
class Meta: class Meta:
app_label = 'main' app_label = 'main'
ask_inventory_on_launch = AskForField( notification_templates_approvals = models.ManyToManyField(
"NotificationTemplate",
blank=True, blank=True,
default=False, related_name='%(class)s_notification_templates_for_approvals',
) )
ask_limit_on_launch = AskForField( admin_role = ImplicitRoleField(
blank=True, parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role'],
default=False,
) )
ask_scm_branch_on_launch = AskForField(
blank=True,
default=False,
)
notification_templates_approvals = models.ManyToManyField("NotificationTemplate", blank=True, related_name='%(class)s_notification_templates_for_approvals')
admin_role = ImplicitRoleField(parent_role=['singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR, 'organization.workflow_admin_role'])
execute_role = ImplicitRoleField( execute_role = ImplicitRoleField(
parent_role=[ parent_role=[
'admin_role', 'admin_role',
@@ -713,6 +743,25 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set)) artifacts.update(job.get_effective_artifacts(parents_set=new_parents_set))
return artifacts return artifacts
def prompts_dict(self, *args, **kwargs):
if self.job_template_id:
# HACK: Exception for sliced jobs here, this is bad
# when sliced jobs were introduced, workflows did not have all the prompted JT fields
# so to support prompting with slicing, we abused the workflow job launch config
# these would be more properly saved on the workflow job, but it gets the wrong fields now
try:
wj_config = self.launch_config
r = wj_config.prompts_dict(*args, **kwargs)
except ObjectDoesNotExist:
r = {}
else:
r = super().prompts_dict(*args, **kwargs)
# Workflow labels and job labels are treated separately
# that means that they do not propogate from WFJT / workflow job to jobs in workflow
r.pop('labels', None)
return r
def get_notification_templates(self): def get_notification_templates(self):
return self.workflow_job_template.notification_templates return self.workflow_job_template.notification_templates

View File

@@ -210,7 +210,7 @@ def mk_workflow_job_template(name, extra_vars='', spec=None, organization=None,
if extra_vars: if extra_vars:
extra_vars = json.dumps(extra_vars) extra_vars = json.dumps(extra_vars)
wfjt = WorkflowJobTemplate(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service) wfjt = WorkflowJobTemplate.objects.create(name=name, extra_vars=extra_vars, organization=organization, webhook_service=webhook_service)
if spec: if spec:
wfjt.survey_spec = spec wfjt.survey_spec = spec

View File

@@ -13,17 +13,11 @@ from django.utils import timezone
# AWX # AWX
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin
from awx.main.models import ( from awx.main.models import JobTemplate, User, Job, AdHocCommand, ProjectUpdate, InstanceGroup, Label, Organization
JobTemplate,
User,
Job,
AdHocCommand,
ProjectUpdate,
)
@pytest.mark.django_db @pytest.mark.django_db
def test_job_relaunch_permission_denied_response(post, get, inventory, project, credential, net_credential, machine_credential): def test_job_relaunch_permission_denied_response(post, get, inventory, project, net_credential, machine_credential):
jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True) jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project, ask_credential_on_launch=True)
jt.credentials.add(machine_credential) jt.credentials.add(machine_credential)
jt_user = User.objects.create(username='jobtemplateuser') jt_user = User.objects.create(username='jobtemplateuser')
@@ -39,6 +33,22 @@ def test_job_relaunch_permission_denied_response(post, get, inventory, project,
job.launch_config.credentials.add(net_credential) job.launch_config.credentials.add(net_credential)
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403) r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields you do not have access to' in r.data['detail'] assert 'launched with prompted fields you do not have access to' in r.data['detail']
job.launch_config.credentials.clear()
# Job has prompted instance group that user cannot see
job.launch_config.instance_groups.add(InstanceGroup.objects.create())
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields you do not have access to' in r.data['detail']
job.launch_config.instance_groups.clear()
# Job has prompted label that user cannot see
job.launch_config.labels.add(Label.objects.create(organization=Organization.objects.create()))
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=403)
assert 'launched with prompted fields you do not have access to' in r.data['detail']
job.launch_config.labels.clear()
# without any of those prompts, user can launch
r = post(reverse('api:job_relaunch', kwargs={'pk': job.pk}), {}, jt_user, expect=201)
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -4,8 +4,7 @@ import yaml
import json import json
from awx.api.serializers import JobLaunchSerializer from awx.api.serializers import JobLaunchSerializer
from awx.main.models.credential import Credential from awx.main.models import Credential, Inventory, Host, ExecutionEnvironment, Label, InstanceGroup
from awx.main.models.inventory import Inventory, Host
from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate
from awx.api.versioning import reverse from awx.api.versioning import reverse
@@ -15,6 +14,11 @@ from awx.api.versioning import reverse
def runtime_data(organization, credentialtype_ssh): def runtime_data(organization, credentialtype_ssh):
cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'}) cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'})
inv_obj = organization.inventories.create(name="runtime-inv") inv_obj = organization.inventories.create(name="runtime-inv")
inv_obj.hosts.create(name='foo1')
inv_obj.hosts.create(name='foo2')
ee_obj = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
ig_obj = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
labels_obj = Label.objects.create(name='foo', description='bar', organization=organization)
return dict( return dict(
extra_vars='{"job_launch_var": 4}', extra_vars='{"job_launch_var": 4}',
limit='test-servers', limit='test-servers',
@@ -25,6 +29,12 @@ def runtime_data(organization, credentialtype_ssh):
credentials=[cred_obj.pk], credentials=[cred_obj.pk],
diff_mode=True, diff_mode=True,
verbosity=2, verbosity=2,
execution_environment=ee_obj.pk,
labels=[labels_obj.pk],
forks=7,
job_slice_count=2,
timeout=10,
instance_groups=[ig_obj.pk],
) )
@@ -54,6 +64,12 @@ def job_template_prompts(project, inventory, machine_credential):
ask_credential_on_launch=on_off, ask_credential_on_launch=on_off,
ask_diff_mode_on_launch=on_off, ask_diff_mode_on_launch=on_off,
ask_verbosity_on_launch=on_off, ask_verbosity_on_launch=on_off,
ask_execution_environment_on_launch=on_off,
ask_labels_on_launch=on_off,
ask_forks_on_launch=on_off,
ask_job_slice_count_on_launch=on_off,
ask_timeout_on_launch=on_off,
ask_instance_groups_on_launch=on_off,
) )
jt.credentials.add(machine_credential) jt.credentials.add(machine_credential)
return jt return jt
@@ -77,6 +93,12 @@ def job_template_prompts_null(project):
ask_credential_on_launch=True, ask_credential_on_launch=True,
ask_diff_mode_on_launch=True, ask_diff_mode_on_launch=True,
ask_verbosity_on_launch=True, ask_verbosity_on_launch=True,
ask_execution_environment_on_launch=True,
ask_labels_on_launch=True,
ask_forks_on_launch=True,
ask_job_slice_count_on_launch=True,
ask_timeout_on_launch=True,
ask_instance_groups_on_launch=True,
) )
@@ -92,6 +114,12 @@ def data_to_internal(data):
internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials']) internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials'])
if 'inventory' in data: if 'inventory' in data:
internal['inventory'] = Inventory.objects.get(pk=data['inventory']) internal['inventory'] = Inventory.objects.get(pk=data['inventory'])
if 'execution_environment' in data:
internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment'])
if 'labels' in data:
internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']]
if 'instance_groups' in data:
internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']]
return internal return internal
@@ -124,6 +152,12 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad
assert 'credentials' in response.data['ignored_fields'] assert 'credentials' in response.data['ignored_fields']
assert 'job_tags' in response.data['ignored_fields'] assert 'job_tags' in response.data['ignored_fields']
assert 'skip_tags' in response.data['ignored_fields'] assert 'skip_tags' in response.data['ignored_fields']
assert 'execution_environment' in response.data['ignored_fields']
assert 'labels' in response.data['ignored_fields']
assert 'forks' in response.data['ignored_fields']
assert 'job_slice_count' in response.data['ignored_fields']
assert 'timeout' in response.data['ignored_fields']
assert 'instance_groups' in response.data['ignored_fields']
@pytest.mark.django_db @pytest.mark.django_db
@@ -162,6 +196,34 @@ def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
mock_job.signal_start.assert_called_once() mock_job.signal_start.assert_called_once()
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_slice_timeout_forks_need_int(job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)
mock_job = mocker.MagicMock(spec=Job, id=968)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(
reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'timeout': '', 'job_slice_count': '', 'forks': ''}, admin_user, expect=400
)
assert 'forks' in response.data and response.data['forks'][0] == 'A valid integer is required.'
assert 'job_slice_count' in response.data and response.data['job_slice_count'][0] == 'A valid integer is required.'
assert 'timeout' in response.data and response.data['timeout'][0] == 'A valid integer is required.'
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_slice_count_not_supported(job_template_prompts, post, admin_user):
job_template = job_template_prompts(True)
assert job_template.inventory.hosts.count() == 0
job_template.inventory.hosts.create(name='foo')
response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'job_slice_count': 8}, admin_user, expect=400)
assert response.data['job_slice_count'][0] == 'Job inventory does not have enough hosts for slicing'
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.job_runtime_vars @pytest.mark.job_runtime_vars
def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker): def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker):
@@ -176,6 +238,10 @@ def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null,
inventory = Inventory.objects.get(pk=runtime_data['inventory']) inventory = Inventory.objects.get(pk=runtime_data['inventory'])
inventory.use_role.members.add(rando) inventory.use_role.members.add(rando)
# Instance Groups and label can not currently easily be used by rando so we need to remove the instance groups from the runtime data
runtime_data.pop('instance_groups')
runtime_data.pop('labels')
mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data) mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)
with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job): with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
@@ -243,12 +309,59 @@ def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime
@pytest.mark.django_db @pytest.mark.django_db
@pytest.mark.job_runtime_vars @pytest.mark.job_runtime_vars
def test_job_launch_fails_without_credential_access(job_template_prompts, runtime_data, post, rando): def test_job_launch_works_without_access_to_ig_if_ig_in_template(job_template_prompts, runtime_data, post, rando, mocker):
job_template = job_template_prompts(True)
job_template.instance_groups.add(InstanceGroup.objects.get(id=runtime_data['instance_groups'][0]))
job_template.instance_groups.add(InstanceGroup.objects.create(name='foo'))
job_template.save()
job_template.execute_role.members.add(rando)
# Make sure we get a 201 instead of a 403 since we are providing an override of just a subset of the instance gorup that was already added
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(instance_groups=runtime_data['instance_groups']), rando, expect=201)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_works_without_access_to_label_if_label_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
job_template = job_template_prompts(True)
job_template.labels.add(Label.objects.get(id=runtime_data['labels'][0]))
job_template.labels.add(Label.objects.create(name='baz', description='faz', organization=organization))
job_template.save()
job_template.execute_role.members.add(rando)
# Make sure we get a 201 instead of a 403 since we are providing an override of just a subset of the instance gorup that was already added
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(labels=runtime_data['labels']), rando, expect=201)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_works_without_access_to_ee_if_ee_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando)
# Make sure we get a 201 instead of a 403 since we are providing an override that is already in the template
post(
reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(execution_environment=runtime_data['execution_environment']), rando, expect=201
)
@pytest.mark.parametrize(
'item_type',
[
('credentials'),
('labels'),
('instance_groups'),
],
)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_fails_without_access(job_template_prompts, runtime_data, post, rando, item_type):
job_template = job_template_prompts(True) job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando) job_template.execute_role.members.add(rando)
# Assure that giving a credential without access blocks the launch # Assure that giving a credential without access blocks the launch
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(credentials=runtime_data['credentials']), rando, expect=403) data = {item_type: runtime_data[item_type]}
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), data, rando, expect=403)
@pytest.mark.django_db @pytest.mark.django_db

View File

@@ -77,6 +77,18 @@ class TestApprovalNodes:
assert approval_node.unified_job_template.description == 'Approval Node' assert approval_node.unified_job_template.description == 'Approval Node'
assert approval_node.unified_job_template.timeout == 0 assert approval_node.unified_job_template.timeout == 0
def test_approval_node_creation_with_timeout(self, post, approval_node, admin_user):
assert approval_node.timeout is None
url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'})
post(url, {'name': 'Test', 'description': 'Approval Node', 'timeout': 10}, user=admin_user, expect=201)
approval_node = WorkflowJobTemplateNode.objects.get(pk=approval_node.pk)
approval_node.refresh_from_db()
assert approval_node.timeout is None
assert isinstance(approval_node.unified_job_template, WorkflowApprovalTemplate)
assert approval_node.unified_job_template.timeout == 10
def test_approval_node_creation_failure(self, post, approval_node, admin_user): def test_approval_node_creation_failure(self, post, approval_node, admin_user):
# This test leaves off a required param to assert that user will get a 400. # This test leaves off a required param to assert that user will get a 400.
url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'}) url = reverse('api:workflow_job_template_node_create_approval', kwargs={'pk': approval_node.pk, 'version': 'v2'})

View File

@@ -706,7 +706,7 @@ def jt_linked(organization, project, inventory, machine_credential, credential,
@pytest.fixture @pytest.fixture
def workflow_job_template(organization): def workflow_job_template(organization):
wjt = WorkflowJobTemplate(name='test-workflow_job_template', organization=organization) wjt = WorkflowJobTemplate.objects.create(name='test-workflow_job_template', organization=organization)
wjt.save() wjt.save()
return wjt return wjt

View File

@@ -64,3 +64,26 @@ class TestSlicingModels:
inventory2 = Inventory.objects.create(organization=organization, name='fooinv') inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
[inventory2.hosts.create(name='foo{}'.format(i)) for i in range(3)] [inventory2.hosts.create(name='foo{}'.format(i)) for i in range(3)]
assert job_template.get_effective_slice_ct({'inventory': inventory2}) assert job_template.get_effective_slice_ct({'inventory': inventory2})
def test_effective_slice_count_prompt(self, job_template, inventory, organization):
job_template.inventory = inventory
# Add our prompt fields to the JT to allow overrides
job_template.ask_job_slice_count_on_launch = True
job_template.ask_inventory_on_launch = True
# Set a default value of the slice count to something low
job_template.job_slice_count = 2
# Create an inventory with 4 nodes
inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
[inventory2.hosts.create(name='foo{}'.format(i)) for i in range(4)]
# The inventory slice count will be the min of the number of nodes (4) or the job slice (2)
assert job_template.get_effective_slice_ct({'inventory': inventory2}) == 2
# Now we are going to pass in an override (like the prompt would) and as long as that is < host count we expect that back
assert job_template.get_effective_slice_ct({'inventory': inventory2, 'job_slice_count': 3}) == 3
def test_slice_count_prompt_limited_by_inventory(self, job_template, inventory, organization):
assert inventory.hosts.count() == 0
job_template.inventory = inventory
inventory.hosts.create(name='foo')
unified_job = job_template.create_unified_job(job_slice_count=2)
assert isinstance(unified_job, Job)

View File

@@ -1,7 +1,8 @@
import pytest import pytest
# AWX # AWX
from awx.main.models import JobTemplate, JobLaunchConfig from awx.main.models.jobs import JobTemplate, LaunchTimeConfigBase
from awx.main.models.execution_environments import ExecutionEnvironment
@pytest.fixture @pytest.fixture
@@ -11,18 +12,6 @@ def full_jt(inventory, project, machine_credential):
return jt return jt
@pytest.fixture
def config_factory(full_jt):
def return_config(data):
job = full_jt.create_unified_job(**data)
try:
return job.launch_config
except JobLaunchConfig.DoesNotExist:
return None
return return_config
@pytest.mark.django_db @pytest.mark.django_db
class TestConfigCreation: class TestConfigCreation:
""" """
@@ -40,28 +29,73 @@ class TestConfigCreation:
assert config.limit == 'foobar' assert config.limit == 'foobar'
assert config.char_prompts == {'limit': 'foobar'} assert config.char_prompts == {'limit': 'foobar'}
def test_added_credential(self, full_jt, credential): def test_added_related(self, full_jt, credential, default_instance_group, label):
job = full_jt.create_unified_job(credentials=[credential]) job = full_jt.create_unified_job(credentials=[credential], instance_groups=[default_instance_group], labels=[label])
config = job.launch_config config = job.launch_config
assert set(config.credentials.all()) == set([credential]) assert set(config.credentials.all()) == set([credential])
assert set(config.labels.all()) == set([label])
assert set(config.instance_groups.all()) == set([default_instance_group])
def test_survey_passwords_ignored(self, inventory_source): def test_survey_passwords_ignored(self, inventory_source):
iu = inventory_source.create_unified_job(survey_passwords={'foo': '$encrypted$'}) iu = inventory_source.create_unified_job(survey_passwords={'foo': '$encrypted$'})
assert iu.launch_config.prompts_dict() == {} assert iu.launch_config.prompts_dict() == {}
@pytest.fixture
def full_prompts_dict(inventory, credential, label, default_instance_group):
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
r = {
'limit': 'foobar',
'inventory': inventory,
'credentials': [credential],
'execution_environment': ee,
'labels': [label],
'instance_groups': [default_instance_group],
'verbosity': 3,
'scm_branch': 'non_dev',
'diff_mode': True,
'skip_tags': 'foobar',
'job_tags': 'untagged',
'forks': 26,
'job_slice_count': 2,
'timeout': 200,
'extra_vars': {'prompted_key': 'prompted_val'},
'job_type': 'check',
}
assert set(JobTemplate.get_ask_mapping().keys()) - set(r.keys()) == set() # make fixture comprehensive
return r
@pytest.mark.django_db @pytest.mark.django_db
class TestConfigReversibility: def test_config_reversibility(full_jt, full_prompts_dict):
""" """
Checks that a blob of saved prompts will be re-created in the Checks that a blob of saved prompts will be re-created in the
prompts_dict for launching new jobs prompts_dict for launching new jobs
""" """
config = full_jt.create_unified_job(**full_prompts_dict).launch_config
assert config.prompts_dict() == full_prompts_dict
def test_char_field_only(self, config_factory):
config = config_factory({'limit': 'foobar'})
assert config.prompts_dict() == {'limit': 'foobar'}
def test_related_objects(self, config_factory, inventory, credential): @pytest.mark.django_db
prompts = {'limit': 'foobar', 'inventory': inventory, 'credentials': set([credential])} class TestLaunchConfigModels:
config = config_factory(prompts) def get_concrete_subclasses(self, cls):
assert config.prompts_dict() == prompts r = []
for c in cls.__subclasses__():
if c._meta.abstract:
r.extend(self.get_concrete_subclasses(c))
else:
r.append(c)
return r
def test_non_job_config_complete(self):
"""This performs model validation which replaces code that used run on import."""
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS:
assert not hasattr(LaunchTimeConfigBase, field_name)
else:
assert hasattr(LaunchTimeConfigBase, field_name)
def test_subclass_fields_complete(self):
for cls in self.get_concrete_subclasses(LaunchTimeConfigBase):
for field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS:
assert hasattr(cls, field_name)

View File

@@ -12,6 +12,9 @@ from awx.main.models.workflow import (
) )
from awx.main.models.jobs import JobTemplate, Job from awx.main.models.jobs import JobTemplate, Job
from awx.main.models.projects import ProjectUpdate from awx.main.models.projects import ProjectUpdate
from awx.main.models.credential import Credential, CredentialType
from awx.main.models.label import Label
from awx.main.models.ha import InstanceGroup
from awx.main.scheduler.dag_workflow import WorkflowDAG from awx.main.scheduler.dag_workflow import WorkflowDAG
from awx.api.versioning import reverse from awx.api.versioning import reverse
from awx.api.views import WorkflowJobTemplateNodeSuccessNodesList from awx.api.views import WorkflowJobTemplateNodeSuccessNodesList
@@ -229,6 +232,65 @@ class TestWorkflowJob:
assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43} assert queued_node.get_job_kwargs()['extra_vars'] == {'a': 42, 'b': 43}
assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43} assert queued_node.ancestor_artifacts == {'a': 42, 'b': 43}
def test_combine_prompts_WFJT_to_node(self, project, inventory, organization):
"""
Test that complex prompts like variables, credentials, labels, etc
are properly combined from the workflow-level with the node-level
"""
jt = JobTemplate.objects.create(
project=project,
inventory=inventory,
ask_variables_on_launch=True,
ask_credential_on_launch=True,
ask_instance_groups_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
)
wj = WorkflowJob.objects.create(name='test-wf-job', extra_vars='{}')
common_ig = InstanceGroup.objects.create(name='common')
common_ct = CredentialType.objects.create(name='common')
node = WorkflowJobNode.objects.create(workflow_job=wj, unified_job_template=jt, extra_vars={'node_key': 'node_val'})
node.limit = 'node_limit'
node.save()
node_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='node'))
node_cred_conflicting = Credential.objects.create(credential_type=common_ct)
node.credentials.add(node_cred_unique, node_cred_conflicting)
node_labels = [Label.objects.create(name='node1', organization=organization), Label.objects.create(name='node2', organization=organization)]
node.labels.add(*node_labels)
node_igs = [common_ig, InstanceGroup.objects.create(name='node')]
for ig in node_igs:
node.instance_groups.add(ig)
# assertions for where node has prompts but workflow job does not
data = node.get_job_kwargs()
assert data['extra_vars'] == {'node_key': 'node_val'}
assert set(data['credentials']) == set([node_cred_conflicting, node_cred_unique])
assert data['instance_groups'] == node_igs
assert set(data['labels']) == set(node_labels)
assert data['limit'] == 'node_limit'
# add prompts to the WorkflowJob
wj.limit = 'wj_limit'
wj.extra_vars = {'wj_key': 'wj_val'}
wj.save()
wj_cred_unique = Credential.objects.create(credential_type=CredentialType.objects.create(name='wj'))
wj_cred_conflicting = Credential.objects.create(credential_type=common_ct)
wj.credentials.add(wj_cred_unique, wj_cred_conflicting)
wj.labels.add(Label.objects.create(name='wj1', organization=organization), Label.objects.create(name='wj2', organization=organization))
wj_igs = [InstanceGroup.objects.create(name='wj'), common_ig]
for ig in wj_igs:
wj.instance_groups.add(ig)
# assertions for behavior where node and workflow jobs have prompts
data = node.get_job_kwargs()
assert data['extra_vars'] == {'node_key': 'node_val', 'wj_key': 'wj_val'}
assert set(data['credentials']) == set([wj_cred_unique, wj_cred_conflicting, node_cred_unique])
assert data['instance_groups'] == wj_igs
assert set(data['labels']) == set(node_labels) # as exception, WFJT labels not applied
assert data['limit'] == 'wj_limit'
@pytest.mark.django_db @pytest.mark.django_db
class TestWorkflowJobTemplate: class TestWorkflowJobTemplate:
@@ -287,12 +349,25 @@ class TestWorkflowJobTemplatePrompts:
@pytest.fixture @pytest.fixture
def wfjt_prompts(self): def wfjt_prompts(self):
return WorkflowJobTemplate.objects.create( return WorkflowJobTemplate.objects.create(
ask_inventory_on_launch=True, ask_variables_on_launch=True, ask_limit_on_launch=True, ask_scm_branch_on_launch=True ask_variables_on_launch=True,
ask_inventory_on_launch=True,
ask_tags_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
ask_scm_branch_on_launch=True,
ask_skip_tags_on_launch=True,
) )
@pytest.fixture @pytest.fixture
def prompts_data(self, inventory): def prompts_data(self, inventory):
return dict(inventory=inventory, extra_vars={'foo': 'bar'}, limit='webservers', scm_branch='release-3.3') return dict(
inventory=inventory,
extra_vars={'foo': 'bar'},
limit='webservers',
scm_branch='release-3.3',
job_tags='foo',
skip_tags='bar',
)
def test_apply_workflow_job_prompts(self, workflow_job_template, wfjt_prompts, prompts_data, inventory): def test_apply_workflow_job_prompts(self, workflow_job_template, wfjt_prompts, prompts_data, inventory):
# null or empty fields used # null or empty fields used
@@ -300,6 +375,9 @@ class TestWorkflowJobTemplatePrompts:
assert workflow_job.limit is None assert workflow_job.limit is None
assert workflow_job.inventory is None assert workflow_job.inventory is None
assert workflow_job.scm_branch is None assert workflow_job.scm_branch is None
assert workflow_job.job_tags is None
assert workflow_job.skip_tags is None
assert len(workflow_job.labels.all()) is 0
# fields from prompts used # fields from prompts used
workflow_job = workflow_job_template.create_unified_job(**prompts_data) workflow_job = workflow_job_template.create_unified_job(**prompts_data)
@@ -307,15 +385,21 @@ class TestWorkflowJobTemplatePrompts:
assert workflow_job.limit == 'webservers' assert workflow_job.limit == 'webservers'
assert workflow_job.inventory == inventory assert workflow_job.inventory == inventory
assert workflow_job.scm_branch == 'release-3.3' assert workflow_job.scm_branch == 'release-3.3'
assert workflow_job.job_tags == 'foo'
assert workflow_job.skip_tags == 'bar'
# non-null fields from WFJT used # non-null fields from WFJT used
workflow_job_template.inventory = inventory workflow_job_template.inventory = inventory
workflow_job_template.limit = 'fooo' workflow_job_template.limit = 'fooo'
workflow_job_template.scm_branch = 'bar' workflow_job_template.scm_branch = 'bar'
workflow_job_template.job_tags = 'baz'
workflow_job_template.skip_tags = 'dinosaur'
workflow_job = workflow_job_template.create_unified_job() workflow_job = workflow_job_template.create_unified_job()
assert workflow_job.limit == 'fooo' assert workflow_job.limit == 'fooo'
assert workflow_job.inventory == inventory assert workflow_job.inventory == inventory
assert workflow_job.scm_branch == 'bar' assert workflow_job.scm_branch == 'bar'
assert workflow_job.job_tags == 'baz'
assert workflow_job.skip_tags == 'dinosaur'
@pytest.mark.django_db @pytest.mark.django_db
def test_process_workflow_job_prompts(self, inventory, workflow_job_template, wfjt_prompts, prompts_data): def test_process_workflow_job_prompts(self, inventory, workflow_job_template, wfjt_prompts, prompts_data):
@@ -340,12 +424,19 @@ class TestWorkflowJobTemplatePrompts:
ask_limit_on_launch=True, ask_limit_on_launch=True,
scm_branch='bar', scm_branch='bar',
ask_scm_branch_on_launch=True, ask_scm_branch_on_launch=True,
job_tags='foo',
skip_tags='bar',
), ),
user=org_admin, user=org_admin,
expect=201, expect=201,
) )
wfjt = WorkflowJobTemplate.objects.get(id=r.data['id']) wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
assert wfjt.char_prompts == {'limit': 'foooo', 'scm_branch': 'bar'} assert wfjt.char_prompts == {
'limit': 'foooo',
'scm_branch': 'bar',
'job_tags': 'foo',
'skip_tags': 'bar',
}
assert wfjt.ask_scm_branch_on_launch is True assert wfjt.ask_scm_branch_on_launch is True
assert wfjt.ask_limit_on_launch is True assert wfjt.ask_limit_on_launch is True
@@ -355,6 +446,67 @@ class TestWorkflowJobTemplatePrompts:
assert r.data['limit'] == 'prompt_limit' assert r.data['limit'] == 'prompt_limit'
assert r.data['scm_branch'] == 'prompt_branch' assert r.data['scm_branch'] == 'prompt_branch'
@pytest.mark.django_db
def test_set_all_ask_for_prompts_false_from_post(self, post, organization, inventory, org_admin):
'''
Tests default behaviour and values of ask_for_* fields on WFJT via POST
'''
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
inventory=inventory.id,
job_tags='',
skip_tags='',
),
user=org_admin,
expect=201,
)
wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
assert wfjt.ask_inventory_on_launch is False
assert wfjt.ask_labels_on_launch is False
assert wfjt.ask_limit_on_launch is False
assert wfjt.ask_scm_branch_on_launch is False
assert wfjt.ask_skip_tags_on_launch is False
assert wfjt.ask_tags_on_launch is False
assert wfjt.ask_variables_on_launch is False
@pytest.mark.django_db
def test_set_all_ask_for_prompts_true_from_post(self, post, organization, inventory, org_admin):
'''
Tests behaviour and values of ask_for_* fields on WFJT via POST
'''
r = post(
url=reverse('api:workflow_job_template_list'),
data=dict(
name='workflow that tests ask_for prompts',
organization=organization.id,
inventory=inventory.id,
job_tags='',
skip_tags='',
ask_inventory_on_launch=True,
ask_labels_on_launch=True,
ask_limit_on_launch=True,
ask_scm_branch_on_launch=True,
ask_skip_tags_on_launch=True,
ask_tags_on_launch=True,
ask_variables_on_launch=True,
),
user=org_admin,
expect=201,
)
wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])
assert wfjt.ask_inventory_on_launch is True
assert wfjt.ask_labels_on_launch is True
assert wfjt.ask_limit_on_launch is True
assert wfjt.ask_scm_branch_on_launch is True
assert wfjt.ask_skip_tags_on_launch is True
assert wfjt.ask_tags_on_launch is True
assert wfjt.ask_variables_on_launch is True
@pytest.mark.django_db @pytest.mark.django_db
def test_workflow_ancestors(organization): def test_workflow_ancestors(organization):

View File

@@ -6,12 +6,19 @@ from awx.main.utils import decrypt_field
from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowApprovalTemplate from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobTemplateNode, WorkflowApprovalTemplate
from awx.main.models.jobs import JobTemplate from awx.main.models.jobs import JobTemplate
from awx.main.tasks.system import deep_copy_model_obj from awx.main.tasks.system import deep_copy_model_obj
from awx.main.models import Label, ExecutionEnvironment, InstanceGroup
@pytest.mark.django_db @pytest.mark.django_db
def test_job_template_copy(post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin): def test_job_template_copy(
post, get, project, inventory, machine_credential, vault_credential, credential, alice, job_template_with_survey_passwords, admin, organization
):
label = Label.objects.create(name="foobar", organization=organization)
ig = InstanceGroup.objects.create(name="bazbar", organization=organization)
job_template_with_survey_passwords.project = project job_template_with_survey_passwords.project = project
job_template_with_survey_passwords.inventory = inventory job_template_with_survey_passwords.inventory = inventory
job_template_with_survey_passwords.labels.add(label)
job_template_with_survey_passwords.instance_groups.add(ig)
job_template_with_survey_passwords.save() job_template_with_survey_passwords.save()
job_template_with_survey_passwords.credentials.add(credential) job_template_with_survey_passwords.credentials.add(credential)
job_template_with_survey_passwords.credentials.add(machine_credential) job_template_with_survey_passwords.credentials.add(machine_credential)
@@ -54,6 +61,10 @@ def test_job_template_copy(post, get, project, inventory, machine_credential, va
assert vault_credential in jt_copy.credentials.all() assert vault_credential in jt_copy.credentials.all()
assert machine_credential in jt_copy.credentials.all() assert machine_credential in jt_copy.credentials.all()
assert job_template_with_survey_passwords.survey_spec == jt_copy.survey_spec assert job_template_with_survey_passwords.survey_spec == jt_copy.survey_spec
assert jt_copy.labels.count() != 0
assert jt_copy.labels.get(pk=label.pk) == label
assert jt_copy.instance_groups.count() != 0
assert jt_copy.instance_groups.get(pk=ig.pk) == ig
@pytest.mark.django_db @pytest.mark.django_db
@@ -109,8 +120,22 @@ def test_inventory_copy(inventory, group_factory, post, get, alice, organization
@pytest.mark.django_db @pytest.mark.django_db
def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization): def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization):
'''
Tests the FIELDS_TO_PRESERVE_AT_COPY attribute on WFJTs
'''
workflow_job_template.organization = organization workflow_job_template.organization = organization
label = Label.objects.create(name="foobar", organization=organization)
workflow_job_template.labels.add(label)
ee = ExecutionEnvironment.objects.create(name="barfoo", organization=organization)
workflow_job_template.execution_environment = ee
ig = InstanceGroup.objects.create(name="bazbar", organization=organization)
workflow_job_template.instance_groups.add(ig)
workflow_job_template.save() workflow_job_template.save()
jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)] jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)]
nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)] nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)]
nodes[0].success_nodes.add(nodes[1]) nodes[0].success_nodes.add(nodes[1])
@@ -124,9 +149,16 @@ def test_workflow_job_template_copy(workflow_job_template, post, get, admin, org
wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id) wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id)
args, kwargs = deep_copy_mock.call_args args, kwargs = deep_copy_mock.call_args
deep_copy_model_obj(*args, **kwargs) deep_copy_model_obj(*args, **kwargs)
assert wfjt_copy.organization == organization assert wfjt_copy.organization == organization
assert wfjt_copy.created_by == admin assert wfjt_copy.created_by == admin
assert wfjt_copy.name == 'new wfjt name' assert wfjt_copy.name == 'new wfjt name'
assert wfjt_copy.labels.count() != 0
assert wfjt_copy.labels.get(pk=label.pk) == label
assert wfjt_copy.execution_environment == ee
assert wfjt_copy.instance_groups.count() != 0
assert wfjt_copy.instance_groups.get(pk=ig.pk) == ig
copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()] copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()]
copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1])) copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1]))
for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]): for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]):

View File

@@ -417,3 +417,31 @@ class TestInstanceGroupOrdering:
assert job.preferred_instance_groups == [ig_inv, ig_org] assert job.preferred_instance_groups == [ig_inv, ig_org]
job.job_template.instance_groups.add(ig_tmp) job.job_template.instance_groups.add(ig_tmp)
assert job.preferred_instance_groups == [ig_tmp, ig_inv, ig_org] assert job.preferred_instance_groups == [ig_tmp, ig_inv, ig_org]
def test_job_instance_groups_cache_default(self, instance_group_factory, inventory, project, default_instance_group):
jt = JobTemplate.objects.create(inventory=inventory, project=project)
job = jt.create_unified_job()
print(job.preferred_instance_groups_cache)
print(default_instance_group)
assert job.preferred_instance_groups_cache == [default_instance_group.id]
def test_job_instance_groups_cache_default_additional_items(self, instance_group_factory, inventory, project, default_instance_group):
ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
project.organization.instance_groups.add(ig_org)
inventory.instance_groups.add(ig_inv)
jt = JobTemplate.objects.create(inventory=inventory, project=project)
jt.instance_groups.add(ig_tmp)
job = jt.create_unified_job()
assert job.preferred_instance_groups_cache == [ig_tmp.id, ig_inv.id, ig_org.id]
def test_job_instance_groups_cache_prompt(self, instance_group_factory, inventory, project, default_instance_group):
ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
project.organization.instance_groups.add(ig_org)
inventory.instance_groups.add(ig_inv)
jt = JobTemplate.objects.create(inventory=inventory, project=project)
job = jt.create_unified_job(instance_groups=[ig_tmp])
assert job.preferred_instance_groups_cache == [ig_tmp.id]

View File

@@ -3,7 +3,20 @@ import pytest
from unittest import mock from unittest import mock
import json import json
from awx.main.models import Job, Instance, JobHostSummary, InventoryUpdate, InventorySource, Project, ProjectUpdate, SystemJob, AdHocCommand from awx.main.models import (
Job,
Instance,
JobHostSummary,
InventoryUpdate,
InventorySource,
Project,
ProjectUpdate,
SystemJob,
AdHocCommand,
InstanceGroup,
Label,
ExecutionEnvironment,
)
from awx.main.tasks.system import cluster_node_heartbeat from awx.main.tasks.system import cluster_node_heartbeat
from django.test.utils import override_settings from django.test.utils import override_settings
@@ -103,14 +116,88 @@ def test_job_notification_host_data(inventory, machine_credential, project, job_
class TestLaunchConfig: class TestLaunchConfig:
def test_null_creation_from_prompts(self): def test_null_creation_from_prompts(self):
job = Job.objects.create() job = Job.objects.create()
data = {"credentials": [], "extra_vars": {}, "limit": None, "job_type": None} data = {
"credentials": [],
"extra_vars": {},
"limit": None,
"job_type": None,
"execution_environment": None,
"instance_groups": None,
"labels": None,
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data) config = job.create_config_from_prompts(data)
assert config is None assert config is None
def test_only_limit_defined(self, job_template): def test_only_limit_defined(self, job_template):
job = Job.objects.create(job_template=job_template) job = Job.objects.create(job_template=job_template)
data = {"credentials": [], "extra_vars": {}, "job_tags": None, "limit": ""} data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": "",
"execution_environment": None,
"instance_groups": None,
"labels": None,
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data) config = job.create_config_from_prompts(data)
assert config.char_prompts == {"limit": ""} assert config.char_prompts == {"limit": ""}
assert not config.credentials.exists() assert not config.credentials.exists()
assert config.prompts_dict() == {"limit": ""} assert config.prompts_dict() == {"limit": ""}
def test_many_to_many_fields(self, job_template, organization):
job = Job.objects.create(job_template=job_template)
ig1 = InstanceGroup.objects.create(name='bar')
ig2 = InstanceGroup.objects.create(name='foo')
job_template.instance_groups.add(ig2)
label1 = Label.objects.create(name='foo', description='bar', organization=organization)
label2 = Label.objects.create(name='faz', description='baz', organization=organization)
# Order should matter here which is why we do 2 and then 1
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": None,
"execution_environment": None,
"instance_groups": [ig2, ig1],
"labels": [label2, label1],
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)
assert config.instance_groups.exists()
config_instance_group_ids = [item.id for item in config.instance_groups.all()]
assert config_instance_group_ids == [ig2.id, ig1.id]
assert config.labels.exists()
config_label_ids = [item.id for item in config.labels.all()]
assert config_label_ids == [label2.id, label1.id]
def test_pk_field(self, job_template, organization):
job = Job.objects.create(job_template=job_template)
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
# Order should matter here which is why we do 2 and then 1
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": None,
"execution_environment": ee,
"instance_groups": [],
"labels": [],
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)
assert config.execution_environment
# We just write the PK instead of trying to assign an item, that happens on the save
assert config.execution_environment_id == ee.id

View File

@@ -3,7 +3,20 @@ import pytest
from rest_framework.exceptions import PermissionDenied from rest_framework.exceptions import PermissionDenied
from awx.main.access import JobAccess, JobLaunchConfigAccess, AdHocCommandAccess, InventoryUpdateAccess, ProjectUpdateAccess from awx.main.access import JobAccess, JobLaunchConfigAccess, AdHocCommandAccess, InventoryUpdateAccess, ProjectUpdateAccess
from awx.main.models import Job, JobLaunchConfig, JobTemplate, AdHocCommand, InventoryUpdate, InventorySource, ProjectUpdate, User, Credential from awx.main.models import (
Job,
JobLaunchConfig,
JobTemplate,
AdHocCommand,
InventoryUpdate,
InventorySource,
ProjectUpdate,
User,
Credential,
ExecutionEnvironment,
InstanceGroup,
Label,
)
from crum import impersonate from crum import impersonate
@@ -302,13 +315,33 @@ class TestLaunchConfigAccess:
access = JobLaunchConfigAccess(rando) access = JobLaunchConfigAccess(rando)
cred1, cred2 = self._make_two_credentials(credentialtype_ssh) cred1, cred2 = self._make_two_credentials(credentialtype_ssh)
assert access.has_credentials_access(config) # has access if 0 creds assert access.has_obj_m2m_access(config) # has access if 0 creds
config.credentials.add(cred1, cred2) config.credentials.add(cred1, cred2)
assert not access.has_credentials_access(config) # lacks access to both assert not access.has_obj_m2m_access(config) # lacks access to both
cred1.use_role.members.add(rando) cred1.use_role.members.add(rando)
assert not access.has_credentials_access(config) # lacks access to 1 assert not access.has_obj_m2m_access(config) # lacks access to 1
cred2.use_role.members.add(rando) cred2.use_role.members.add(rando)
assert access.has_credentials_access(config) # has access to both assert access.has_obj_m2m_access(config) # has access to both
def test_new_execution_environment_access(self, rando):
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
access = JobLaunchConfigAccess(rando)
assert access.can_add({'execution_environment': ee}) # can add because access to ee will be granted
def test_new_label_access(self, rando, organization):
label = Label.objects.create(name='foo', description='bar', organization=organization)
access = JobLaunchConfigAccess(rando)
assert not access.can_add({'labels': [label]}) # can't add because no access to label
# We assert in JT unit tests that the access will be granted if label is in JT
def test_new_instance_group_access(self, rando):
ig = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
access = JobLaunchConfigAccess(rando)
assert not access.can_add({'instance_groups': [ig]}) # can't add because no access to ig
# We assert in JT unit tests that the access will be granted if instance group is in JT
def test_can_use_minor(self, rando): def test_can_use_minor(self, rando):
# Config object only has flat-field overrides, no RBAC restrictions # Config object only has flat-field overrides, no RBAC restrictions

View File

@@ -6,6 +6,7 @@ from awx.main.access import (
WorkflowJobAccess, WorkflowJobAccess,
# WorkflowJobNodeAccess # WorkflowJobNodeAccess
) )
from awx.main.models import JobTemplate, WorkflowJobTemplateNode
from rest_framework.exceptions import PermissionDenied from rest_framework.exceptions import PermissionDenied
@@ -87,6 +88,16 @@ class TestWorkflowJobTemplateNodeAccess:
job_template.read_role.members.add(rando) job_template.read_role.members.add(rando)
assert not access.can_add({'workflow_job_template': wfjt, 'unified_job_template': job_template}) assert not access.can_add({'workflow_job_template': wfjt, 'unified_job_template': job_template})
def test_change_JT_no_start_perm(self, wfjt, rando):
wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateNodeAccess(rando)
jt1 = JobTemplate.objects.create()
jt1.execute_role.members.add(rando)
assert access.can_add({'workflow_job_template': wfjt, 'unified_job_template': jt1})
node = WorkflowJobTemplateNode.objects.create(workflow_job_template=wfjt, unified_job_template=jt1)
jt2 = JobTemplate.objects.create()
assert not access.can_change(node, {'unified_job_template': jt2.id})
def test_add_node_with_minimum_permissions(self, wfjt, job_template, inventory, rando): def test_add_node_with_minimum_permissions(self, wfjt, job_template, inventory, rando):
wfjt.admin_role.members.add(rando) wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateNodeAccess(rando) access = WorkflowJobTemplateNodeAccess(rando)
@@ -101,6 +112,92 @@ class TestWorkflowJobTemplateNodeAccess:
access = WorkflowJobTemplateNodeAccess(rando) access = WorkflowJobTemplateNodeAccess(rando)
assert access.can_delete(wfjt_node) assert access.can_delete(wfjt_node)
@pytest.mark.parametrize(
"add_wfjt_admin, add_jt_admin, permission_type, expected_result, method_type",
[
(True, False, 'credentials', False, 'can_attach'),
(True, True, 'credentials', True, 'can_attach'),
(True, False, 'labels', False, 'can_attach'),
(True, True, 'labels', True, 'can_attach'),
(True, False, 'instance_groups', False, 'can_attach'),
(True, True, 'instance_groups', True, 'can_attach'),
(True, False, 'credentials', False, 'can_unattach'),
(True, True, 'credentials', True, 'can_unattach'),
(True, False, 'labels', False, 'can_unattach'),
(True, True, 'labels', True, 'can_unattach'),
(True, False, 'instance_groups', False, 'can_unattach'),
(True, True, 'instance_groups', True, 'can_unattach'),
],
)
def test_attacher_permissions(self, wfjt_node, job_template, rando, add_wfjt_admin, permission_type, add_jt_admin, expected_result, mocker, method_type):
wfjt = wfjt_node.workflow_job_template
if add_wfjt_admin:
wfjt.admin_role.members.add(rando)
wfjt.unified_job_template = job_template
if add_jt_admin:
job_template.execute_role.members.add(rando)
from awx.main.models import Credential, Label, InstanceGroup, Organization, CredentialType
if permission_type == 'credentials':
sub_obj = Credential.objects.create(credential_type=CredentialType.objects.create())
sub_obj.use_role.members.add(rando)
elif permission_type == 'labels':
sub_obj = Label.objects.create(organization=Organization.objects.create())
sub_obj.organization.member_role.members.add(rando)
elif permission_type == 'instance_groups':
sub_obj = InstanceGroup.objects.create()
org = Organization.objects.create()
org.admin_role.members.add(rando) # only admins can see IGs
org.instance_groups.add(sub_obj)
access = WorkflowJobTemplateNodeAccess(rando)
if method_type == 'can_unattach':
assert getattr(access, method_type)(wfjt_node, sub_obj, permission_type) == expected_result
else:
assert getattr(access, method_type)(wfjt_node, sub_obj, permission_type, {}) == expected_result
# The actual attachment of labels, credentials and instance groups are tested from JobLaunchConfigAccess
@pytest.mark.parametrize(
"attachment_type, expect_exception, method_type",
[
("credentials", False, 'can_attach'),
("labels", False, 'can_attach'),
("instance_groups", False, 'can_attach'),
("success_nodes", False, 'can_attach'),
("failure_nodes", False, 'can_attach'),
("always_nodes", False, 'can_attach'),
("junk", True, 'can_attach'),
("credentials", False, 'can_unattach'),
("labels", False, 'can_unattach'),
("instance_groups", False, 'can_unattach'),
("success_nodes", False, 'can_unattach'),
("failure_nodes", False, 'can_unattach'),
("always_nodes", False, 'can_unattach'),
("junk", True, 'can_unattach'),
],
)
def test_attacher_raise_not_implemented(self, wfjt_node, rando, attachment_type, expect_exception, method_type):
wfjt = wfjt_node.workflow_job_template
wfjt.admin_role.members.add(rando)
access = WorkflowJobTemplateNodeAccess(rando)
if expect_exception:
with pytest.raises(NotImplementedError):
access.can_attach(wfjt_node, None, attachment_type, None)
else:
try:
getattr(access, method_type)(wfjt_node, None, attachment_type, None)
except NotImplementedError:
# We explicitly catch NotImplemented because the _nodes type will raise a different exception
assert False, "Exception was raised when it should not have been"
except Exception:
# File "/awx_devel/awx/main/access.py", line 2074, in check_same_WFJT
# raise Exception('Attaching workflow nodes only allowed for other nodes')
pass
# TODO: Implement additional tests for _nodes attachments here
@pytest.mark.django_db @pytest.mark.django_db
class TestWorkflowJobAccess: class TestWorkflowJobAccess:

View File

@@ -8,9 +8,17 @@ from rest_framework.exceptions import ValidationError
from awx.api.serializers import JobLaunchSerializer from awx.api.serializers import JobLaunchSerializer
def test_primary_key_related_field(): @pytest.mark.parametrize(
"param",
[
('credentials'),
('instance_groups'),
('labels'),
],
)
def test_primary_key_related_field(param):
# We are testing if the PrimaryKeyRelatedField in this serializer can take dictionary. # We are testing if the PrimaryKeyRelatedField in this serializer can take dictionary.
# PrimaryKeyRelatedField should not be able to take dictionary as input, and should raise a ValidationError. # PrimaryKeyRelatedField should not be able to take dictionary as input, and should raise a ValidationError.
data = {'credentials': {'1': '2', '3': '4'}} data = {param: {'1': '2', '3': '4'}}
with pytest.raises(ValidationError): with pytest.raises(ValidationError):
JobLaunchSerializer(data=data) JobLaunchSerializer(data=data)

View File

@@ -11,6 +11,7 @@ from awx.api.serializers import (
from awx.main.models import Job, WorkflowJobTemplateNode, WorkflowJob, WorkflowJobNode, WorkflowJobTemplate, Project, Inventory, JobTemplate from awx.main.models import Job, WorkflowJobTemplateNode, WorkflowJob, WorkflowJobNode, WorkflowJobTemplate, Project, Inventory, JobTemplate
@pytest.mark.django_db
@mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x, y: {}) @mock.patch('awx.api.serializers.UnifiedJobTemplateSerializer.get_related', lambda x, y: {})
class TestWorkflowJobTemplateSerializerGetRelated: class TestWorkflowJobTemplateSerializerGetRelated:
@pytest.fixture @pytest.fixture
@@ -26,6 +27,7 @@ class TestWorkflowJobTemplateSerializerGetRelated:
'launch', 'launch',
'workflow_nodes', 'workflow_nodes',
'webhook_key', 'webhook_key',
'labels',
], ],
) )
def test_get_related(self, mocker, test_get_related, workflow_job_template, related_resource_name): def test_get_related(self, mocker, test_get_related, workflow_job_template, related_resource_name):
@@ -58,6 +60,7 @@ class TestWorkflowNodeBaseSerializerGetRelated:
assert 'unified_job_template' not in related assert 'unified_job_template' not in related
@pytest.mark.django_db
@mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x, y: {}) @mock.patch('awx.api.serializers.BaseSerializer.get_related', lambda x, y: {})
class TestWorkflowJobTemplateNodeSerializerGetRelated: class TestWorkflowJobTemplateNodeSerializerGetRelated:
@pytest.fixture @pytest.fixture
@@ -87,6 +90,8 @@ class TestWorkflowJobTemplateNodeSerializerGetRelated:
'success_nodes', 'success_nodes',
'failure_nodes', 'failure_nodes',
'always_nodes', 'always_nodes',
'labels',
'instance_groups',
], ],
) )
def test_get_related(self, test_get_related, workflow_job_template_node, related_resource_name): def test_get_related(self, test_get_related, workflow_job_template_node, related_resource_name):
@@ -146,6 +151,7 @@ class TestWorkflowJobTemplateNodeSerializerCharPrompts:
assert WFJT_serializer.instance.limit == 'webservers' assert WFJT_serializer.instance.limit == 'webservers'
@pytest.mark.django_db
@mock.patch('awx.api.serializers.BaseSerializer.validate', lambda self, attrs: attrs) @mock.patch('awx.api.serializers.BaseSerializer.validate', lambda self, attrs: attrs)
class TestWorkflowJobTemplateNodeSerializerSurveyPasswords: class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
@pytest.fixture @pytest.fixture
@@ -162,7 +168,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
def test_set_survey_passwords_create(self, jt): def test_set_survey_passwords_create(self, jt):
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate(name='fake-wfjt') wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt')
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}})
assert 'survey_passwords' in attrs assert 'survey_passwords' in attrs
assert 'var1' in attrs['survey_passwords'] assert 'var1' in attrs['survey_passwords']
@@ -171,7 +177,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
def test_set_survey_passwords_modify(self, jt): def test_set_survey_passwords_modify(self, jt):
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate(name='fake-wfjt') wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt')
serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt) serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt)
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': 'secret_answer'}})
assert 'survey_passwords' in attrs assert 'survey_passwords' in attrs
@@ -181,7 +187,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
def test_use_db_answer(self, jt, mocker): def test_use_db_answer(self, jt, mocker):
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate(name='fake-wfjt') wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt')
serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt, extra_data={'var1': '$encrypted$foooooo'}) serializer.instance = WorkflowJobTemplateNode(workflow_job_template=wfjt, unified_job_template=jt, extra_data={'var1': '$encrypted$foooooo'})
with mocker.patch('awx.main.models.mixins.decrypt_value', return_value='foo'): with mocker.patch('awx.main.models.mixins.decrypt_value', return_value='foo'):
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}})
@@ -196,7 +202,7 @@ class TestWorkflowJobTemplateNodeSerializerSurveyPasswords:
with that particular var omitted so on launch time the default takes effect with that particular var omitted so on launch time the default takes effect
""" """
serializer = WorkflowJobTemplateNodeSerializer() serializer = WorkflowJobTemplateNodeSerializer()
wfjt = WorkflowJobTemplate(name='fake-wfjt') wfjt = WorkflowJobTemplate.objects.create(name='fake-wfjt')
jt.survey_spec['spec'][0]['default'] = '$encrypted$bar' jt.survey_spec['spec'][0]['default'] = '$encrypted$bar'
attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}}) attrs = serializer.validate({'unified_job_template': jt, 'workflow_job_template': wfjt, 'extra_data': {'var1': '$encrypted$'}})
assert 'survey_passwords' in attrs assert 'survey_passwords' in attrs
@@ -230,6 +236,8 @@ class TestWorkflowJobNodeSerializerGetRelated:
'success_nodes', 'success_nodes',
'failure_nodes', 'failure_nodes',
'always_nodes', 'always_nodes',
'labels',
'instance_groups',
], ],
) )
def test_get_related(self, test_get_related, workflow_job_node, related_resource_name): def test_get_related(self, test_get_related, workflow_job_node, related_resource_name):

View File

@@ -59,7 +59,7 @@ class TestApiRootView:
class TestJobTemplateLabelList: class TestJobTemplateLabelList:
def test_inherited_mixin_unattach(self): def test_inherited_mixin_unattach(self):
with mock.patch('awx.api.generics.DeleteLastUnattachLabelMixin.unattach') as mixin_unattach: with mock.patch('awx.api.views.labels.LabelSubListCreateAttachDetachView.unattach') as mixin_unattach:
view = JobTemplateLabelList() view = JobTemplateLabelList()
mock_request = mock.MagicMock() mock_request = mock.MagicMock()

View File

@@ -1,9 +1,15 @@
import pytest import pytest
from unittest import mock from unittest import mock
from awx.main.models.label import Label from awx.main.models import (
from awx.main.models.unified_jobs import UnifiedJobTemplate, UnifiedJob Label,
from awx.main.models.inventory import Inventory UnifiedJobTemplate,
UnifiedJob,
Inventory,
Schedule,
WorkflowJobTemplateNode,
WorkflowJobNode,
)
mock_query_set = mock.MagicMock() mock_query_set = mock.MagicMock()
@@ -14,12 +20,6 @@ mock_objects = mock.MagicMock(filter=mock.MagicMock(return_value=mock_query_set)
@pytest.mark.django_db @pytest.mark.django_db
@mock.patch('awx.main.models.label.Label.objects', mock_objects) @mock.patch('awx.main.models.label.Label.objects', mock_objects)
class TestLabelFilterMocked: class TestLabelFilterMocked:
def test_get_orphaned_labels(self, mocker):
ret = Label.get_orphaned_labels()
assert mock_query_set == ret
Label.objects.filter.assert_called_with(organization=None, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True)
def test_is_detached(self, mocker): def test_is_detached(self, mocker):
mock_query_set.exists.return_value = True mock_query_set.exists.return_value = True
@@ -27,7 +27,15 @@ class TestLabelFilterMocked:
ret = label.is_detached() ret = label.is_detached()
assert ret is True assert ret is True
Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True) Label.objects.filter.assert_called_with(
id=37,
unifiedjob_labels__isnull=True,
unifiedjobtemplate_labels__isnull=True,
inventory_labels__isnull=True,
schedule_labels__isnull=True,
workflowjobtemplatenode_labels__isnull=True,
workflowjobnode_labels__isnull=True,
)
mock_query_set.exists.assert_called_with() mock_query_set.exists.assert_called_with()
def test_is_detached_not(self, mocker): def test_is_detached_not(self, mocker):
@@ -37,39 +45,102 @@ class TestLabelFilterMocked:
ret = label.is_detached() ret = label.is_detached()
assert ret is False assert ret is False
Label.objects.filter.assert_called_with(id=37, unifiedjob_labels__isnull=True, unifiedjobtemplate_labels__isnull=True, inventory_labels__isnull=True) Label.objects.filter.assert_called_with(
id=37,
unifiedjob_labels__isnull=True,
unifiedjobtemplate_labels__isnull=True,
inventory_labels__isnull=True,
schedule_labels__isnull=True,
workflowjobtemplatenode_labels__isnull=True,
workflowjobnode_labels__isnull=True,
)
mock_query_set.exists.assert_called_with() mock_query_set.exists.assert_called_with()
@pytest.mark.parametrize( @pytest.mark.parametrize(
"jt_count,j_count,inv_count,expected", "jt_count,j_count,inv_count,sched_count,wfnode_count,wfnodej_count,expected",
[ [
(1, 0, 0, True), (1, 0, 0, 0, 0, 0, True),
(0, 1, 0, True), (0, 1, 0, 0, 0, 0, True),
(0, 0, 1, True), (1, 1, 0, 0, 0, 0, False),
(1, 1, 1, False), (0, 0, 1, 0, 0, 0, True),
(1, 0, 1, 0, 0, 0, False),
(0, 1, 1, 0, 0, 0, False),
(1, 1, 1, 0, 0, 0, False),
(0, 0, 0, 1, 0, 0, True),
(1, 0, 0, 1, 0, 0, False),
(0, 1, 0, 1, 0, 0, False),
(1, 1, 0, 1, 0, 0, False),
(0, 0, 1, 1, 0, 0, False),
(1, 0, 1, 1, 0, 0, False),
(0, 1, 1, 1, 0, 0, False),
(1, 1, 1, 1, 0, 0, False),
(0, 0, 0, 0, 1, 0, True),
(1, 0, 0, 0, 1, 0, False),
(0, 1, 0, 0, 1, 0, False),
(1, 1, 0, 0, 1, 0, False),
(0, 0, 1, 0, 1, 0, False),
(1, 0, 1, 0, 1, 0, False),
(0, 1, 1, 0, 1, 0, False),
(1, 1, 1, 0, 1, 0, False),
(0, 0, 0, 1, 1, 0, False),
(1, 0, 0, 1, 1, 0, False),
(0, 1, 0, 1, 1, 0, False),
(1, 1, 0, 1, 1, 0, False),
(0, 0, 1, 1, 1, 0, False),
(1, 0, 1, 1, 1, 0, False),
(0, 1, 1, 1, 1, 0, False),
(1, 1, 1, 1, 1, 0, False),
(0, 0, 0, 0, 0, 1, True),
(1, 0, 0, 0, 0, 1, False),
(0, 1, 0, 0, 0, 1, False),
(1, 1, 0, 0, 0, 1, False),
(0, 0, 1, 0, 0, 1, False),
(1, 0, 1, 0, 0, 1, False),
(0, 1, 1, 0, 0, 1, False),
(1, 1, 1, 0, 0, 1, False),
(0, 0, 0, 1, 0, 1, False),
(1, 0, 0, 1, 0, 1, False),
(0, 1, 0, 1, 0, 1, False),
(1, 1, 0, 1, 0, 1, False),
(0, 0, 1, 1, 0, 1, False),
(1, 0, 1, 1, 0, 1, False),
(0, 1, 1, 1, 0, 1, False),
(1, 1, 1, 1, 0, 1, False),
(0, 0, 0, 0, 1, 1, False),
(1, 0, 0, 0, 1, 1, False),
(0, 1, 0, 0, 1, 1, False),
(1, 1, 0, 0, 1, 1, False),
(0, 0, 1, 0, 1, 1, False),
(1, 0, 1, 0, 1, 1, False),
(0, 1, 1, 0, 1, 1, False),
(1, 1, 1, 0, 1, 1, False),
(0, 0, 0, 1, 1, 1, False),
(1, 0, 0, 1, 1, 1, False),
(0, 1, 0, 1, 1, 1, False),
(1, 1, 0, 1, 1, 1, False),
(0, 0, 1, 1, 1, 1, False),
(1, 0, 1, 1, 1, 1, False),
(0, 1, 1, 1, 1, 1, False),
(1, 1, 1, 1, 1, 1, False),
], ],
) )
def test_is_candidate_for_detach(self, mocker, jt_count, j_count, inv_count, expected): def test_is_candidate_for_detach(self, mocker, jt_count, j_count, inv_count, sched_count, wfnode_count, wfnodej_count, expected):
mock_job_qs = mocker.MagicMock() counts = [jt_count, j_count, inv_count, sched_count, wfnode_count, wfnodej_count]
mock_job_qs.count = mocker.MagicMock(return_value=j_count) models = [UnifiedJobTemplate, UnifiedJob, Inventory, Schedule, WorkflowJobTemplateNode, WorkflowJobNode]
mocker.patch.object(UnifiedJob, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_job_qs))) mockers = []
for index in range(0, len(models)):
mock_jt_qs = mocker.MagicMock() a_mocker = mocker.MagicMock()
mock_jt_qs.count = mocker.MagicMock(return_value=jt_count) a_mocker.count = mocker.MagicMock(return_value=counts[index])
mocker.patch.object(UnifiedJobTemplate, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_jt_qs))) mocker.patch.object(models[index], 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=a_mocker)))
mockers.append(a_mocker)
mock_inv_qs = mocker.MagicMock()
mock_inv_qs.count = mocker.MagicMock(return_value=inv_count)
mocker.patch.object(Inventory, 'objects', mocker.MagicMock(filter=mocker.MagicMock(return_value=mock_inv_qs)))
label = Label(id=37) label = Label(id=37)
ret = label.is_candidate_for_detach() ret = label.is_candidate_for_detach()
UnifiedJob.objects.filter.assert_called_with(labels__in=[label.id]) for index in range(0, len(models)):
UnifiedJobTemplate.objects.filter.assert_called_with(labels__in=[label.id]) models[index].objects.filter.assert_called_with(labels__in=[label.id])
Inventory.objects.filter.assert_called_with(labels__in=[label.id]) for index in range(0, len(mockers)):
mock_job_qs.count.assert_called_with() mockers[index].count.assert_called_with()
mock_jt_qs.count.assert_called_with()
mock_inv_qs.count.assert_called_with()
assert ret is expected assert ret is expected

View File

@@ -259,13 +259,14 @@ def test_survey_encryption_defaults(survey_spec_factory, question_type, default,
@pytest.mark.survey @pytest.mark.survey
@pytest.mark.django_db
class TestWorkflowSurveys: class TestWorkflowSurveys:
def test_update_kwargs_survey_defaults(self, survey_spec_factory): def test_update_kwargs_survey_defaults(self, survey_spec_factory):
"Assure that the survey default over-rides a JT variable" "Assure that the survey default over-rides a JT variable"
spec = survey_spec_factory('var1') spec = survey_spec_factory('var1')
spec['spec'][0]['default'] = 3 spec['spec'][0]['default'] = 3
spec['spec'][0]['required'] = False spec['spec'][0]['required'] = False
wfjt = WorkflowJobTemplate(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="var1: 5") wfjt = WorkflowJobTemplate.objects.create(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="var1: 5")
updated_extra_vars = wfjt._update_unified_job_kwargs({}, {}) updated_extra_vars = wfjt._update_unified_job_kwargs({}, {})
assert 'extra_vars' in updated_extra_vars assert 'extra_vars' in updated_extra_vars
assert json.loads(updated_extra_vars['extra_vars'])['var1'] == 3 assert json.loads(updated_extra_vars['extra_vars'])['var1'] == 3
@@ -277,7 +278,7 @@ class TestWorkflowSurveys:
spec['spec'][0]['required'] = False spec['spec'][0]['required'] = False
spec['spec'][1]['required'] = True spec['spec'][1]['required'] = True
spec['spec'][2]['required'] = False spec['spec'][2]['required'] = False
wfjt = WorkflowJobTemplate(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="question2: hiworld") wfjt = WorkflowJobTemplate.objects.create(name="test-wfjt", survey_spec=spec, survey_enabled=True, extra_vars="question2: hiworld")
assert wfjt.variables_needed_to_start == ['question2'] assert wfjt.variables_needed_to_start == ['question2']
assert not wfjt.can_start_without_user_input() assert not wfjt.can_start_without_user_input()
@@ -311,6 +312,6 @@ class TestExtraVarsNoPrompt:
self.process_vars_and_assert(jt, provided_vars, valid) self.process_vars_and_assert(jt, provided_vars, valid)
def test_wfjt_extra_vars_counting(self, provided_vars, valid): def test_wfjt_extra_vars_counting(self, provided_vars, valid):
wfjt = WorkflowJobTemplate(name='foo', extra_vars={'tmpl_var': 'bar'}) wfjt = WorkflowJobTemplate.objects.create(name='foo', extra_vars={'tmpl_var': 'bar'})
prompted_fields, ignored_fields, errors = wfjt._accept_or_ignore_job_kwargs(extra_vars=provided_vars) prompted_fields, ignored_fields, errors = wfjt._accept_or_ignore_job_kwargs(extra_vars=provided_vars)
self.process_vars_and_assert(wfjt, provided_vars, valid) self.process_vars_and_assert(wfjt, provided_vars, valid)

View File

@@ -94,7 +94,7 @@ def workflow_job_unit():
@pytest.fixture @pytest.fixture
def workflow_job_template_unit(): def workflow_job_template_unit():
return WorkflowJobTemplate(name='workflow') return WorkflowJobTemplate.objects.create(name='workflow')
@pytest.fixture @pytest.fixture
@@ -151,6 +151,7 @@ def test_node_getter_and_setters():
assert node.job_type == 'check' assert node.job_type == 'check'
@pytest.mark.django_db
class TestWorkflowJobCreate: class TestWorkflowJobCreate:
def test_create_no_prompts(self, wfjt_node_no_prompts, workflow_job_unit, mocker): def test_create_no_prompts(self, wfjt_node_no_prompts, workflow_job_unit, mocker):
mock_create = mocker.MagicMock() mock_create = mocker.MagicMock()
@@ -165,6 +166,7 @@ class TestWorkflowJobCreate:
unified_job_template=wfjt_node_no_prompts.unified_job_template, unified_job_template=wfjt_node_no_prompts.unified_job_template,
workflow_job=workflow_job_unit, workflow_job=workflow_job_unit,
identifier=mocker.ANY, identifier=mocker.ANY,
execution_environment=None,
) )
def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, credential, mocker): def test_create_with_prompts(self, wfjt_node_with_prompts, workflow_job_unit, credential, mocker):
@@ -180,9 +182,11 @@ class TestWorkflowJobCreate:
unified_job_template=wfjt_node_with_prompts.unified_job_template, unified_job_template=wfjt_node_with_prompts.unified_job_template,
workflow_job=workflow_job_unit, workflow_job=workflow_job_unit,
identifier=mocker.ANY, identifier=mocker.ANY,
execution_environment=None,
) )
@pytest.mark.django_db
@mock.patch('awx.main.models.workflow.WorkflowNodeBase.get_parent_nodes', lambda self: []) @mock.patch('awx.main.models.workflow.WorkflowNodeBase.get_parent_nodes', lambda self: [])
class TestWorkflowJobNodeJobKWARGS: class TestWorkflowJobNodeJobKWARGS:
""" """
@@ -231,4 +235,12 @@ class TestWorkflowJobNodeJobKWARGS:
def test_get_ask_mapping_integrity(): def test_get_ask_mapping_integrity():
assert list(WorkflowJobTemplate.get_ask_mapping().keys()) == ['extra_vars', 'inventory', 'limit', 'scm_branch'] assert list(WorkflowJobTemplate.get_ask_mapping().keys()) == [
'inventory',
'limit',
'scm_branch',
'labels',
'job_tags',
'skip_tags',
'extra_vars',
]

View File

@@ -196,6 +196,7 @@ def test_jt_can_add_bad_data(user_unit):
assert not access.can_add({'asdf': 'asdf'}) assert not access.can_add({'asdf': 'asdf'})
@pytest.mark.django_db
class TestWorkflowAccessMethods: class TestWorkflowAccessMethods:
@pytest.fixture @pytest.fixture
def workflow(self, workflow_job_template_factory): def workflow(self, workflow_job_template_factory):

View File

@@ -532,6 +532,10 @@ def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
if kwargs and field_name in kwargs: if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name] override_field_val = kwargs[field_name]
if isinstance(override_field_val, (set, list, QuerySet)): if isinstance(override_field_val, (set, list, QuerySet)):
# Labels are additive so we are going to add any src labels in addition to the override labels
if field_name == 'labels':
for jt_label in src_field_value.all():
getattr(obj2, field_name).add(jt_label.id)
getattr(obj2, field_name).add(*override_field_val) getattr(obj2, field_name).add(*override_field_val)
continue continue
if override_field_val.__class__.__name__ == 'ManyRelatedManager': if override_field_val.__class__.__name__ == 'ManyRelatedManager':

View File

@@ -0,0 +1,49 @@
const LabelsMixin = (parent) =>
class extends parent {
readLabels(id, params) {
return this.http.get(`${this.baseUrl}${id}/labels/`, {
params,
});
}
readAllLabels(id) {
const fetchLabels = async (pageNo = 1, labels = []) => {
try {
const { data } = await this.http.get(`${this.baseUrl}${id}/labels/`, {
params: {
page: pageNo,
page_size: 200,
},
});
if (data?.next) {
return fetchLabels(pageNo + 1, labels.concat(data.results));
}
return Promise.resolve({
data: {
results: labels.concat(data.results),
},
});
} catch (error) {
return Promise.reject(error);
}
};
return fetchLabels();
}
associateLabel(id, label, orgId) {
return this.http.post(`${this.baseUrl}${id}/labels/`, {
name: label.name,
organization: orgId,
});
}
disassociateLabel(id, label) {
return this.http.post(`${this.baseUrl}${id}/labels/`, {
id: label.id,
disassociate: true,
});
}
};
export default LabelsMixin;

View File

@@ -1,10 +1,11 @@
import Base from '../Base'; import Base from '../Base';
import NotificationsMixin from '../mixins/Notifications.mixin'; import NotificationsMixin from '../mixins/Notifications.mixin';
import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin'; import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin';
import LabelsMixin from '../mixins/Labels.mixin';
import SchedulesMixin from '../mixins/Schedules.mixin'; import SchedulesMixin from '../mixins/Schedules.mixin';
class JobTemplates extends SchedulesMixin( class JobTemplates extends SchedulesMixin(
InstanceGroupsMixin(NotificationsMixin(Base)) InstanceGroupsMixin(NotificationsMixin(LabelsMixin(Base)))
) { ) {
constructor(http) { constructor(http) {
super(http); super(http);
@@ -33,20 +34,6 @@ class JobTemplates extends SchedulesMixin(
return this.http.get(`${this.baseUrl}${id}/launch/`); return this.http.get(`${this.baseUrl}${id}/launch/`);
} }
associateLabel(id, label, orgId) {
return this.http.post(`${this.baseUrl}${id}/labels/`, {
name: label.name,
organization: orgId,
});
}
disassociateLabel(id, label) {
return this.http.post(`${this.baseUrl}${id}/labels/`, {
id: label.id,
disassociate: true,
});
}
readCredentials(id, params) { readCredentials(id, params) {
return this.http.get(`${this.baseUrl}${id}/credentials/`, { return this.http.get(`${this.baseUrl}${id}/credentials/`, {
params, params,

View File

@@ -1,6 +1,8 @@
import Base from '../Base'; import Base from '../Base';
import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin';
import LabelsMixin from '../mixins/Labels.mixin';
class Schedules extends Base { class Schedules extends InstanceGroupsMixin(LabelsMixin(Base)) {
constructor(http) { constructor(http) {
super(http); super(http);
this.baseUrl = 'api/v2/schedules/'; this.baseUrl = 'api/v2/schedules/';

View File

@@ -1,6 +1,8 @@
import Base from '../Base'; import Base from '../Base';
import InstanceGroupsMixin from '../mixins/InstanceGroups.mixin';
import LabelsMixin from '../mixins/Labels.mixin';
class WorkflowJobTemplateNodes extends Base { class WorkflowJobTemplateNodes extends LabelsMixin(InstanceGroupsMixin(Base)) {
constructor(http) { constructor(http) {
super(http); super(http);
this.baseUrl = 'api/v2/workflow_job_template_nodes/'; this.baseUrl = 'api/v2/workflow_job_template_nodes/';

View File

@@ -1,8 +1,11 @@
import Base from '../Base'; import Base from '../Base';
import SchedulesMixin from '../mixins/Schedules.mixin'; import SchedulesMixin from '../mixins/Schedules.mixin';
import NotificationsMixin from '../mixins/Notifications.mixin'; import NotificationsMixin from '../mixins/Notifications.mixin';
import LabelsMixin from '../mixins/Labels.mixin';
class WorkflowJobTemplates extends SchedulesMixin(NotificationsMixin(Base)) { class WorkflowJobTemplates extends SchedulesMixin(
NotificationsMixin(LabelsMixin(Base))
) {
constructor(http) { constructor(http) {
super(http); super(http);
this.baseUrl = 'api/v2/workflow_job_templates/'; this.baseUrl = 'api/v2/workflow_job_templates/';

View File

@@ -1,6 +1,12 @@
import React, { useState, useEffect } from 'react'; import React, { useState, useEffect } from 'react';
import { func, arrayOf, number, shape, string, oneOfType } from 'prop-types'; import { func, arrayOf, number, shape, string, oneOfType } from 'prop-types';
import { Select, SelectOption, SelectVariant } from '@patternfly/react-core'; import {
Chip,
ChipGroup,
Select,
SelectOption,
SelectVariant,
} from '@patternfly/react-core';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import { LabelsAPI } from 'api'; import { LabelsAPI } from 'api';
import useIsMounted from 'hooks/useIsMounted'; import useIsMounted from 'hooks/useIsMounted';
@@ -60,7 +66,12 @@ function LabelSelect({ value, placeholder, onChange, onError, createText }) {
const renderOptions = (opts) => const renderOptions = (opts) =>
opts.map((option) => ( opts.map((option) => (
<SelectOption key={option.id} aria-label={option.name} value={option}> <SelectOption
key={option.id}
aria-label={option.name}
value={option}
isDisabled={option.isReadOnly}
>
{option.name} {option.name}
</SelectOption> </SelectOption>
)); ));
@@ -73,6 +84,23 @@ function LabelSelect({ value, placeholder, onChange, onError, createText }) {
} }
return null; return null;
}; };
const chipGroupComponent = () => (
<ChipGroup>
{(selections || []).map((currentChip) => (
<Chip
isReadOnly={currentChip.isReadOnly}
key={currentChip.name}
onClick={(e) => {
onSelect(e, currentChip);
}}
>
{currentChip.name}
</Chip>
))}
</ChipGroup>
);
return ( return (
<Select <Select
variant={SelectVariant.typeaheadMulti} variant={SelectVariant.typeaheadMulti}
@@ -83,7 +111,7 @@ function LabelSelect({ value, placeholder, onChange, onError, createText }) {
} }
onSelect(e, item); onSelect(e, item);
}} }}
onClear={() => onChange([])} onClear={() => onChange(selections.filter((label) => label.isReadOnly))}
onFilter={onFilter} onFilter={onFilter}
isCreatable isCreatable
onCreateOption={(label) => { onCreateOption={(label) => {
@@ -101,6 +129,7 @@ function LabelSelect({ value, placeholder, onChange, onError, createText }) {
createText={createText} createText={createText}
noResultsFoundText={t`No results found`} noResultsFoundText={t`No results found`}
ouiaId="template-label-select" ouiaId="template-label-select"
chipGroupComponent={chipGroupComponent()}
> >
{renderOptions(options)} {renderOptions(options)}
</Select> </Select>

View File

@@ -63,7 +63,7 @@ describe('<LabelSelect />', () => {
const selectOptions = wrapper.find('SelectOption'); const selectOptions = wrapper.find('SelectOption');
expect(selectOptions).toHaveLength(4); expect(selectOptions).toHaveLength(4);
}); });
test('Generate a label ', async () => { test('Generate a label', async () => {
let wrapper; let wrapper;
const onChange = jest.fn(); const onChange = jest.fn();
LabelsAPI.read.mockReturnValue({ LabelsAPI.read.mockReturnValue({
@@ -79,4 +79,33 @@ describe('<LabelSelect />', () => {
await wrapper.find('Select').invoke('onSelect')({}, 'foo'); await wrapper.find('Select').invoke('onSelect')({}, 'foo');
expect(onChange).toBeCalledWith([{ id: 'foo', name: 'foo' }]); expect(onChange).toBeCalledWith([{ id: 'foo', name: 'foo' }]);
}); });
test('should handle read-only labels', async () => {
let wrapper;
const onChange = jest.fn();
LabelsAPI.read.mockReturnValue({
data: {
results: [
{ id: 1, name: 'read only' },
{ id: 2, name: 'not read only' },
],
},
});
await act(async () => {
wrapper = mount(
<LabelSelect
value={[
{ id: 1, name: 'read only', isReadOnly: true },
{ id: 2, name: 'not read only' },
]}
onError={() => {}}
onChange={onChange}
/>
);
});
wrapper.find('SelectToggle').simulate('click');
const selectOptions = wrapper.find('SelectOption');
expect(selectOptions).toHaveLength(2);
expect(selectOptions.at(0).prop('isDisabled')).toBe(true);
expect(selectOptions.at(1).prop('isDisabled')).toBe(false);
});
}); });

View File

@@ -1,9 +1,7 @@
import React, { useState } from 'react'; import React, { useState } from 'react';
import { useHistory } from 'react-router-dom'; import { useHistory } from 'react-router-dom';
import { number, shape } from 'prop-types'; import { number, shape } from 'prop-types';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import { import {
AdHocCommandsAPI, AdHocCommandsAPI,
InventorySourcesAPI, InventorySourcesAPI,
@@ -24,6 +22,12 @@ function canLaunchWithoutPrompt(launchData) {
!launchData.ask_variables_on_launch && !launchData.ask_variables_on_launch &&
!launchData.ask_limit_on_launch && !launchData.ask_limit_on_launch &&
!launchData.ask_scm_branch_on_launch && !launchData.ask_scm_branch_on_launch &&
!launchData.ask_execution_environment_on_launch &&
!launchData.ask_labels_on_launch &&
!launchData.ask_forks_on_launch &&
!launchData.ask_job_slice_count_on_launch &&
!launchData.ask_timeout_on_launch &&
!launchData.ask_instance_groups_on_launch &&
!launchData.survey_enabled && !launchData.survey_enabled &&
(!launchData.passwords_needed_to_start || (!launchData.passwords_needed_to_start ||
launchData.passwords_needed_to_start.length === 0) && launchData.passwords_needed_to_start.length === 0) &&
@@ -37,6 +41,7 @@ function LaunchButton({ resource, children }) {
const [showLaunchPrompt, setShowLaunchPrompt] = useState(false); const [showLaunchPrompt, setShowLaunchPrompt] = useState(false);
const [launchConfig, setLaunchConfig] = useState(null); const [launchConfig, setLaunchConfig] = useState(null);
const [surveyConfig, setSurveyConfig] = useState(null); const [surveyConfig, setSurveyConfig] = useState(null);
const [labels, setLabels] = useState([]);
const [isLaunching, setIsLaunching] = useState(false); const [isLaunching, setIsLaunching] = useState(false);
const [error, setError] = useState(null); const [error, setError] = useState(null);
@@ -50,6 +55,11 @@ function LaunchButton({ resource, children }) {
resource.type === 'workflow_job_template' resource.type === 'workflow_job_template'
? WorkflowJobTemplatesAPI.readSurvey(resource.id) ? WorkflowJobTemplatesAPI.readSurvey(resource.id)
: JobTemplatesAPI.readSurvey(resource.id); : JobTemplatesAPI.readSurvey(resource.id);
const readLabels =
resource.type === 'workflow_job_template'
? WorkflowJobTemplatesAPI.readAllLabels(resource.id)
: JobTemplatesAPI.readAllLabels(resource.id);
try { try {
const { data: launch } = await readLaunch; const { data: launch } = await readLaunch;
setLaunchConfig(launch); setLaunchConfig(launch);
@@ -60,6 +70,19 @@ function LaunchButton({ resource, children }) {
setSurveyConfig(data); setSurveyConfig(data);
} }
if (launch.ask_labels_on_launch) {
const {
data: { results },
} = await readLabels;
const allLabels = results.map((label) => ({
...label,
isReadOnly: true,
}));
setLabels(allLabels);
}
if (canLaunchWithoutPrompt(launch)) { if (canLaunchWithoutPrompt(launch)) {
await launchWithParams({}); await launchWithParams({});
} else { } else {
@@ -171,6 +194,7 @@ function LaunchButton({ resource, children }) {
launchConfig={launchConfig} launchConfig={launchConfig}
surveyConfig={surveyConfig} surveyConfig={surveyConfig}
resource={resource} resource={resource}
labels={labels}
onLaunch={launchWithParams} onLaunch={launchWithParams}
onCancel={() => setShowLaunchPrompt(false)} onCancel={() => setShowLaunchPrompt(false)}
/> />

View File

@@ -37,6 +37,12 @@ describe('LaunchButton', () => {
ask_variables_on_launch: false, ask_variables_on_launch: false,
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_scm_branch_on_launch: false, ask_scm_branch_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
}, },

View File

@@ -5,6 +5,7 @@ import { Formik, useFormikContext } from 'formik';
import { useDismissableError } from 'hooks/useRequest'; import { useDismissableError } from 'hooks/useRequest';
import mergeExtraVars from 'util/prompt/mergeExtraVars'; import mergeExtraVars from 'util/prompt/mergeExtraVars';
import getSurveyValues from 'util/prompt/getSurveyValues'; import getSurveyValues from 'util/prompt/getSurveyValues';
import createNewLabels from 'util/labels';
import ContentLoading from '../ContentLoading'; import ContentLoading from '../ContentLoading';
import ContentError from '../ContentError'; import ContentError from '../ContentError';
import useLaunchSteps from './useLaunchSteps'; import useLaunchSteps from './useLaunchSteps';
@@ -15,7 +16,9 @@ function PromptModalForm({
onCancel, onCancel,
onSubmit, onSubmit,
resource, resource,
labels,
surveyConfig, surveyConfig,
instanceGroups,
}) { }) {
const { setFieldTouched, values } = useFormikContext(); const { setFieldTouched, values } = useFormikContext();
const [showDescription, setShowDescription] = useState(false); const [showDescription, setShowDescription] = useState(false);
@@ -27,9 +30,15 @@ function PromptModalForm({
visitStep, visitStep,
visitAllSteps, visitAllSteps,
contentError, contentError,
} = useLaunchSteps(launchConfig, surveyConfig, resource); } = useLaunchSteps(
launchConfig,
surveyConfig,
resource,
labels,
instanceGroups
);
const handleSubmit = () => { const handleSubmit = async () => {
const postValues = {}; const postValues = {};
const setValue = (key, value) => { const setValue = (key, value) => {
if (typeof value !== 'undefined' && value !== null) { if (typeof value !== 'undefined' && value !== null) {
@@ -53,6 +62,27 @@ function PromptModalForm({
setValue('extra_vars', mergeExtraVars(extraVars, surveyValues)); setValue('extra_vars', mergeExtraVars(extraVars, surveyValues));
setValue('scm_branch', values.scm_branch); setValue('scm_branch', values.scm_branch);
setValue('verbosity', values.verbosity); setValue('verbosity', values.verbosity);
setValue('timeout', values.timeout);
setValue('forks', values.forks);
setValue('job_slice_count', values.job_slice_count);
setValue('execution_environment', values.execution_environment?.id);
if (launchConfig.ask_instance_groups_on_launch) {
const instanceGroupIds = [];
values.instance_groups.forEach((instance_group) => {
instanceGroupIds.push(instance_group.id);
});
setValue('instance_groups', instanceGroupIds);
}
if (launchConfig.ask_labels_on_launch) {
const { labelIds } = createNewLabels(
values.labels,
resource.organization
);
setValue('labels', labelIds);
}
onSubmit(postValues); onSubmit(postValues);
}; };
@@ -137,6 +167,7 @@ function LaunchPrompt({
onCancel, onCancel,
onLaunch, onLaunch,
resource = {}, resource = {},
labels = [],
surveyConfig, surveyConfig,
resourceDefaultCredentials = [], resourceDefaultCredentials = [],
}) { }) {
@@ -148,7 +179,9 @@ function LaunchPrompt({
launchConfig={launchConfig} launchConfig={launchConfig}
surveyConfig={surveyConfig} surveyConfig={surveyConfig}
resource={resource} resource={resource}
labels={labels}
resourceDefaultCredentials={resourceDefaultCredentials} resourceDefaultCredentials={resourceDefaultCredentials}
instanceGroups={[]}
/> />
</Formik> </Formik>
); );

View File

@@ -1,6 +1,8 @@
import React from 'react'; import React from 'react';
import { act, isElementOfType } from 'react-dom/test-utils'; import { act, isElementOfType } from 'react-dom/test-utils';
import { import {
ExecutionEnvironmentsAPI,
InstanceGroupsAPI,
InventoriesAPI, InventoriesAPI,
CredentialsAPI, CredentialsAPI,
CredentialTypesAPI, CredentialTypesAPI,
@@ -16,11 +18,16 @@ import CredentialsStep from './steps/CredentialsStep';
import CredentialPasswordsStep from './steps/CredentialPasswordsStep'; import CredentialPasswordsStep from './steps/CredentialPasswordsStep';
import OtherPromptsStep from './steps/OtherPromptsStep'; import OtherPromptsStep from './steps/OtherPromptsStep';
import PreviewStep from './steps/PreviewStep'; import PreviewStep from './steps/PreviewStep';
import ExecutionEnvironmentStep from './steps/ExecutionEnvironmentStep';
import InstanceGroupsStep from './steps/InstanceGroupsStep';
import SurveyStep from './steps/SurveyStep';
jest.mock('../../api/models/Inventories'); jest.mock('../../api/models/Inventories');
jest.mock('../../api/models/ExecutionEnvironments');
jest.mock('../../api/models/CredentialTypes'); jest.mock('../../api/models/CredentialTypes');
jest.mock('../../api/models/Credentials'); jest.mock('../../api/models/Credentials');
jest.mock('../../api/models/JobTemplates'); jest.mock('../../api/models/JobTemplates');
jest.mock('../../api/models/InstanceGroups');
let config; let config;
const resource = { const resource = {
@@ -62,6 +69,79 @@ describe('LaunchPrompt', () => {
spec: [{ type: 'text', variable: 'foo' }], spec: [{ type: 'text', variable: 'foo' }],
}, },
}); });
InstanceGroupsAPI.read.mockResolvedValue({
data: {
results: [
{
id: 2,
type: 'instance_group',
url: '/api/v2/instance_groups/2/',
related: {
jobs: '/api/v2/instance_groups/2/jobs/',
instances: '/api/v2/instance_groups/2/instances/',
},
name: 'default',
created: '2022-08-30T20:35:05.747132Z',
modified: '2022-08-30T20:35:05.756690Z',
capacity: 177,
consumed_capacity: 0,
percent_capacity_remaining: 100.0,
jobs_running: 0,
jobs_total: 2,
instances: 3,
is_container_group: false,
credential: null,
policy_instance_percentage: 100,
policy_instance_minimum: 0,
policy_instance_list: [],
pod_spec_override: '',
summary_fields: {
user_capabilities: {
edit: true,
delete: false,
},
},
},
],
count: 1,
},
});
ExecutionEnvironmentsAPI.read.mockResolvedValue({
data: {
results: [
{
id: 1,
type: 'execution_environment',
url: '/api/v2/execution_environments/1/',
related: {
activity_stream:
'/api/v2/execution_environments/1/activity_stream/',
unified_job_templates:
'/api/v2/execution_environments/1/unified_job_templates/',
copy: '/api/v2/execution_environments/1/copy/',
},
summary_fields: {
execution_environment: {},
user_capabilities: {
edit: true,
delete: true,
copy: true,
},
},
created: '2022-08-30T20:34:55.842997Z',
modified: '2022-08-30T20:34:55.859874Z',
name: 'AWX EE (latest)',
description: '',
organization: null,
image: 'quay.io/ansible/awx-ee:latest',
managed: false,
credential: null,
pull: '',
},
],
count: 1,
},
});
config = { config = {
can_start_without_user_input: false, can_start_without_user_input: false,
@@ -76,6 +156,12 @@ describe('LaunchPrompt', () => {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -96,6 +182,8 @@ describe('LaunchPrompt', () => {
ask_inventory_on_launch: true, ask_inventory_on_launch: true,
ask_credential_on_launch: true, ask_credential_on_launch: true,
ask_scm_branch_on_launch: true, ask_scm_branch_on_launch: true,
ask_execution_environment_on_launch: true,
ask_instance_groups_on_launch: true,
survey_enabled: true, survey_enabled: true,
passwords_needed_to_start: ['ssh_password'], passwords_needed_to_start: ['ssh_password'],
defaults: { defaults: {
@@ -150,13 +238,15 @@ describe('LaunchPrompt', () => {
const wizard = await waitForElement(wrapper, 'Wizard'); const wizard = await waitForElement(wrapper, 'Wizard');
const steps = wizard.prop('steps'); const steps = wizard.prop('steps');
expect(steps).toHaveLength(6); expect(steps).toHaveLength(8);
expect(steps[0].name.props.children).toEqual('Inventory'); expect(steps[0].name.props.children).toEqual('Inventory');
expect(steps[1].name.props.children).toEqual('Credentials'); expect(steps[1].name.props.children).toEqual('Credentials');
expect(steps[2].name.props.children).toEqual('Credential passwords'); expect(steps[2].name.props.children).toEqual('Credential passwords');
expect(steps[3].name.props.children).toEqual('Other prompts'); expect(steps[3].name.props.children).toEqual('Execution Environment');
expect(steps[4].name.props.children).toEqual('Survey'); expect(steps[4].name.props.children).toEqual('Instance Groups');
expect(steps[5].name.props.children).toEqual('Preview'); expect(steps[5].name.props.children).toEqual('Other prompts');
expect(steps[6].name.props.children).toEqual('Survey');
expect(steps[7].name.props.children).toEqual('Preview');
expect(wizard.find('WizardHeader').prop('title')).toBe('Launch | Foobar'); expect(wizard.find('WizardHeader').prop('title')).toBe('Launch | Foobar');
expect(wizard.find('WizardHeader').prop('description')).toBe( expect(wizard.find('WizardHeader').prop('description')).toBe(
'Foo Description' 'Foo Description'
@@ -214,6 +304,58 @@ describe('LaunchPrompt', () => {
expect(isElementOfType(steps[2].component, PreviewStep)).toEqual(true); expect(isElementOfType(steps[2].component, PreviewStep)).toEqual(true);
}); });
test('should add execution environment step', async () => {
let wrapper;
await act(async () => {
wrapper = mountWithContexts(
<LaunchPrompt
launchConfig={{
...config,
ask_execution_environment_on_launch: true,
}}
resource={resource}
onLaunch={noop}
onCancel={noop}
/>
);
});
const wizard = await waitForElement(wrapper, 'Wizard');
const steps = wizard.prop('steps');
expect(steps).toHaveLength(2);
expect(steps[0].name.props.children).toEqual('Execution Environment');
expect(
isElementOfType(steps[0].component, ExecutionEnvironmentStep)
).toEqual(true);
expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true);
});
test('should add instance groups step', async () => {
let wrapper;
await act(async () => {
wrapper = mountWithContexts(
<LaunchPrompt
launchConfig={{
...config,
ask_instance_groups_on_launch: true,
}}
resource={resource}
onLaunch={noop}
onCancel={noop}
/>
);
});
const wizard = await waitForElement(wrapper, 'Wizard');
const steps = wizard.prop('steps');
expect(steps).toHaveLength(2);
expect(steps[0].name.props.children).toEqual('Instance Groups');
expect(isElementOfType(steps[0].component, InstanceGroupsStep)).toEqual(
true
);
expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true);
});
test('should add other prompts step', async () => { test('should add other prompts step', async () => {
let wrapper; let wrapper;
await act(async () => { await act(async () => {
@@ -237,4 +379,46 @@ describe('LaunchPrompt', () => {
expect(isElementOfType(steps[0].component, OtherPromptsStep)).toEqual(true); expect(isElementOfType(steps[0].component, OtherPromptsStep)).toEqual(true);
expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true); expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true);
}); });
test('should add survey step', async () => {
let wrapper;
await act(async () => {
wrapper = mountWithContexts(
<LaunchPrompt
launchConfig={{
...config,
survey_enabled: true,
}}
resource={resource}
onLaunch={noop}
onCancel={noop}
surveyConfig={{
name: '',
description: '',
spec: [
{
choices: '',
default: '',
max: 1024,
min: 0,
new_question: false,
question_description: '',
question_name: 'foo',
required: true,
type: 'text',
variable: 'foo',
},
],
}}
/>
);
});
const wizard = await waitForElement(wrapper, 'Wizard');
const steps = wizard.prop('steps');
expect(steps).toHaveLength(2);
expect(steps[0].name.props.children).toEqual('Survey');
expect(isElementOfType(steps[0].component, SurveyStep)).toEqual(true);
expect(isElementOfType(steps[1].component, PreviewStep)).toEqual(true);
});
}); });

View File

@@ -132,7 +132,7 @@ function CredentialsStep({
); );
return ( return (
<> <div data-cy="credentials-prompt">
{meta.error && ( {meta.error && (
<CredentialErrorAlert variant="danger" isInline title={meta.error} /> <CredentialErrorAlert variant="danger" isInline title={meta.error} />
)} )}
@@ -208,7 +208,7 @@ function CredentialsStep({
}} }}
renderItemChip={renderChip} renderItemChip={renderChip}
/> />
</> </div>
); );
} }

View File

@@ -0,0 +1,118 @@
import React, { useCallback, useEffect } from 'react';
import { useHistory } from 'react-router-dom';
import { t } from '@lingui/macro';
import { useField } from 'formik';
import { ExecutionEnvironmentsAPI } from 'api';
import { getSearchableKeys } from 'components/PaginatedTable';
import { getQSConfig, parseQueryString } from 'util/qs';
import useRequest from 'hooks/useRequest';
import OptionsList from '../../OptionsList';
import ContentLoading from '../../ContentLoading';
import ContentError from '../../ContentError';
// Query-string config for the paginated execution environment list in
// this prompt step: namespace 'execution_environment', 5 rows per page.
const QS_CONFIG = getQSConfig('execution_environment', {
  page: 1,
  page_size: 5,
});
function ExecutionEnvironmentStep() {
const [field, , helpers] = useField('execution_environment');
const history = useHistory();
const {
isLoading,
error,
result: {
execution_environments,
count,
relatedSearchableKeys,
searchableKeys,
},
request: fetchExecutionEnvironments,
} = useRequest(
useCallback(async () => {
const params = parseQueryString(QS_CONFIG, history.location.search);
const [{ data }, actionsResponse] = await Promise.all([
ExecutionEnvironmentsAPI.read(params),
ExecutionEnvironmentsAPI.readOptions(),
]);
return {
execution_environments: data.results,
count: data.count,
relatedSearchableKeys: (
actionsResponse?.data?.related_search_fields || []
).map((val) => val.slice(0, -8)),
searchableKeys: getSearchableKeys(actionsResponse.data.actions?.GET),
};
}, [history.location]),
{
count: 0,
execution_environments: [],
relatedSearchableKeys: [],
searchableKeys: [],
}
);
useEffect(() => {
fetchExecutionEnvironments();
}, [fetchExecutionEnvironments]);
if (isLoading) {
return <ContentLoading />;
}
if (error) {
return <ContentError error={error} />;
}
return (
<div data-cy="execution-environment-prompt">
<OptionsList
value={field.value ? [field.value] : []}
options={execution_environments}
optionCount={count}
columns={[
{
name: t`Name`,
key: 'name',
},
{
name: t`Image`,
key: 'image',
},
]}
searchColumns={[
{
name: t`Name`,
key: 'name__icontains',
isDefault: true,
},
{
name: t`Image`,
key: 'image__icontains',
},
]}
sortColumns={[
{
name: t`Name`,
key: 'name',
},
{
name: t`Image`,
key: 'image',
},
]}
searchableKeys={searchableKeys}
relatedSearchableKeys={relatedSearchableKeys}
header={t`Execution Environments`}
name="execution_environment"
qsConfig={QS_CONFIG}
readOnly
selectItem={helpers.setValue}
deselectItem={() => helpers.setValue(null)}
/>
</div>
);
}
export default ExecutionEnvironmentStep;

View File

@@ -0,0 +1,52 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { Formik } from 'formik';
import { ExecutionEnvironmentsAPI } from 'api';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import ExecutionEnvironmentStep from './ExecutionEnvironmentStep';
jest.mock('../../../api/models/ExecutionEnvironments');
// Minimal execution environment fixtures returned by the mocked list API.
const execution_environments = [
  { id: 1, name: 'ee one', url: '/execution_environments/1' },
  { id: 2, name: 'ee two', url: '/execution_environments/2' },
  { id: 3, name: 'ee three', url: '/execution_environments/3' },
];
describe('ExecutionEnvironmentStep', () => {
  beforeEach(() => {
    // The component fetches the list and its OPTIONS metadata in
    // parallel, so both endpoints must be mocked before mounting.
    ExecutionEnvironmentsAPI.read.mockResolvedValue({
      data: {
        results: execution_environments,
        count: 3,
      },
    });
    ExecutionEnvironmentsAPI.readOptions.mockResolvedValue({
      data: {
        actions: {
          GET: {},
          POST: {},
        },
        related_search_fields: [],
      },
    });
  });
  test('should load execution environments', async () => {
    let wrapper;
    // Mount inside act() so the initial data fetch settles before asserting.
    await act(async () => {
      wrapper = mountWithContexts(
        <Formik>
          <ExecutionEnvironmentStep />
        </Formik>
      );
    });
    wrapper.update();
    expect(ExecutionEnvironmentsAPI.read).toHaveBeenCalled();
    expect(wrapper.find('OptionsList').prop('options')).toEqual(
      execution_environments
    );
  });
});

View File

@@ -0,0 +1,108 @@
import React, { useCallback, useEffect } from 'react';
import { useHistory } from 'react-router-dom';
import { t } from '@lingui/macro';
import { useField } from 'formik';
import { InstanceGroupsAPI } from 'api';
import { getSearchableKeys } from 'components/PaginatedTable';
import { getQSConfig, parseQueryString } from 'util/qs';
import useRequest from 'hooks/useRequest';
import useSelected from 'hooks/useSelected';
import OptionsList from '../../OptionsList';
import ContentLoading from '../../ContentLoading';
import ContentError from '../../ContentError';
// Query-string config for the paginated instance group list in this
// prompt step: namespace 'instance-groups', 5 rows per page, name-sorted.
const QS_CONFIG = getQSConfig('instance-groups', {
  page: 1,
  page_size: 5,
  order_by: 'name',
});
function InstanceGroupsStep() {
const [field, , helpers] = useField('instance_groups');
const { selected, handleSelect, setSelected } = useSelected([], field.value);
const history = useHistory();
const {
result: { instance_groups, count, relatedSearchableKeys, searchableKeys },
request: fetchInstanceGroups,
error,
isLoading,
} = useRequest(
useCallback(async () => {
const params = parseQueryString(QS_CONFIG, history.location.search);
const [{ data }, actionsResponse] = await Promise.all([
InstanceGroupsAPI.read(params),
InstanceGroupsAPI.readOptions(),
]);
return {
instance_groups: data.results,
count: data.count,
relatedSearchableKeys: (
actionsResponse?.data?.related_search_fields || []
).map((val) => val.slice(0, -8)),
searchableKeys: getSearchableKeys(actionsResponse.data.actions?.GET),
};
}, [history.location]),
{
instance_groups: [],
count: 0,
relatedSearchableKeys: [],
searchableKeys: [],
}
);
useEffect(() => {
fetchInstanceGroups();
}, [fetchInstanceGroups]);
useEffect(() => {
helpers.setValue(selected);
}, [selected]); // eslint-disable-line react-hooks/exhaustive-deps
if (isLoading) {
return <ContentLoading />;
}
if (error) {
return <ContentError error={error} />;
}
return (
<div data-cy="instance-groups-prompt">
<OptionsList
value={selected}
options={instance_groups}
optionCount={count}
searchColumns={[
{
name: t`Name`,
key: 'name__icontains',
isDefault: true,
},
{
name: t`Credential Name`,
key: 'credential__name__icontains',
},
]}
sortColumns={[
{
name: t`Name`,
key: 'name',
},
]}
searchableKeys={searchableKeys}
relatedSearchableKeys={relatedSearchableKeys}
multiple
header={t`Instance Groups`}
name="instanceGroups"
qsConfig={QS_CONFIG}
selectItem={handleSelect}
deselectItem={handleSelect}
sortSelectedItems={(selectedItems) => setSelected(selectedItems)}
isSelectedDraggable
/>
</div>
);
}
export default InstanceGroupsStep;

View File

@@ -0,0 +1,52 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { Formik } from 'formik';
import { InstanceGroupsAPI } from 'api';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import InstanceGroupsStep from './InstanceGroupsStep';
jest.mock('../../../api/models/InstanceGroups');
// Minimal instance group fixtures returned by the mocked list API.
const instance_groups = [
  { id: 1, name: 'ig one', url: '/instance_groups/1' },
  { id: 2, name: 'ig two', url: '/instance_groups/2' },
  { id: 3, name: 'ig three', url: '/instance_groups/3' },
];
describe('InstanceGroupsStep', () => {
  beforeEach(() => {
    // The component fetches the list and its OPTIONS metadata in
    // parallel, so both endpoints must be mocked before mounting.
    InstanceGroupsAPI.read.mockResolvedValue({
      data: {
        results: instance_groups,
        count: 3,
      },
    });
    InstanceGroupsAPI.readOptions.mockResolvedValue({
      data: {
        actions: {
          GET: {},
          POST: {},
        },
        related_search_fields: [],
      },
    });
  });
  test('should load instance groups', async () => {
    let wrapper;
    // Mount inside act() so the initial data fetch settles before asserting.
    await act(async () => {
      wrapper = mountWithContexts(
        <Formik initialValues={{ instance_groups: [] }}>
          <InstanceGroupsStep />
        </Formik>
      );
    });
    wrapper.update();
    expect(InstanceGroupsAPI.read).toHaveBeenCalled();
    expect(wrapper.find('OptionsList').prop('options')).toEqual(
      instance_groups
    );
  });
});

View File

@@ -70,7 +70,7 @@ function InventoryStep({ warningMessage = null }) {
} }
return ( return (
<> <div data-cy="inventory-prompt">
{meta.touched && meta.error && ( {meta.touched && meta.error && (
<InventoryErrorAlert variant="danger" isInline title={meta.error} /> <InventoryErrorAlert variant="danger" isInline title={meta.error} />
)} )}
@@ -109,7 +109,7 @@ function InventoryStep({ warningMessage = null }) {
selectItem={helpers.setValue} selectItem={helpers.setValue}
deselectItem={() => field.onChange(null)} deselectItem={() => field.onChange(null)}
/> />
</> </div>
); );
} }

View File

@@ -1,15 +1,17 @@
import React from 'react'; import React from 'react';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import { useField } from 'formik'; import { useField } from 'formik';
import { Form, FormGroup, Switch } from '@patternfly/react-core'; import { Form, FormGroup, Switch } from '@patternfly/react-core';
import styled from 'styled-components'; import styled from 'styled-components';
import LabelSelect from '../../LabelSelect';
import FormField from '../../FormField'; import FormField from '../../FormField';
import { TagMultiSelect } from '../../MultiSelect'; import { TagMultiSelect } from '../../MultiSelect';
import AnsibleSelect from '../../AnsibleSelect'; import AnsibleSelect from '../../AnsibleSelect';
import { VariablesField } from '../../CodeEditor'; import { VariablesField } from '../../CodeEditor';
import Popover from '../../Popover'; import Popover from '../../Popover';
import { VerbositySelectField } from '../../VerbositySelectField'; import { VerbositySelectField } from '../../VerbositySelectField';
import jobHelpText from '../../../screens/Job/Job.helptext';
import workflowHelpText from '../../../screens/Template/shared/WorkflowJobTemplate.helptext';
const FieldHeader = styled.div` const FieldHeader = styled.div`
display: flex; display: flex;
@@ -22,72 +24,105 @@ const FieldHeader = styled.div`
`; `;
function OtherPromptsStep({ launchConfig, variablesMode, onVarModeChange }) { function OtherPromptsStep({ launchConfig, variablesMode, onVarModeChange }) {
const helpTextSource = launchConfig.job_template_data
? jobHelpText
: workflowHelpText;
return ( return (
<Form <div data-cy="other-prompts">
onSubmit={(e) => { <Form
e.preventDefault(); onSubmit={(e) => {
}} e.preventDefault();
> }}
{launchConfig.ask_job_type_on_launch && <JobTypeField />} >
{launchConfig.ask_limit_on_launch && ( {launchConfig.ask_job_type_on_launch && (
<FormField <JobTypeField helpTextSource={helpTextSource} />
id="prompt-limit" )}
name="limit" {launchConfig.ask_scm_branch_on_launch && (
label={t`Limit`} <FormField
tooltip={t`Provide a host pattern to further constrain the list id="prompt-scm-branch"
of hosts that will be managed or affected by the playbook. Multiple name="scm_branch"
patterns are allowed. Refer to Ansible documentation for more label={t`Source Control Branch`}
information and examples on patterns.`} tooltip={helpTextSource.sourceControlBranch}
/> />
)} )}
{launchConfig.ask_scm_branch_on_launch && ( {launchConfig.ask_labels_on_launch && (
<FormField <LabelsField helpTextSource={helpTextSource} />
id="prompt-scm-branch" )}
name="scm_branch" {launchConfig.ask_forks_on_launch && (
label={t`Source Control Branch`} <FormField
tooltip={t`Select a branch for the workflow. This branch is applied to all job template nodes that prompt for a branch`} id="prompt-forks"
/> name="forks"
)} label={t`Forks`}
{launchConfig.ask_verbosity_on_launch && <VerbosityField />} type="number"
{launchConfig.ask_diff_mode_on_launch && <ShowChangesToggle />} min="0"
{launchConfig.ask_tags_on_launch && ( tooltip={helpTextSource.forks}
<TagField />
id="prompt-job-tags" )}
name="job_tags" {launchConfig.ask_limit_on_launch && (
label={t`Job Tags`} <FormField
aria-label={t`Job Tags`} id="prompt-limit"
tooltip={t`Tags are useful when you have a large name="limit"
playbook, and you want to run a specific part of a play or task. label={t`Limit`}
Use commas to separate multiple tags. Refer to Ansible Controller tooltip={helpTextSource.limit}
documentation for details on the usage of tags.`} />
/> )}
)} {launchConfig.ask_verbosity_on_launch && (
{launchConfig.ask_skip_tags_on_launch && ( <VerbosityField helpTextSource={helpTextSource} />
<TagField )}
id="prompt-skip-tags" {launchConfig.ask_job_slice_count_on_launch && (
name="skip_tags" <FormField
label={t`Skip Tags`} id="prompt-job-slicing"
aria-label={t`Skip Tags`} name="job_slice_count"
tooltip={t`Skip tags are useful when you have a large label={t`Job Slicing`}
playbook, and you want to skip specific parts of a play or task. type="number"
Use commas to separate multiple tags. Refer to Ansible Controller min="1"
documentation for details on the usage of tags.`} tooltip={helpTextSource.jobSlicing}
/> />
)} )}
{launchConfig.ask_variables_on_launch && ( {launchConfig.ask_timeout_on_launch && (
<VariablesField <FormField
id="prompt-variables" id="prompt-timeout"
name="extra_vars" name="timeout"
label={t`Variables`} label={t`Timeout`}
initialMode={variablesMode} type="number"
onModeChange={onVarModeChange} min="0"
/> tooltip={helpTextSource.timeout}
)} />
</Form> )}
{launchConfig.ask_diff_mode_on_launch && <ShowChangesToggle />}
{launchConfig.ask_tags_on_launch && (
<TagField
id="prompt-job-tags"
name="job_tags"
label={t`Job Tags`}
aria-label={t`Job Tags`}
tooltip={helpTextSource.jobTags}
/>
)}
{launchConfig.ask_skip_tags_on_launch && (
<TagField
id="prompt-skip-tags"
name="skip_tags"
label={t`Skip Tags`}
aria-label={t`Skip Tags`}
tooltip={helpTextSource.skipTags}
/>
)}
{launchConfig.ask_variables_on_launch && (
<VariablesField
id="prompt-variables"
name="extra_vars"
label={t`Variables`}
initialMode={variablesMode}
onModeChange={onVarModeChange}
/>
)}
</Form>
</div>
); );
} }
function JobTypeField() { function JobTypeField({ helpTextSource }) {
const [field, meta, helpers] = useField('job_type'); const [field, meta, helpers] = useField('job_type');
const options = [ const options = [
{ {
@@ -107,15 +142,9 @@ function JobTypeField() {
const isValid = !(meta.touched && meta.error); const isValid = !(meta.touched && meta.error);
return ( return (
<FormGroup <FormGroup
fieldId="propmt-job-type" fieldId="prompt-job-type"
label={t`Job Type`} label={t`Job Type`}
labelIcon={ labelIcon={<Popover content={helpTextSource.jobType} />}
<Popover
content={t`For job templates, select run to execute the playbook.
Select check to only check playbook syntax, test environment setup,
and report problems without executing the playbook.`}
/>
}
isRequired isRequired
validated={isValid ? 'default' : 'error'} validated={isValid ? 'default' : 'error'}
> >
@@ -129,15 +158,14 @@ function JobTypeField() {
); );
} }
function VerbosityField() { function VerbosityField({ helpTextSource }) {
const [, meta] = useField('verbosity'); const [, meta] = useField('verbosity');
const isValid = !(meta.touched && meta.error); const isValid = !(meta.touched && meta.error);
return ( return (
<VerbositySelectField <VerbositySelectField
fieldId="prompt-verbosity" fieldId="prompt-verbosity"
tooltip={t`Control the level of output ansible tooltip={helpTextSource.verbosity}
will produce as the playbook executes.`}
isValid={isValid ? 'default' : 'error'} isValid={isValid ? 'default' : 'error'}
/> />
); );
@@ -186,4 +214,25 @@ function TagField({ id, name, label, tooltip }) {
); );
} }
function LabelsField({ helpTextSource }) {
const [field, meta, helpers] = useField('labels');
return (
<FormGroup
fieldId="prompt-labels"
label={t`Labels`}
labelIcon={<Popover content={helpTextSource.labels} />}
validated={!meta.touched || !meta.error ? 'default' : 'error'}
helperTextInvalid={meta.error}
>
<LabelSelect
value={field.value}
onChange={(labels) => helpers.setValue(labels)}
createText={t`Create`}
onError={(err) => helpers.setError(err)}
/>
</FormGroup>
);
}
export default OtherPromptsStep; export default OtherPromptsStep;

View File

@@ -13,6 +13,11 @@ describe('OtherPromptsStep', () => {
<OtherPromptsStep <OtherPromptsStep
launchConfig={{ launchConfig={{
ask_job_type_on_launch: true, ask_job_type_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}} }}
/> />
</Formik> </Formik>
@@ -36,6 +41,11 @@ describe('OtherPromptsStep', () => {
<OtherPromptsStep <OtherPromptsStep
launchConfig={{ launchConfig={{
ask_limit_on_launch: true, ask_limit_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}} }}
/> />
</Formik> </Formik>
@@ -48,6 +58,81 @@ describe('OtherPromptsStep', () => {
); );
}); });
test('should render timeout field', async () => {
let wrapper;
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<OtherPromptsStep
launchConfig={{
ask_timeout_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}}
/>
</Formik>
);
});
expect(wrapper.find('FormField#prompt-timeout')).toHaveLength(1);
expect(wrapper.find('FormField#prompt-timeout input').prop('name')).toEqual(
'timeout'
);
});
test('should render forks field', async () => {
let wrapper;
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<OtherPromptsStep
launchConfig={{
ask_forks_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}}
/>
</Formik>
);
});
expect(wrapper.find('FormField#prompt-forks')).toHaveLength(1);
expect(wrapper.find('FormField#prompt-forks input').prop('name')).toEqual(
'forks'
);
});
test('should render job slicing field', async () => {
let wrapper;
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<OtherPromptsStep
launchConfig={{
ask_job_slice_count_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}}
/>
</Formik>
);
});
expect(wrapper.find('FormField#prompt-job-slicing')).toHaveLength(1);
expect(
wrapper.find('FormField#prompt-job-slicing input').prop('name')
).toEqual('job_slice_count');
});
test('should render source control branch field', async () => { test('should render source control branch field', async () => {
let wrapper; let wrapper;
await act(async () => { await act(async () => {
@@ -56,6 +141,11 @@ describe('OtherPromptsStep', () => {
<OtherPromptsStep <OtherPromptsStep
launchConfig={{ launchConfig={{
ask_scm_branch_on_launch: true, ask_scm_branch_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}} }}
/> />
</Formik> </Formik>
@@ -76,6 +166,11 @@ describe('OtherPromptsStep', () => {
<OtherPromptsStep <OtherPromptsStep
launchConfig={{ launchConfig={{
ask_verbosity_on_launch: true, ask_verbosity_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}} }}
/> />
</Formik> </Formik>
@@ -96,6 +191,11 @@ describe('OtherPromptsStep', () => {
<OtherPromptsStep <OtherPromptsStep
launchConfig={{ launchConfig={{
ask_diff_mode_on_launch: true, ask_diff_mode_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}} }}
/> />
</Formik> </Formik>
@@ -119,6 +219,11 @@ describe('OtherPromptsStep', () => {
onVarModeChange={onModeChange} onVarModeChange={onModeChange}
launchConfig={{ launchConfig={{
ask_variables_on_launch: true, ask_variables_on_launch: true,
job_template_data: {
name: 'Demo Job Template',
id: 1,
description: '',
},
}} }}
/> />
</Formik> </Formik>

View File

@@ -52,7 +52,7 @@ function PreviewStep({ resource, launchConfig, surveyConfig, formErrors }) {
} }
return ( return (
<> <div data-cy="prompt-preview">
{formErrors && ( {formErrors && (
<ErrorMessageWrapper> <ErrorMessageWrapper>
{t`Some of the previous step(s) have errors`} {t`Some of the previous step(s) have errors`}
@@ -70,7 +70,7 @@ function PreviewStep({ resource, launchConfig, surveyConfig, formErrors }) {
launchConfig={launchConfig} launchConfig={launchConfig}
overrides={overrides} overrides={overrides}
/> />
</> </div>
); );
} }

View File

@@ -31,16 +31,18 @@ function SurveyStep({ surveyConfig }) {
float: NumberField, float: NumberField,
}; };
return ( return (
<Form <div data-cy="survey-prompts">
onSubmit={(e) => { <Form
e.preventDefault(); onSubmit={(e) => {
}} e.preventDefault();
> }}
{surveyConfig.spec.map((question) => { >
const Field = fieldTypes[question.type]; {surveyConfig.spec.map((question) => {
return <Field key={question.variable} question={question} />; const Field = fieldTypes[question.type];
})} return <Field key={question.variable} question={question} />;
</Form> })}
</Form>
</div>
); );
} }
SurveyStep.propTypes = { SurveyStep.propTypes = {

View File

@@ -10,7 +10,7 @@ const STEP_ID = 'credentials';
export default function useCredentialsStep( export default function useCredentialsStep(
launchConfig, launchConfig,
resource, resource,
resourceDefaultCredentials, resourceDefaultCredentials = [],
allowCredentialsWithPasswords = false allowCredentialsWithPasswords = false
) { ) {
const [field, meta, helpers] = useField('credentials'); const [field, meta, helpers] = useField('credentials');
@@ -78,6 +78,6 @@ function getInitialValues(launchConfig, resourceDefaultCredentials) {
} }
return { return {
credentials: resourceDefaultCredentials || [], credentials: resourceDefaultCredentials,
}; };
} }

View File

@@ -0,0 +1,46 @@
import React from 'react';
import { t } from '@lingui/macro';
import ExecutionEnvironmentStep from './ExecutionEnvironmentStep';
import StepName from './StepName';
// Identifier for this wizard step.
const STEP_ID = 'executionEnvironment';

/**
 * Launch-prompt hook describing the optional Execution Environment step.
 * Returns the shape the wizard expects: the step itself (null when the
 * prompt is disabled), the initial formik values it contributes, and
 * ready/error flags — this step loads nothing up front, so it is always
 * ready and error-free.
 */
export default function useExecutionEnvironmentStep(launchConfig, resource) {
  const markTouched = (setFieldTouched) => {
    setFieldTouched('execution_environment', true, false);
  };

  return {
    step: getStep(launchConfig, resource),
    initialValues: getInitialValues(launchConfig, resource),
    isReady: true,
    contentError: null,
    hasError: false,
    setTouched: markTouched,
    validate: () => {},
  };
}
// Build the wizard step descriptor for the Execution Environment prompt,
// or return null when the launch config does not ask for one.
// NOTE(review): the caller passes a second `resource` argument that is
// ignored here — confirm whether it was meant to be used.
function getStep(launchConfig) {
  if (!launchConfig.ask_execution_environment_on_launch) {
    return null;
  }
  return {
    id: STEP_ID,
    name: (
      <StepName id="execution-environment-step">
        {t`Execution Environment`}
      </StepName>
    ),
    component: <ExecutionEnvironmentStep />,
    enableNext: true,
  };
}
// Seed formik with the resource's current execution environment when the
// prompt is enabled; otherwise contribute no fields to the wizard form.
function getInitialValues(launchConfig, resource) {
  const promptEnabled = launchConfig.ask_execution_environment_on_launch;
  if (!promptEnabled) {
    return {};
  }
  const currentEnvironment =
    resource?.summary_fields?.execution_environment || null;
  return { execution_environment: currentEnvironment };
}

View File

@@ -0,0 +1,45 @@
import React from 'react';
import { t } from '@lingui/macro';
import InstanceGroupsStep from './InstanceGroupsStep';
import StepName from './StepName';
// Identifier for this wizard step.
const STEP_ID = 'instanceGroups';

/**
 * Launch-prompt hook describing the optional Instance Groups step.
 * Returns the shape the wizard expects: the step itself (null when the
 * prompt is disabled), the initial formik values it contributes, and
 * ready/error flags — this step loads nothing up front, so it is always
 * ready and error-free.
 */
export default function useInstanceGroupsStep(
  launchConfig,
  resource,
  instanceGroups
) {
  const markTouched = (setFieldTouched) => {
    setFieldTouched('instance_groups', true, false);
  };

  return {
    step: getStep(launchConfig, resource),
    initialValues: getInitialValues(launchConfig, instanceGroups),
    isReady: true,
    contentError: null,
    hasError: false,
    setTouched: markTouched,
    validate: () => {},
  };
}
// Build the wizard step descriptor for the Instance Groups prompt, or
// return null when the launch config does not ask for one.
// NOTE(review): the caller passes a second `resource` argument that is
// ignored here — confirm whether it was meant to be used.
function getStep(launchConfig) {
  if (!launchConfig.ask_instance_groups_on_launch) {
    return null;
  }
  return {
    id: STEP_ID,
    name: <StepName id="instance-groups-step">{t`Instance Groups`}</StepName>,
    component: <InstanceGroupsStep />,
    enableNext: true,
  };
}
// Seed formik with the resource's current instance groups when the prompt
// is enabled; otherwise contribute no fields to the wizard form.
function getInitialValues(launchConfig, instanceGroups) {
  const promptEnabled = launchConfig.ask_instance_groups_on_launch;
  return promptEnabled ? { instance_groups: instanceGroups || [] } : {};
}

View File

@@ -27,9 +27,14 @@ const FIELD_NAMES = [
'job_tags', 'job_tags',
'skip_tags', 'skip_tags',
'extra_vars', 'extra_vars',
'labels',
'timeout',
'job_slice_count',
'forks',
'labels',
]; ];
export default function useOtherPromptsStep(launchConfig, resource) { export default function useOtherPromptsStep(launchConfig, resource, labels) {
const [variablesField] = useField('extra_vars'); const [variablesField] = useField('extra_vars');
const [variablesMode, setVariablesMode] = useState(null); const [variablesMode, setVariablesMode] = useState(null);
const [isTouched, setIsTouched] = useState(false); const [isTouched, setIsTouched] = useState(false);
@@ -59,7 +64,7 @@ export default function useOtherPromptsStep(launchConfig, resource) {
return { return {
step: getStep(launchConfig, hasError, variablesMode, handleModeChange), step: getStep(launchConfig, hasError, variablesMode, handleModeChange),
initialValues: getInitialValues(launchConfig, resource), initialValues: getInitialValues(launchConfig, resource, labels),
isReady: true, isReady: true,
contentError: null, contentError: null,
hasError, hasError,
@@ -105,11 +110,15 @@ function shouldShowPrompt(launchConfig) {
launchConfig.ask_skip_tags_on_launch || launchConfig.ask_skip_tags_on_launch ||
launchConfig.ask_variables_on_launch || launchConfig.ask_variables_on_launch ||
launchConfig.ask_scm_branch_on_launch || launchConfig.ask_scm_branch_on_launch ||
launchConfig.ask_diff_mode_on_launch launchConfig.ask_diff_mode_on_launch ||
launchConfig.ask_labels_on_launch ||
launchConfig.ask_forks_on_launch ||
launchConfig.ask_job_slice_count_on_launch ||
launchConfig.ask_timeout_on_launch
); );
} }
function getInitialValues(launchConfig, resource) { function getInitialValues(launchConfig, resource, labels) {
const initialValues = {}; const initialValues = {};
if (!launchConfig) { if (!launchConfig) {
@@ -140,5 +149,17 @@ function getInitialValues(launchConfig, resource) {
if (launchConfig.ask_diff_mode_on_launch) { if (launchConfig.ask_diff_mode_on_launch) {
initialValues.diff_mode = resource?.diff_mode || false; initialValues.diff_mode = resource?.diff_mode || false;
} }
if (launchConfig.ask_forks_on_launch) {
initialValues.forks = resource?.forks || 0;
}
if (launchConfig.ask_job_slice_count_on_launch) {
initialValues.job_slice_count = resource?.job_slice_count || 1;
}
if (launchConfig.ask_timeout_on_launch) {
initialValues.timeout = resource?.timeout || 0;
}
if (launchConfig.ask_labels_on_launch) {
initialValues.labels = labels || [];
}
return initialValues; return initialValues;
} }

View File

@@ -3,9 +3,11 @@ import { useFormikContext } from 'formik';
import useInventoryStep from './steps/useInventoryStep'; import useInventoryStep from './steps/useInventoryStep';
import useCredentialsStep from './steps/useCredentialsStep'; import useCredentialsStep from './steps/useCredentialsStep';
import useCredentialPasswordsStep from './steps/useCredentialPasswordsStep'; import useCredentialPasswordsStep from './steps/useCredentialPasswordsStep';
import useExecutionEnvironmentStep from './steps/useExecutionEnvironmentStep';
import useOtherPromptsStep from './steps/useOtherPromptsStep'; import useOtherPromptsStep from './steps/useOtherPromptsStep';
import useSurveyStep from './steps/useSurveyStep'; import useSurveyStep from './steps/useSurveyStep';
import usePreviewStep from './steps/usePreviewStep'; import usePreviewStep from './steps/usePreviewStep';
import useInstanceGroupsStep from './steps/useInstanceGroupsStep';
function showCredentialPasswordsStep(launchConfig, credentials = []) { function showCredentialPasswordsStep(launchConfig, credentials = []) {
if ( if (
@@ -39,7 +41,13 @@ function showCredentialPasswordsStep(launchConfig, credentials = []) {
return credentialPasswordStepRequired; return credentialPasswordStepRequired;
} }
export default function useLaunchSteps(launchConfig, surveyConfig, resource) { export default function useLaunchSteps(
launchConfig,
surveyConfig,
resource,
labels,
instanceGroups
) {
const [visited, setVisited] = useState({}); const [visited, setVisited] = useState({});
const [isReady, setIsReady] = useState(false); const [isReady, setIsReady] = useState(false);
const { touched, values: formikValues } = useFormikContext(); const { touched, values: formikValues } = useFormikContext();
@@ -56,7 +64,9 @@ export default function useLaunchSteps(launchConfig, surveyConfig, resource) {
showCredentialPasswordsStep(launchConfig, formikValues.credentials), showCredentialPasswordsStep(launchConfig, formikValues.credentials),
visited visited
), ),
useOtherPromptsStep(launchConfig, resource), useExecutionEnvironmentStep(launchConfig, resource),
useInstanceGroupsStep(launchConfig, resource, instanceGroups),
useOtherPromptsStep(launchConfig, resource, labels),
useSurveyStep(launchConfig, surveyConfig, resource, visited), useSurveyStep(launchConfig, surveyConfig, resource, visited),
]; ];
const { resetForm } = useFormikContext(); const { resetForm } = useFormikContext();
@@ -143,6 +153,8 @@ export default function useLaunchSteps(launchConfig, surveyConfig, resource) {
inventory: true, inventory: true,
credentials: true, credentials: true,
credentialPasswords: true, credentialPasswords: true,
executionEnvironment: true,
instanceGroups: true,
other: true, other: true,
survey: true, survey: true,
preview: true, preview: true,

View File

@@ -10,9 +10,9 @@ import { getQSConfig, parseQueryString, mergeParams } from 'util/qs';
import useRequest from 'hooks/useRequest'; import useRequest from 'hooks/useRequest';
import Popover from '../Popover'; import Popover from '../Popover';
import OptionsList from '../OptionsList'; import OptionsList from '../OptionsList';
import Lookup from './Lookup'; import Lookup from './Lookup';
import LookupErrorMessage from './shared/LookupErrorMessage'; import LookupErrorMessage from './shared/LookupErrorMessage';
import FieldWithPrompt from '../FieldWithPrompt';
const QS_CONFIG = getQSConfig('execution_environments', { const QS_CONFIG = getQSConfig('execution_environments', {
page: 1, page: 1,
@@ -36,6 +36,9 @@ function ExecutionEnvironmentLookup({
value, value,
fieldName, fieldName,
overrideLabel, overrideLabel,
isPromptableField,
promptId,
promptName,
}) { }) {
const location = useLocation(); const location = useLocation();
const { const {
@@ -150,49 +153,52 @@ function ExecutionEnvironmentLookup({
}, [fetchExecutionEnvironments]); }, [fetchExecutionEnvironments]);
const renderLookup = () => ( const renderLookup = () => (
<Lookup <>
id={id} <Lookup
header={t`Execution Environment`} id={id}
value={value} header={t`Execution Environment`}
onBlur={onBlur} value={value}
onChange={onChange} onBlur={onBlur}
onUpdate={fetchExecutionEnvironments} onChange={onChange}
onDebounce={checkExecutionEnvironmentName} onUpdate={fetchExecutionEnvironments}
fieldName={fieldName} onDebounce={checkExecutionEnvironmentName}
validate={validate} fieldName={fieldName}
qsConfig={QS_CONFIG} validate={validate}
isLoading={isLoading || isProjectLoading} qsConfig={QS_CONFIG}
isDisabled={isDisabled} isLoading={isLoading || isProjectLoading}
renderOptionsList={({ state, dispatch, canDelete }) => ( isDisabled={isDisabled}
<OptionsList renderOptionsList={({ state, dispatch, canDelete }) => (
value={state.selectedItems} <OptionsList
options={executionEnvironments} value={state.selectedItems}
optionCount={count} options={executionEnvironments}
searchColumns={[ optionCount={count}
{ searchColumns={[
name: t`Name`, {
key: 'name__icontains', name: t`Name`,
isDefault: true, key: 'name__icontains',
}, isDefault: true,
]} },
sortColumns={[ ]}
{ sortColumns={[
name: t`Name`, {
key: 'name', name: t`Name`,
}, key: 'name',
]} },
searchableKeys={searchableKeys} ]}
relatedSearchableKeys={relatedSearchableKeys} searchableKeys={searchableKeys}
multiple={state.multiple} relatedSearchableKeys={relatedSearchableKeys}
header={t`Execution Environment`} multiple={state.multiple}
name="executionEnvironments" header={t`Execution Environment`}
qsConfig={QS_CONFIG} name="executionEnvironments"
readOnly={!canDelete} qsConfig={QS_CONFIG}
selectItem={(item) => dispatch({ type: 'SELECT_ITEM', item })} readOnly={!canDelete}
deselectItem={(item) => dispatch({ type: 'DESELECT_ITEM', item })} selectItem={(item) => dispatch({ type: 'SELECT_ITEM', item })}
/> deselectItem={(item) => dispatch({ type: 'DESELECT_ITEM', item })}
)} />
/> )}
/>
<LookupErrorMessage error={error || fetchProjectError} />
</>
); );
const renderLabel = () => { const renderLabel = () => {
@@ -202,7 +208,21 @@ function ExecutionEnvironmentLookup({
return t`Execution Environment`; return t`Execution Environment`;
}; };
return ( return isPromptableField ? (
<FieldWithPrompt
fieldId={id}
label={renderLabel()}
promptId={promptId}
promptName={promptName}
tooltip={popoverContent}
>
{tooltip && isDisabled ? (
<Tooltip content={tooltip}>{renderLookup()}</Tooltip>
) : (
renderLookup()
)}
</FieldWithPrompt>
) : (
<FormGroup <FormGroup
fieldId={id} fieldId={id}
label={renderLabel()} label={renderLabel()}

View File

@@ -66,6 +66,9 @@ describe('ExecutionEnvironmentLookup', () => {
expect( expect(
wrapper.find('FormGroup[label="Execution Environment"]').length wrapper.find('FormGroup[label="Execution Environment"]').length
).toBe(1); ).toBe(1);
expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe(
0
);
}); });
test('should fetch execution environments', async () => { test('should fetch execution environments', async () => {
@@ -132,4 +135,25 @@ describe('ExecutionEnvironmentLookup', () => {
page_size: 5, page_size: 5,
}); });
}); });
test('should render prompt on launch checkbox when necessary', async () => {
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<ExecutionEnvironmentLookup
value={executionEnvironment}
onChange={() => {}}
projectId={12}
globallyAvailable
isPromptableField
promptId="ee-prompt"
promptName="ask_execution_environment_on_launch"
/>
</Formik>
);
});
expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe(
1
);
});
}); });

View File

@@ -1,7 +1,6 @@
import React, { useCallback, useEffect } from 'react'; import React, { useCallback, useEffect } from 'react';
import { arrayOf, string, func, bool } from 'prop-types'; import { arrayOf, string, func, bool } from 'prop-types';
import { withRouter } from 'react-router-dom'; import { withRouter } from 'react-router-dom';
import { t, Trans } from '@lingui/macro'; import { t, Trans } from '@lingui/macro';
import { FormGroup } from '@patternfly/react-core'; import { FormGroup } from '@patternfly/react-core';
import { InstanceGroupsAPI } from 'api'; import { InstanceGroupsAPI } from 'api';
@@ -13,6 +12,7 @@ import Popover from '../Popover';
import OptionsList from '../OptionsList'; import OptionsList from '../OptionsList';
import Lookup from './Lookup'; import Lookup from './Lookup';
import LookupErrorMessage from './shared/LookupErrorMessage'; import LookupErrorMessage from './shared/LookupErrorMessage';
import FieldWithPrompt from '../FieldWithPrompt';
const QS_CONFIG = getQSConfig('instance-groups', { const QS_CONFIG = getQSConfig('instance-groups', {
page: 1, page: 1,
@@ -21,6 +21,7 @@ const QS_CONFIG = getQSConfig('instance-groups', {
}); });
function InstanceGroupsLookup({ function InstanceGroupsLookup({
id,
value, value,
onChange, onChange,
tooltip, tooltip,
@@ -29,6 +30,9 @@ function InstanceGroupsLookup({
history, history,
fieldName, fieldName,
validate, validate,
isPromptableField,
promptId,
promptName,
}) { }) {
const { const {
result: { instanceGroups, count, relatedSearchableKeys, searchableKeys }, result: { instanceGroups, count, relatedSearchableKeys, searchableKeys },
@@ -63,13 +67,8 @@ function InstanceGroupsLookup({
fetchInstanceGroups(); fetchInstanceGroups();
}, [fetchInstanceGroups]); }, [fetchInstanceGroups]);
return ( const renderLookup = () => (
<FormGroup <>
className={className}
label={t`Instance Groups`}
labelIcon={tooltip && <Popover content={tooltip} />}
fieldId="org-instance-groups"
>
<Lookup <Lookup
id="org-instance-groups" id="org-instance-groups"
header={t`Instance Groups`} header={t`Instance Groups`}
@@ -133,11 +132,33 @@ function InstanceGroupsLookup({
)} )}
/> />
<LookupErrorMessage error={error} /> <LookupErrorMessage error={error} />
</>
);
return isPromptableField ? (
<FieldWithPrompt
fieldId={id}
label={t`Instance Groups`}
promptId={promptId}
promptName={promptName}
tooltip={tooltip}
>
{renderLookup()}
</FieldWithPrompt>
) : (
<FormGroup
className={className}
label={t`Instance Groups`}
labelIcon={tooltip && <Popover content={tooltip} />}
fieldId={id}
>
{renderLookup()}
</FormGroup> </FormGroup>
); );
} }
InstanceGroupsLookup.propTypes = { InstanceGroupsLookup.propTypes = {
id: string,
value: arrayOf(InstanceGroup).isRequired, value: arrayOf(InstanceGroup).isRequired,
tooltip: string, tooltip: string,
onChange: func.isRequired, onChange: func.isRequired,
@@ -148,6 +169,7 @@ InstanceGroupsLookup.propTypes = {
}; };
InstanceGroupsLookup.defaultProps = { InstanceGroupsLookup.defaultProps = {
id: 'org-instance-groups',
tooltip: '', tooltip: '',
className: '', className: '',
required: false, required: false,

View File

@@ -0,0 +1,111 @@
import React from 'react';
import { act } from 'react-dom/test-utils';
import { Formik } from 'formik';
import { InstanceGroupsAPI } from 'api';
import { mountWithContexts } from '../../../testUtils/enzymeHelpers';
import InstanceGroupsLookup from './InstanceGroupsLookup';
jest.mock('../../api');
const mockedInstanceGroups = {
count: 1,
results: [
{
id: 2,
name: 'Foo',
image: 'quay.io/ansible/awx-ee',
pull: 'missing',
},
],
};
const instanceGroups = [
{
id: 1,
type: 'instance_group',
url: '/api/v2/instance_groups/1/',
related: {
jobs: '/api/v2/instance_groups/1/jobs/',
instances: '/api/v2/instance_groups/1/instances/',
},
name: 'controlplane',
created: '2022-09-13T15:44:54.870579Z',
modified: '2022-09-13T15:44:54.886047Z',
capacity: 59,
consumed_capacity: 0,
percent_capacity_remaining: 100.0,
jobs_running: 0,
jobs_total: 40,
instances: 1,
is_container_group: false,
credential: null,
policy_instance_percentage: 100,
policy_instance_minimum: 0,
policy_instance_list: [],
pod_spec_override: '',
summary_fields: {
user_capabilities: {
edit: true,
delete: false,
},
},
},
];
describe('InstanceGroupsLookup', () => {
let wrapper;
beforeEach(() => {
InstanceGroupsAPI.read.mockResolvedValue({
data: mockedInstanceGroups,
});
});
afterEach(() => {
jest.clearAllMocks();
});
test('should render successfully', async () => {
InstanceGroupsAPI.readOptions.mockReturnValue({
data: {
actions: {
GET: {},
POST: {},
},
related_search_fields: [],
},
});
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<InstanceGroupsLookup value={instanceGroups} onChange={() => {}} />
</Formik>
);
});
wrapper.update();
expect(InstanceGroupsAPI.read).toHaveBeenCalledTimes(1);
expect(wrapper.find('InstanceGroupsLookup')).toHaveLength(1);
expect(wrapper.find('FormGroup[label="Instance Groups"]').length).toBe(1);
expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe(
0
);
});
test('should render prompt on launch checkbox when necessary', async () => {
await act(async () => {
wrapper = mountWithContexts(
<Formik>
<InstanceGroupsLookup
value={instanceGroups}
onChange={() => {}}
isPromptableField
promptId="ig-prompt"
promptName="ask_instance_groups_on_launch"
/>
</Formik>
);
});
expect(wrapper.find('Checkbox[aria-label="Prompt on launch"]').length).toBe(
1
);
});
});

View File

@@ -17,13 +17,21 @@ export default function useSyncedSelectValue(value, onChange) {
return; return;
} }
const newOptions = []; const newOptions = [];
if (value !== selections && options.length) { if (value && value !== selections && options.length) {
const syncedValue = value.map((item) => { const syncedValue = value.map((item) => {
const match = options.find((i) => i.id === item.id); const match = options.find((i) => i.id === item.id);
if (!match) { if (!match) {
newOptions.push(item); newOptions.push(item);
} }
return match || item;
if (match) {
if (item.isReadOnly) {
match.isReadOnly = true;
}
return match;
}
return item;
}); });
setSelections(syncedValue); setSelections(syncedValue);
} }

View File

@@ -35,6 +35,9 @@ function formatTimeout(timeout) {
if (typeof timeout === 'undefined' || timeout === null) { if (typeof timeout === 'undefined' || timeout === null) {
return null; return null;
} }
if (typeof timeout === 'string') {
return timeout;
}
const minutes = Math.floor(timeout / 60); const minutes = Math.floor(timeout / 60);
const seconds = timeout - Math.floor(timeout / 60) * 60; const seconds = timeout - Math.floor(timeout / 60) * 60;
return ( return (
@@ -71,7 +74,13 @@ function hasPromptData(launchData) {
launchData.ask_skip_tags_on_launch || launchData.ask_skip_tags_on_launch ||
launchData.ask_tags_on_launch || launchData.ask_tags_on_launch ||
launchData.ask_variables_on_launch || launchData.ask_variables_on_launch ||
launchData.ask_verbosity_on_launch launchData.ask_verbosity_on_launch ||
launchData.ask_execution_environment_on_launch ||
launchData.ask_labels_on_launch ||
launchData.ask_forks_on_launch ||
launchData.ask_job_slice_count_on_launch ||
launchData.ask_timeout_on_launch ||
launchData.ask_instance_groups_on_launch
); );
} }
@@ -206,6 +215,36 @@ function PromptDetail({
value={overrides.inventory?.name} value={overrides.inventory?.name}
/> />
)} )}
{launchConfig.ask_execution_environment_on_launch && (
<Detail
label={t`Execution Environment`}
value={overrides.execution_environment?.name}
/>
)}
{launchConfig.ask_instance_groups_on_launch && (
<Detail
fullWidth
label={t`Instance Groups`}
rows={4}
value={
<ChipGroup
numChips={5}
totalChips={overrides.instance_groups.length}
ouiaId="prompt-instance-groups-chips"
>
{overrides.instance_groups.map((instance_group) => (
<Chip
key={instance_group.id}
ouiaId={`instance-group-${instance_group.id}-chip`}
isReadOnly
>
{instance_group.name}
</Chip>
))}
</ChipGroup>
}
/>
)}
{launchConfig.ask_scm_branch_on_launch && ( {launchConfig.ask_scm_branch_on_launch && (
<Detail <Detail
label={t`Source Control Branch`} label={t`Source Control Branch`}
@@ -278,6 +317,45 @@ function PromptDetail({
} }
/> />
)} )}
{launchConfig.ask_labels_on_launch && (
<Detail
fullWidth
label={t`Labels`}
value={
<ChipGroup
numChips={5}
totalChips={overrides.labels.length}
ouiaId="prompt-label-chips"
>
{overrides.labels.map((label) => (
<Chip
key={label.id}
ouiaId={`label-${label.id}-chip`}
isReadOnly
>
{label.name}
</Chip>
))}
</ChipGroup>
}
isEmpty={overrides.labels.length === 0}
/>
)}
{launchConfig.ask_forks_on_launch && (
<Detail label={t`Forks`} value={overrides.forks} />
)}
{launchConfig.ask_job_slice_count_on_launch && (
<Detail
label={t`Job Slicing`}
value={overrides.job_slice_count}
/>
)}
{launchConfig.ask_timeout_on_launch && (
<Detail
label={t`Timeout`}
value={formatTimeout(overrides?.timeout)}
/>
)}
{launchConfig.ask_diff_mode_on_launch && ( {launchConfig.ask_diff_mode_on_launch && (
<Detail <Detail
label={t`Show Changes`} label={t`Show Changes`}

View File

@@ -15,6 +15,12 @@ const mockPromptLaunch = {
ask_tags_on_launch: true, ask_tags_on_launch: true,
ask_variables_on_launch: true, ask_variables_on_launch: true,
ask_verbosity_on_launch: true, ask_verbosity_on_launch: true,
ask_execution_environment_on_launch: true,
ask_labels_on_launch: true,
ask_forks_on_launch: true,
ask_job_slice_count_on_launch: true,
ask_timeout_on_launch: true,
ask_instance_groups_on_launch: true,
defaults: { defaults: {
extra_vars: '---foo: bar', extra_vars: '---foo: bar',
diff_mode: false, diff_mode: false,
@@ -40,6 +46,10 @@ const mockPromptLaunch = {
}, },
], ],
scm_branch: 'Foo branch', scm_branch: 'Foo branch',
execution_environment: 1,
forks: 1,
job_slice_count: 1,
timeout: 100,
}, },
}; };
@@ -73,9 +83,20 @@ describe('PromptDetail', () => {
assertDetail('Limit', 'localhost'); assertDetail('Limit', 'localhost');
assertDetail('Verbosity', '3 (Debug)'); assertDetail('Verbosity', '3 (Debug)');
assertDetail('Show Changes', 'Off'); assertDetail('Show Changes', 'Off');
assertDetail('Timeout', '1 min 40 sec');
assertDetail('Forks', '1');
assertDetail('Job Slicing', '1');
expect(wrapper.find('VariablesDetail').prop('value')).toEqual( expect(wrapper.find('VariablesDetail').prop('value')).toEqual(
'---foo: bar' '---foo: bar'
); );
expect(
wrapper
.find('Detail[label="Labels"]')
.containsAllMatchingElements([
<span>L_91o2</span>,
<span>L_91o3</span>,
])
).toEqual(true);
expect( expect(
wrapper wrapper
.find('Detail[label="Credentials"]') .find('Detail[label="Credentials"]')
@@ -151,6 +172,19 @@ describe('PromptDetail', () => {
job_type: 'check', job_type: 'check',
scm_branch: 'Bar branch', scm_branch: 'Bar branch',
diff_mode: true, diff_mode: true,
forks: 2,
job_slice_count: 2,
timeout: 160,
labels: [
{ name: 'foo', id: 1 },
{ name: 'bar', id: 2 },
],
instance_groups: [
{
id: 1,
name: 'controlplane',
},
],
}; };
beforeAll(() => { beforeAll(() => {
@@ -182,9 +216,17 @@ describe('PromptDetail', () => {
assertDetail('Limit', 'otherlimit'); assertDetail('Limit', 'otherlimit');
assertDetail('Verbosity', '0 (Normal)'); assertDetail('Verbosity', '0 (Normal)');
assertDetail('Show Changes', 'On'); assertDetail('Show Changes', 'On');
assertDetail('Timeout', '2 min 40 sec');
assertDetail('Forks', '2');
assertDetail('Job Slicing', '2');
expect(wrapper.find('VariablesDetail').prop('value')).toEqual( expect(wrapper.find('VariablesDetail').prop('value')).toEqual(
'---one: two\nbar: baz' '---one: two\nbar: baz'
); );
expect(
wrapper
.find('Detail[label="Labels"]')
.containsAllMatchingElements([<span>foo</span>, <span>bar</span>])
).toEqual(true);
expect( expect(
wrapper wrapper
.find('Detail[label="Credentials"]') .find('Detail[label="Credentials"]')

View File

@@ -146,7 +146,10 @@ function PromptJobTemplateDetail({ resource }) {
/> />
<Detail label={t`Source Control Branch`} value={scm_branch} /> <Detail label={t`Source Control Branch`} value={scm_branch} />
<Detail label={t`Playbook`} value={playbook} /> <Detail label={t`Playbook`} value={playbook} />
<Detail label={t`Forks`} value={forks || '0'} /> <Detail
label={t`Forks`}
value={typeof forks === 'number' ? forks.toString() : forks}
/>
<Detail label={t`Limit`} value={limit} /> <Detail label={t`Limit`} value={limit} />
<Detail label={t`Verbosity`} value={VERBOSITY()[verbosity]} /> <Detail label={t`Verbosity`} value={VERBOSITY()[verbosity]} />
{typeof diff_mode === 'boolean' && ( {typeof diff_mode === 'boolean' && (

View File

@@ -3,159 +3,163 @@
"type": "job_template", "type": "job_template",
"url": "/api/v2/job_templates/7/", "url": "/api/v2/job_templates/7/",
"related": { "related": {
"named_url": "/api/v2/job_templates/MockJT/", "named_url": "/api/v2/job_templates/MockJT/",
"created_by": "/api/v2/users/1/", "created_by": "/api/v2/users/1/",
"modified_by": "/api/v2/users/1/", "modified_by": "/api/v2/users/1/",
"labels": "/api/v2/job_templates/7/labels/", "labels": "/api/v2/job_templates/7/labels/",
"inventory": "/api/v2/inventories/1/", "inventory": "/api/v2/inventories/1/",
"project": "/api/v2/projects/6/", "project": "/api/v2/projects/6/",
"credentials": "/api/v2/job_templates/7/credentials/", "credentials": "/api/v2/job_templates/7/credentials/",
"last_job": "/api/v2/jobs/12/", "last_job": "/api/v2/jobs/12/",
"jobs": "/api/v2/job_templates/7/jobs/", "jobs": "/api/v2/job_templates/7/jobs/",
"schedules": "/api/v2/job_templates/7/schedules/", "schedules": "/api/v2/job_templates/7/schedules/",
"activity_stream": "/api/v2/job_templates/7/activity_stream/", "activity_stream": "/api/v2/job_templates/7/activity_stream/",
"launch": "/api/v2/job_templates/7/launch/", "launch": "/api/v2/job_templates/7/launch/",
"webhook_key": "/api/v2/job_templates/7/webhook_key/", "webhook_key": "/api/v2/job_templates/7/webhook_key/",
"webhook_receiver": "/api/v2/job_templates/7/github/", "webhook_receiver": "/api/v2/job_templates/7/github/",
"notification_templates_started": "/api/v2/job_templates/7/notification_templates_started/", "notification_templates_started": "/api/v2/job_templates/7/notification_templates_started/",
"notification_templates_success": "/api/v2/job_templates/7/notification_templates_success/", "notification_templates_success": "/api/v2/job_templates/7/notification_templates_success/",
"notification_templates_error": "/api/v2/job_templates/7/notification_templates_error/", "notification_templates_error": "/api/v2/job_templates/7/notification_templates_error/",
"access_list": "/api/v2/job_templates/7/access_list/", "access_list": "/api/v2/job_templates/7/access_list/",
"survey_spec": "/api/v2/job_templates/7/survey_spec/", "survey_spec": "/api/v2/job_templates/7/survey_spec/",
"object_roles": "/api/v2/job_templates/7/object_roles/", "object_roles": "/api/v2/job_templates/7/object_roles/",
"instance_groups": "/api/v2/job_templates/7/instance_groups/", "instance_groups": "/api/v2/job_templates/7/instance_groups/",
"slice_workflow_jobs": "/api/v2/job_templates/7/slice_workflow_jobs/", "slice_workflow_jobs": "/api/v2/job_templates/7/slice_workflow_jobs/",
"copy": "/api/v2/job_templates/7/copy/", "copy": "/api/v2/job_templates/7/copy/",
"callback": "/api/v2/job_templates/7/callback/", "callback": "/api/v2/job_templates/7/callback/",
"webhook_credential": "/api/v2/credentials/8/" "webhook_credential": "/api/v2/credentials/8/"
}, },
"summary_fields": { "summary_fields": {
"inventory": { "inventory": {
"id": 1, "id": 1,
"name": "Demo Inventory", "name": "Demo Inventory",
"description": "", "description": "",
"has_active_failures": false, "has_active_failures": false,
"total_hosts": 1, "total_hosts": 1,
"hosts_with_active_failures": 0, "hosts_with_active_failures": 0,
"total_groups": 0, "total_groups": 0,
"groups_with_active_failures": 0, "groups_with_active_failures": 0,
"has_inventory_sources": false, "has_inventory_sources": false,
"total_inventory_sources": 0, "total_inventory_sources": 0,
"inventory_sources_with_failures": 0, "inventory_sources_with_failures": 0,
"organization_id": 1, "organization_id": 1,
"kind": "" "kind": ""
},
"execution_environment": {
"id": 1,
"name": "Default EE",
"description": "",
"image": "quay.io/ansible/awx-ee"
},
"project": {
"id": 6,
"name": "Mock Project",
"description": "",
"status": "successful",
"scm_type": "git"
},
"last_job": {
"id": 12,
"name": "Mock JT",
"description": "",
"finished": "2019-10-01T14:34:35.142483Z",
"status": "successful",
"failed": false
},
"last_update": {
"id": 12,
"name": "Mock JT",
"description": "",
"status": "successful",
"failed": false
},
"webhook_credential": {
"id": 8,
"name": "GitHub Cred",
"description": "",
"kind": "github_token",
"cloud": false,
"credential_type_id": 12
},
"created_by": {
"id": 1,
"username": "admin",
"first_name": "",
"last_name": ""
},
"modified_by": {
"id": 1,
"username": "admin",
"first_name": "",
"last_name": ""
},
"object_roles": {
"admin_role": {
"description": "Can manage all aspects of the job template",
"name": "Admin",
"id": 24
},
"execute_role": {
"description": "May run the job template",
"name": "Execute",
"id": 25
},
"read_role": {
"description": "May view settings for the job template",
"name": "Read",
"id": 26
}
},
"user_capabilities": {
"edit": true,
"delete": true,
"start": true,
"schedule": true,
"copy": true
},
"labels": {
"count": 1,
"results": [
{
"id": 91,
"name": "L_91o2"
},
{
"id": 92,
"name": "L_91o3"
}
]
}, },
"survey": { "execution_environment": {
"title": "", "id": 1,
"description": "" "name": "Default EE",
"description": "",
"image": "quay.io/ansible/awx-ee"
},
"project": {
"id": 6,
"name": "Mock Project",
"description": "",
"status": "successful",
"scm_type": "git"
},
"last_job": {
"id": 12,
"name": "Mock JT",
"description": "",
"finished": "2019-10-01T14:34:35.142483Z",
"status": "successful",
"failed": false
},
"last_update": {
"id": 12,
"name": "Mock JT",
"description": "",
"status": "successful",
"failed": false
},
"webhook_credential": {
"id": 8,
"name": "GitHub Cred",
"description": "",
"kind": "github_token",
"cloud": false,
"credential_type_id": 12
},
"created_by": {
"id": 1,
"username": "admin",
"first_name": "",
"last_name": ""
},
"modified_by": {
"id": 1,
"username": "admin",
"first_name": "",
"last_name": ""
},
"object_roles": {
"admin_role": {
"description": "Can manage all aspects of the job template",
"name": "Admin",
"id": 24
}, },
"recent_jobs": [ "execute_role": {
{ "description": "May run the job template",
"id": 12, "name": "Execute",
"status": "successful", "id": 25
"finished": "2019-10-01T14:34:35.142483Z", },
"type": "job" "read_role": {
}, "description": "May view settings for the job template",
{ "name": "Read",
"id": 13, "id": 26
"status": "successful", }
"finished": "2019-10-01T14:34:35.142483Z", },
"type": "job" "user_capabilities": {
} "edit": true,
], "delete": true,
"credentials": [ "start": true,
"schedule": true,
"copy": true
},
"labels": {
"count": 1,
"results": [
{ {
"id": 1, "kind": "ssh" , "name": "Credential 1" "id": 91,
"name": "L_91o2"
}, },
{ {
"id": 2, "kind": "awx" , "name": "Credential 2" "id": 92,
"name": "L_91o3"
} }
] ]
},
"survey": {
"title": "",
"description": ""
},
"recent_jobs": [
{
"id": 12,
"status": "successful",
"finished": "2019-10-01T14:34:35.142483Z",
"type": "job"
},
{
"id": 13,
"status": "successful",
"finished": "2019-10-01T14:34:35.142483Z",
"type": "job"
}
],
"credentials": [
{
"id": 1,
"kind": "ssh",
"name": "Credential 1"
},
{
"id": 2,
"kind": "awx",
"name": "Credential 2"
}
]
}, },
"created": "2019-09-30T16:18:34.564820Z", "created": "2019-09-30T16:18:34.564820Z",
"modified": "2019-10-01T14:47:31.818431Z", "modified": "2019-10-01T14:47:31.818431Z",

View File

@@ -1,12 +1,10 @@
import React, { useState } from 'react'; import React, { useState } from 'react';
import { func, shape } from 'prop-types'; import { func, shape } from 'prop-types';
import { useHistory, useLocation } from 'react-router-dom'; import { useHistory, useLocation } from 'react-router-dom';
import { Card } from '@patternfly/react-core'; import { Card } from '@patternfly/react-core';
import yaml from 'js-yaml'; import yaml from 'js-yaml';
import { parseVariableField } from 'util/yaml'; import { parseVariableField } from 'util/yaml';
import { OrganizationsAPI, SchedulesAPI } from 'api';
import { SchedulesAPI } from 'api';
import mergeExtraVars from 'util/prompt/mergeExtraVars'; import mergeExtraVars from 'util/prompt/mergeExtraVars';
import getSurveyValues from 'util/prompt/getSurveyValues'; import getSurveyValues from 'util/prompt/getSurveyValues';
import { getAddedAndRemoved } from 'util/lists'; import { getAddedAndRemoved } from 'util/lists';
@@ -34,6 +32,8 @@ function ScheduleAdd({
surveyConfiguration surveyConfiguration
) => { ) => {
const { const {
execution_environment,
instance_groups,
inventory, inventory,
frequency, frequency,
frequencyOptions, frequencyOptions,
@@ -41,6 +41,7 @@ function ScheduleAdd({
exceptionOptions, exceptionOptions,
timezone, timezone,
credentials, credentials,
labels,
...submitValues ...submitValues
} = values; } = values;
const { added } = getAddedAndRemoved( const { added } = getAddedAndRemoved(
@@ -72,6 +73,10 @@ function ScheduleAdd({
submitValues.inventory = inventory.id; submitValues.inventory = inventory.id;
} }
if (execution_environment) {
submitValues.execution_environment = execution_environment.id;
}
try { try {
const ruleSet = buildRuleSet(values); const ruleSet = buildRuleSet(values);
const requestData = { const requestData = {
@@ -94,13 +99,46 @@ function ScheduleAdd({
const { const {
data: { id: scheduleId }, data: { id: scheduleId },
} = await apiModel.createSchedule(resource.id, requestData); } = await apiModel.createSchedule(resource.id, requestData);
if (credentials?.length > 0) {
await Promise.all( let labelsPromises = [];
added.map(({ id: credentialId }) => let credentialsPromises = [];
SchedulesAPI.associateCredential(scheduleId, credentialId)
) if (launchConfiguration?.ask_labels_on_launch && labels) {
let organizationId = resource.organization;
if (!organizationId) {
// eslint-disable-next-line no-useless-catch
try {
const {
data: { results },
} = await OrganizationsAPI.read();
organizationId = results[0].id;
} catch (err) {
throw err;
}
}
labelsPromises = labels.map((label) =>
SchedulesAPI.associateLabel(scheduleId, label, organizationId)
); );
} }
if (launchConfiguration?.ask_credential_on_launch && added?.length > 0) {
credentialsPromises = added.map(({ id: credentialId }) =>
SchedulesAPI.associateCredential(scheduleId, credentialId)
);
}
await Promise.all([labelsPromises, credentialsPromises]);
if (
launchConfiguration?.ask_instance_groups_on_launch &&
instance_groups
) {
/* eslint-disable no-await-in-loop, no-restricted-syntax */
for (const group of instance_groups) {
await SchedulesAPI.associateInstanceGroup(scheduleId, group.id);
}
}
history.push(`${pathRoot}schedules/${scheduleId}`); history.push(`${pathRoot}schedules/${scheduleId}`);
} catch (err) { } catch (err) {
setFormSubmitError(err); setFormSubmitError(err);

View File

@@ -1,11 +1,21 @@
import React from 'react'; import React from 'react';
import { act } from 'react-dom/test-utils'; import { act } from 'react-dom/test-utils';
import { RRule } from 'rrule'; import { RRule } from 'rrule';
import { SchedulesAPI, JobTemplatesAPI, InventoriesAPI } from 'api'; import {
CredentialsAPI,
CredentialTypesAPI,
SchedulesAPI,
JobTemplatesAPI,
InventoriesAPI,
} from 'api';
import { mountWithContexts } from '../../../../testUtils/enzymeHelpers'; import { mountWithContexts } from '../../../../testUtils/enzymeHelpers';
import ScheduleAdd from './ScheduleAdd'; import ScheduleAdd from './ScheduleAdd';
jest.mock('../../../api'); jest.mock('../../../api/models/Credentials');
jest.mock('../../../api/models/CredentialTypes');
jest.mock('../../../api/models/Schedules');
jest.mock('../../../api/models/JobTemplates');
jest.mock('../../../api/models/Inventories');
const launchConfig = { const launchConfig = {
can_start_without_user_input: false, can_start_without_user_input: false,
@@ -19,7 +29,7 @@ const launchConfig = {
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: true, ask_inventory_on_launch: true,
ask_credential_on_launch: false, ask_credential_on_launch: true,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -57,6 +67,33 @@ describe('<ScheduleAdd />', () => {
], ],
}); });
JobTemplatesAPI.createSchedule.mockResolvedValue({ data: { id: 3 } }); JobTemplatesAPI.createSchedule.mockResolvedValue({ data: { id: 3 } });
CredentialTypesAPI.loadAllTypes.mockResolvedValue([
{ id: 1, name: 'ssh', kind: 'ssh' },
]);
CredentialsAPI.read.mockResolvedValue({
data: {
count: 1,
results: [
{
id: 10,
name: 'cred 1',
kind: 'ssh',
url: '',
credential_type: 1,
},
],
},
});
CredentialsAPI.readOptions.mockResolvedValue({
data: {
related_search_fields: [],
actions: { GET: { filterabled: true } },
},
});
await act(async () => { await act(async () => {
wrapper = mountWithContexts( wrapper = mountWithContexts(
<ScheduleAdd <ScheduleAdd
@@ -70,6 +107,7 @@ describe('<ScheduleAdd />', () => {
description: '', description: '',
}} }}
launchConfig={launchConfig} launchConfig={launchConfig}
surveyConfig={{}}
/> />
); );
}); });
@@ -390,6 +428,7 @@ describe('<ScheduleAdd />', () => {
wrapper.find('Button[aria-label="Prompt"]').prop('onClick')() wrapper.find('Button[aria-label="Prompt"]').prop('onClick')()
); );
wrapper.update(); wrapper.update();
// Inventory step
expect(wrapper.find('WizardNavItem').at(0).prop('isCurrent')).toBe(true); expect(wrapper.find('WizardNavItem').at(0).prop('isCurrent')).toBe(true);
await act(async () => { await act(async () => {
wrapper.find('td#check-action-item-1').find('input').simulate('click'); wrapper.find('td#check-action-item-1').find('input').simulate('click');
@@ -402,7 +441,21 @@ describe('<ScheduleAdd />', () => {
wrapper.find('WizardFooterInternal').prop('onNext')() wrapper.find('WizardFooterInternal').prop('onNext')()
); );
wrapper.update(); wrapper.update();
// Credential step
expect(wrapper.find('WizardNavItem').at(1).prop('isCurrent')).toBe(true); expect(wrapper.find('WizardNavItem').at(1).prop('isCurrent')).toBe(true);
await act(async () => {
wrapper.find('td#check-action-item-10').find('input').simulate('click');
});
wrapper.update();
expect(
wrapper.find('td#check-action-item-10').find('input').prop('checked')
).toBe(true);
await act(async () =>
wrapper.find('WizardFooterInternal').prop('onNext')()
);
wrapper.update();
// Preview step
expect(wrapper.find('WizardNavItem').at(2).prop('isCurrent')).toBe(true);
await act(async () => await act(async () =>
wrapper.find('WizardFooterInternal').prop('onNext')() wrapper.find('WizardFooterInternal').prop('onNext')()
); );
@@ -414,10 +467,7 @@ describe('<ScheduleAdd />', () => {
frequency: [], frequency: [],
skip_tags: '', skip_tags: '',
inventory: { name: 'inventory', id: 45 }, inventory: { name: 'inventory', id: 45 },
credentials: [ credentials: [{ name: 'cred 1', id: 10 }],
{ name: 'cred 1', id: 10 },
{ name: 'cred 2', id: 20 },
],
startDate: '2021-01-28', startDate: '2021-01-28',
startTime: '2:15 PM', startTime: '2:15 PM',
timezone: 'America/New_York', timezone: 'America/New_York',
@@ -434,7 +484,6 @@ describe('<ScheduleAdd />', () => {
skip_tags: '', skip_tags: '',
}); });
expect(SchedulesAPI.associateCredential).toBeCalledWith(3, 10); expect(SchedulesAPI.associateCredential).toBeCalledWith(3, 10);
expect(SchedulesAPI.associateCredential).toBeCalledWith(3, 20);
}); });
test('should submit survey with default values properly, without opening prompt wizard', async () => { test('should submit survey with default values properly, without opening prompt wizard', async () => {

View File

@@ -27,6 +27,11 @@ import { VariablesDetail } from '../../CodeEditor';
import { VERBOSITY } from '../../VerbositySelectField'; import { VERBOSITY } from '../../VerbositySelectField';
import getHelpText from '../../../screens/Template/shared/JobTemplate.helptext'; import getHelpText from '../../../screens/Template/shared/JobTemplate.helptext';
const buildLinkURL = (instance) =>
instance.is_container_group
? '/instance_groups/container_group/'
: '/instance_groups/';
const PromptDivider = styled(Divider)` const PromptDivider = styled(Divider)`
margin-top: var(--pf-global--spacer--lg); margin-top: var(--pf-global--spacer--lg);
margin-bottom: var(--pf-global--spacer--lg); margin-bottom: var(--pf-global--spacer--lg);
@@ -73,8 +78,11 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
diff_mode, diff_mode,
dtend, dtend,
dtstart, dtstart,
execution_environment,
extra_data, extra_data,
forks,
inventory, inventory,
job_slice_count,
job_tags, job_tags,
job_type, job_type,
limit, limit,
@@ -85,6 +93,7 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
scm_branch, scm_branch,
skip_tags, skip_tags,
summary_fields, summary_fields,
timeout,
timezone, timezone,
verbosity, verbosity,
} = schedule; } = schedule;
@@ -108,7 +117,7 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
const { error, dismissError } = useDismissableError(deleteError); const { error, dismissError } = useDismissableError(deleteError);
const { const {
result: [credentials, preview, launchData], result: [credentials, preview, launchData, labels, instanceGroups],
isLoading, isLoading,
error: readContentError, error: readContentError,
request: fetchCredentialsAndPreview, request: fetchCredentialsAndPreview,
@@ -128,7 +137,9 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
promises.push( promises.push(
JobTemplatesAPI.readLaunch( JobTemplatesAPI.readLaunch(
schedule.summary_fields.unified_job_template.id schedule.summary_fields.unified_job_template.id
) ),
SchedulesAPI.readAllLabels(id),
SchedulesAPI.readInstanceGroups(id)
); );
} else if ( } else if (
schedule?.summary_fields?.unified_job_template?.unified_job_type === schedule?.summary_fields?.unified_job_template?.unified_job_type ===
@@ -137,17 +148,28 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
promises.push( promises.push(
WorkflowJobTemplatesAPI.readLaunch( WorkflowJobTemplatesAPI.readLaunch(
schedule.summary_fields.unified_job_template.id schedule.summary_fields.unified_job_template.id
) ),
SchedulesAPI.readAllLabels(id)
); );
} else { } else {
promises.push(Promise.resolve()); promises.push(Promise.resolve());
} }
const [{ data }, { data: schedulePreview }, launch] = await Promise.all( const [
promises { data },
); { data: schedulePreview },
launch,
allLabelsResults,
instanceGroupsResults,
] = await Promise.all(promises);
return [data.results, schedulePreview, launch?.data]; return [
data.results,
schedulePreview,
launch?.data,
allLabelsResults?.data?.results,
instanceGroupsResults?.data?.results,
];
}, [id, schedule, rrule]), }, [id, schedule, rrule]),
[] []
); );
@@ -185,6 +207,12 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
ask_tags_on_launch, ask_tags_on_launch,
ask_variables_on_launch, ask_variables_on_launch,
ask_verbosity_on_launch, ask_verbosity_on_launch,
ask_execution_environment_on_launch,
ask_labels_on_launch,
ask_forks_on_launch,
ask_job_slice_count_on_launch,
ask_timeout_on_launch,
ask_instance_groups_on_launch,
survey_enabled, survey_enabled,
} = launchData || {}; } = launchData || {};
@@ -239,6 +267,16 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
const showJobTypeDetail = ask_job_type_on_launch && job_type; const showJobTypeDetail = ask_job_type_on_launch && job_type;
const showSCMBranchDetail = ask_scm_branch_on_launch && scm_branch; const showSCMBranchDetail = ask_scm_branch_on_launch && scm_branch;
const showVerbosityDetail = ask_verbosity_on_launch && VERBOSITY()[verbosity]; const showVerbosityDetail = ask_verbosity_on_launch && VERBOSITY()[verbosity];
const showExecutionEnvironmentDetail =
ask_execution_environment_on_launch && execution_environment;
const showLabelsDetail = ask_labels_on_launch && labels && labels.length > 0;
const showForksDetail = ask_forks_on_launch && typeof forks === 'number';
const showJobSlicingDetail =
ask_job_slice_count_on_launch && typeof job_slice_count === 'number';
const showTimeoutDetail =
ask_timeout_on_launch && typeof timeout === 'number';
const showInstanceGroupsDetail =
ask_instance_groups_on_launch && instanceGroups.length > 0;
const showPromptedFields = const showPromptedFields =
showCredentialsDetail || showCredentialsDetail ||
@@ -250,7 +288,13 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
showSkipTagsDetail || showSkipTagsDetail ||
showTagsDetail || showTagsDetail ||
showVerbosityDetail || showVerbosityDetail ||
showVariablesDetail; showVariablesDetail ||
showExecutionEnvironmentDetail ||
showLabelsDetail ||
showForksDetail ||
showJobSlicingDetail ||
showTimeoutDetail ||
showInstanceGroupsDetail;
if (isLoading) { if (isLoading) {
return <ContentLoading />; return <ContentLoading />;
@@ -402,11 +446,20 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
dataCy="schedule-inventory" dataCy="schedule-inventory"
/> />
)} )}
{ask_verbosity_on_launch && ( {showExecutionEnvironmentDetail && (
<Detail <Detail
label={t`Verbosity`} label={t`Execution Environment`}
value={VERBOSITY()[verbosity]} value={
dataCy="schedule-verbosity" summary_fields?.execution_environment ? (
<Link
to={`/execution_environments/${summary_fields?.execution_environment?.id}/details`}
>
{summary_fields?.execution_environment?.name}
</Link>
) : (
' '
)
}
/> />
)} )}
{ask_scm_branch_on_launch && ( {ask_scm_branch_on_launch && (
@@ -419,6 +472,17 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
{ask_limit_on_launch && ( {ask_limit_on_launch && (
<Detail label={t`Limit`} value={limit} dataCy="schedule-limit" /> <Detail label={t`Limit`} value={limit} dataCy="schedule-limit" />
)} )}
{ask_forks_on_launch && <Detail label={t`Forks`} value={forks} />}
{ask_verbosity_on_launch && (
<Detail
label={t`Verbosity`}
value={VERBOSITY()[verbosity]}
dataCy="schedule-verbosity"
/>
)}
{ask_timeout_on_launch && (
<Detail label={t`Timeout`} value={timeout} />
)}
{showDiffModeDetail && ( {showDiffModeDetail && (
<Detail <Detail
label={t`Show Changes`} label={t`Show Changes`}
@@ -426,6 +490,38 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
dataCy="schedule-show-changes" dataCy="schedule-show-changes"
/> />
)} )}
{ask_job_slice_count_on_launch && (
<Detail label={t`Job Slicing`} value={job_slice_count} />
)}
{showInstanceGroupsDetail && (
<Detail
fullWidth
label={t`Instance Groups`}
value={
<ChipGroup
numChips={5}
totalChips={instanceGroups.length}
ouiaId="instance-group-chips"
>
{instanceGroups.map((ig) => (
<Link
to={`${buildLinkURL(ig)}${ig.id}/details`}
key={ig.id}
>
<Chip
key={ig.id}
ouiaId={`instance-group-${ig.id}-chip`}
isReadOnly
>
{ig.name}
</Chip>
</Link>
))}
</ChipGroup>
}
isEmpty={instanceGroups.length === 0}
/>
)}
{showCredentialsDetail && ( {showCredentialsDetail && (
<Detail <Detail
fullWidth fullWidth
@@ -449,6 +545,26 @@ function ScheduleDetail({ hasDaysToKeepField, schedule, surveyConfig }) {
dataCy="schedule-credentials" dataCy="schedule-credentials"
/> />
)} )}
{showLabelsDetail && (
<Detail
fullWidth
label={t`Labels`}
value={
<ChipGroup
numChips={5}
totalChips={labels.length}
ouiaId="schedule-label-chips"
>
{labels.map((l) => (
<Chip key={l.id} ouiaId={`label-${l.id}-chip`} isReadOnly>
{l.name}
</Chip>
))}
</ChipGroup>
}
isEmpty={labels.length === 0}
/>
)}
{showTagsDetail && ( {showTagsDetail && (
<Detail <Detail
fullWidth fullWidth

View File

@@ -23,6 +23,12 @@ const allPrompts = {
ask_tags_on_launch: true, ask_tags_on_launch: true,
ask_variables_on_launch: true, ask_variables_on_launch: true,
ask_verbosity_on_launch: true, ask_verbosity_on_launch: true,
ask_execution_environment_on_launch: true,
ask_labels_on_launch: true,
ask_forks_on_launch: true,
ask_job_slice_count_on_launch: true,
ask_timeout_on_launch: true,
ask_instance_groups_on_launch: true,
survey_enabled: true, survey_enabled: true,
inventory_needed_to_start: true, inventory_needed_to_start: true,
}, },
@@ -40,6 +46,12 @@ const noPrompts = {
ask_tags_on_launch: false, ask_tags_on_launch: false,
ask_variables_on_launch: false, ask_variables_on_launch: false,
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
}, },
}; };
@@ -91,6 +103,10 @@ const schedule = {
limit: null, limit: null,
diff_mode: null, diff_mode: null,
verbosity: null, verbosity: null,
execution_environment: null,
forks: null,
job_slice_count: null,
timeout: null,
}; };
const scheduleWithPrompts = { const scheduleWithPrompts = {
@@ -104,6 +120,10 @@ const scheduleWithPrompts = {
diff_mode: true, diff_mode: true,
verbosity: 1, verbosity: 1,
extra_data: { foo: 'fii' }, extra_data: { foo: 'fii' },
execution_environment: 1,
forks: 1,
job_slice_count: 1,
timeout: 100,
}; };
describe('<ScheduleDetail />', () => { describe('<ScheduleDetail />', () => {
@@ -182,6 +202,14 @@ describe('<ScheduleDetail />', () => {
expect(wrapper.find('Detail[label="Credentials"]').length).toBe(0); expect(wrapper.find('Detail[label="Credentials"]').length).toBe(0);
expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(0); expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(0);
expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(0); expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(0);
expect(wrapper.find('Detail[label="Timeout"]').length).toBe(0);
expect(wrapper.find('Detail[label="Job Slicing"]').length).toBe(0);
expect(wrapper.find('Detail[label="Forks"]').length).toBe(0);
expect(wrapper.find('Detail[label="Labels"]').length).toBe(0);
expect(wrapper.find('Detail[label="Instance Groups"]').length).toBe(0);
expect(wrapper.find('Detail[label="Execution Environment"]').length).toBe(
0
);
expect(wrapper.find('VariablesDetail').length).toBe(0); expect(wrapper.find('VariablesDetail').length).toBe(0);
}); });
test('details should render with the proper values with prompts', async () => { test('details should render with the proper values with prompts', async () => {
@@ -200,6 +228,28 @@ describe('<ScheduleDetail />', () => {
], ],
}, },
}); });
SchedulesAPI.readInstanceGroups.mockResolvedValue({
data: {
count: 1,
results: [
{
id: 1,
name: 'IG 1',
},
],
},
});
SchedulesAPI.readAllLabels.mockResolvedValue({
data: {
count: 1,
results: [
{
id: 1,
name: 'Label 1',
},
],
},
});
JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts); JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts);
await act(async () => { await act(async () => {
wrapper = mountWithContexts( wrapper = mountWithContexts(
@@ -254,6 +304,14 @@ describe('<ScheduleDetail />', () => {
expect(wrapper.find('Detail[label="Credentials"]').length).toBe(1); expect(wrapper.find('Detail[label="Credentials"]').length).toBe(1);
expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(1); expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(1);
expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(1); expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(1);
expect(wrapper.find('Detail[label="Timeout"]').length).toBe(1);
expect(wrapper.find('Detail[label="Job Slicing"]').length).toBe(1);
expect(wrapper.find('Detail[label="Forks"]').length).toBe(1);
expect(wrapper.find('Detail[label="Labels"]').length).toBe(1);
expect(wrapper.find('Detail[label="Instance Groups"]').length).toBe(1);
expect(wrapper.find('Detail[label="Execution Environment"]').length).toBe(
1
);
expect(wrapper.find('VariablesDetail').length).toBe(1); expect(wrapper.find('VariablesDetail').length).toBe(1);
}); });
test('prompt values section should be hidden if no overrides are present on the schedule but ask_ options are all true', async () => { test('prompt values section should be hidden if no overrides are present on the schedule but ask_ options are all true', async () => {
@@ -263,6 +321,18 @@ describe('<ScheduleDetail />', () => {
results: [], results: [],
}, },
}); });
SchedulesAPI.readInstanceGroups.mockResolvedValue({
data: {
count: 0,
results: [],
},
});
SchedulesAPI.readAllLabels.mockResolvedValue({
data: {
count: 0,
results: [],
},
});
JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts); JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts);
await act(async () => { await act(async () => {
wrapper = mountWithContexts( wrapper = mountWithContexts(
@@ -296,6 +366,14 @@ describe('<ScheduleDetail />', () => {
expect(wrapper.find('Detail[label="Credentials"]').length).toBe(0); expect(wrapper.find('Detail[label="Credentials"]').length).toBe(0);
expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(0); expect(wrapper.find('Detail[label="Job Tags"]').length).toBe(0);
expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(0); expect(wrapper.find('Detail[label="Skip Tags"]').length).toBe(0);
expect(wrapper.find('Detail[label="Timeout"]').length).toBe(0);
expect(wrapper.find('Detail[label="Job Slicing"]').length).toBe(0);
expect(wrapper.find('Detail[label="Forks"]').length).toBe(0);
expect(wrapper.find('Detail[label="Labels"]').length).toBe(0);
expect(wrapper.find('Detail[label="Instance Groups"]').length).toBe(0);
expect(wrapper.find('Detail[label="Execution Environment"]').length).toBe(
0
);
expect(wrapper.find('VariablesDetail').length).toBe(0); expect(wrapper.find('VariablesDetail').length).toBe(0);
}); });
test('prompt values section should be hidden if overrides are present on the schedule but ask_ options are all false', async () => { test('prompt values section should be hidden if overrides are present on the schedule but ask_ options are all false', async () => {
@@ -469,6 +547,18 @@ describe('<ScheduleDetail />', () => {
results: [], results: [],
}, },
}); });
SchedulesAPI.readInstanceGroups.mockResolvedValue({
data: {
count: 0,
results: [],
},
});
SchedulesAPI.readAllLabels.mockResolvedValue({
data: {
count: 0,
results: [],
},
});
JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts); JobTemplatesAPI.readLaunch.mockResolvedValueOnce(allPrompts);
await act(async () => { await act(async () => {
wrapper = mountWithContexts( wrapper = mountWithContexts(

View File

@@ -1,15 +1,14 @@
import React, { useState } from 'react'; import React, { useState } from 'react';
import { useHistory, useLocation } from 'react-router-dom'; import { useHistory, useLocation } from 'react-router-dom';
import { shape } from 'prop-types'; import { shape } from 'prop-types';
import { Card } from '@patternfly/react-core'; import { Card } from '@patternfly/react-core';
import yaml from 'js-yaml'; import yaml from 'js-yaml';
import { SchedulesAPI } from 'api'; import { OrganizationsAPI, SchedulesAPI } from 'api';
import { getAddedAndRemoved } from 'util/lists'; import { getAddedAndRemoved } from 'util/lists';
import { parseVariableField } from 'util/yaml'; import { parseVariableField } from 'util/yaml';
import mergeExtraVars from 'util/prompt/mergeExtraVars'; import mergeExtraVars from 'util/prompt/mergeExtraVars';
import getSurveyValues from 'util/prompt/getSurveyValues'; import getSurveyValues from 'util/prompt/getSurveyValues';
import createNewLabels from 'util/labels';
import ScheduleForm from '../shared/ScheduleForm'; import ScheduleForm from '../shared/ScheduleForm';
import buildRuleSet from '../shared/buildRuleSet'; import buildRuleSet from '../shared/buildRuleSet';
import { CardBody } from '../../Card'; import { CardBody } from '../../Card';
@@ -32,9 +31,13 @@ function ScheduleEdit({
values, values,
launchConfiguration, launchConfiguration,
surveyConfiguration, surveyConfiguration,
originalInstanceGroups,
originalLabels,
scheduleCredentials = [] scheduleCredentials = []
) => { ) => {
const { const {
execution_environment,
instance_groups,
inventory, inventory,
credentials = [], credentials = [],
frequency, frequency,
@@ -42,13 +45,9 @@ function ScheduleEdit({
exceptionFrequency, exceptionFrequency,
exceptionOptions, exceptionOptions,
timezone, timezone,
labels,
...submitValues ...submitValues
} = values; } = values;
const { added, removed } = getAddedAndRemoved(
[...(resource?.summary_fields.credentials || []), ...scheduleCredentials],
credentials
);
let extraVars; let extraVars;
const surveyValues = getSurveyValues(values); const surveyValues = getSurveyValues(values);
@@ -82,7 +81,24 @@ function ScheduleEdit({
submitValues.inventory = inventory.id; submitValues.inventory = inventory.id;
} }
if (execution_environment) {
submitValues.execution_environment = execution_environment.id;
}
try { try {
if (launchConfiguration?.ask_labels_on_launch) {
const { labelIds, error } = createNewLabels(
values.labels,
resource.organization
);
if (error) {
setFormSubmitError(error);
} else {
submitValues.labels = labelIds;
}
}
const ruleSet = buildRuleSet(values); const ruleSet = buildRuleSet(values);
const requestData = { const requestData = {
...submitValues, ...submitValues,
@@ -104,17 +120,52 @@ function ScheduleEdit({
const { const {
data: { id: scheduleId }, data: { id: scheduleId },
} = await SchedulesAPI.update(schedule.id, requestData); } = await SchedulesAPI.update(schedule.id, requestData);
if (values.credentials?.length > 0) {
await Promise.all([ const { added: addedCredentials, removed: removedCredentials } =
...removed.map(({ id }) => getAddedAndRemoved(
SchedulesAPI.disassociateCredential(scheduleId, id) [
), ...(resource?.summary_fields.credentials || []),
...added.map(({ id }) => ...scheduleCredentials,
SchedulesAPI.associateCredential(scheduleId, id) ],
), credentials
]); );
const { added: addedLabels, removed: removedLabels } = getAddedAndRemoved(
originalLabels,
labels
);
let organizationId = resource.organization;
if (addedLabels.length > 0) {
if (!organizationId) {
const {
data: { results },
} = await OrganizationsAPI.read();
organizationId = results[0].id;
}
} }
await Promise.all([
...removedCredentials.map(({ id }) =>
SchedulesAPI.disassociateCredential(scheduleId, id)
),
...addedCredentials.map(({ id }) =>
SchedulesAPI.associateCredential(scheduleId, id)
),
...removedLabels.map((label) =>
SchedulesAPI.disassociateLabel(scheduleId, label)
),
...addedLabels.map((label) =>
SchedulesAPI.associateLabel(scheduleId, label, organizationId)
),
SchedulesAPI.orderInstanceGroups(
scheduleId,
instance_groups || [],
originalInstanceGroups
),
]);
history.push(`${pathRoot}schedules/${scheduleId}/details`); history.push(`${pathRoot}schedules/${scheduleId}/details`);
} catch (err) { } catch (err) {
setFormSubmitError(err); setFormSubmitError(err);

View File

@@ -1,13 +1,12 @@
import React, { useEffect, useCallback, useState } from 'react'; import React, { useEffect, useCallback, useState, useRef } from 'react';
import { shape, func } from 'prop-types'; import { shape, func } from 'prop-types';
import { DateTime } from 'luxon'; import { DateTime } from 'luxon';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import { Formik } from 'formik'; import { Formik } from 'formik';
import { RRule } from 'rrule'; import { RRule } from 'rrule';
import { Button, Form, ActionGroup } from '@patternfly/react-core'; import { Button, Form, ActionGroup } from '@patternfly/react-core';
import { Config } from 'contexts/Config'; import { Config } from 'contexts/Config';
import { SchedulesAPI } from 'api'; import { JobTemplatesAPI, SchedulesAPI, WorkflowJobTemplatesAPI } from 'api';
import { dateToInputDateTime } from 'util/dates'; import { dateToInputDateTime } from 'util/dates';
import useRequest from 'hooks/useRequest'; import useRequest from 'hooks/useRequest';
import { parseVariableField } from 'util/yaml'; import { parseVariableField } from 'util/yaml';
@@ -31,7 +30,7 @@ const NUM_DAYS_PER_FREQUENCY = {
function ScheduleForm({ function ScheduleForm({
hasDaysToKeepField, hasDaysToKeepField,
handleCancel, handleCancel,
handleSubmit, handleSubmit: submitSchedule,
schedule, schedule,
submitError, submitError,
resource, resource,
@@ -41,6 +40,8 @@ function ScheduleForm({
}) { }) {
const [isWizardOpen, setIsWizardOpen] = useState(false); const [isWizardOpen, setIsWizardOpen] = useState(false);
const [isSaveDisabled, setIsSaveDisabled] = useState(false); const [isSaveDisabled, setIsSaveDisabled] = useState(false);
const originalLabels = useRef([]);
const originalInstanceGroups = useRef([]);
let rruleError; let rruleError;
const now = DateTime.now(); const now = DateTime.now();
@@ -60,12 +61,52 @@ function ScheduleForm({
useCallback(async () => { useCallback(async () => {
const { data } = await SchedulesAPI.readZoneInfo(); const { data } = await SchedulesAPI.readZoneInfo();
let creds; let creds = [];
let allLabels = [];
let allInstanceGroups = [];
if (schedule.id) { if (schedule.id) {
const { if (
data: { results }, resource.type === 'job_template' &&
} = await SchedulesAPI.readCredentials(schedule.id); launchConfig.ask_credential_on_launch
creds = results; ) {
const {
data: { results },
} = await SchedulesAPI.readCredentials(schedule.id);
creds = results;
}
if (launchConfig.ask_labels_on_launch) {
const {
data: { results },
} = await SchedulesAPI.readAllLabels(schedule.id);
allLabels = results;
}
if (
resource.type === 'job_template' &&
launchConfig.ask_instance_groups_on_launch
) {
const {
data: { results },
} = await SchedulesAPI.readInstanceGroups(schedule.id);
allInstanceGroups = results;
}
} else {
if (resource.type === 'job_template') {
if (launchConfig.ask_labels_on_launch) {
const {
data: { results },
} = await JobTemplatesAPI.readAllLabels(resource.id);
allLabels = results;
}
}
if (
resource.type === 'workflow_job_template' &&
launchConfig.ask_labels_on_launch
) {
const {
data: { results },
} = await WorkflowJobTemplatesAPI.readAllLabels(resource.id);
allLabels = results;
}
} }
const zones = (data.zones || []).map((zone) => ({ const zones = (data.zones || []).map((zone) => ({
@@ -74,12 +115,22 @@ function ScheduleForm({
label: zone, label: zone,
})); }));
originalLabels.current = allLabels;
originalInstanceGroups.current = allInstanceGroups;
return { return {
zoneOptions: zones, zoneOptions: zones,
zoneLinks: data.links, zoneLinks: data.links,
credentials: creds || [], credentials: creds,
}; };
}, [schedule]), }, [
schedule,
resource.id,
resource.type,
launchConfig.ask_labels_on_launch,
launchConfig.ask_instance_groups_on_launch,
launchConfig.ask_credential_on_launch,
]),
{ {
zonesOptions: [], zonesOptions: [],
zoneLinks: {}, zoneLinks: {},
@@ -225,6 +276,12 @@ function ScheduleForm({
launchConfig.ask_scm_branch_on_launch || launchConfig.ask_scm_branch_on_launch ||
launchConfig.ask_tags_on_launch || launchConfig.ask_tags_on_launch ||
launchConfig.ask_skip_tags_on_launch || launchConfig.ask_skip_tags_on_launch ||
launchConfig.ask_execution_environment_on_launch ||
launchConfig.ask_labels_on_launch ||
launchConfig.ask_forks_on_launch ||
launchConfig.ask_job_slice_count_on_launch ||
launchConfig.ask_timeout_on_launch ||
launchConfig.ask_instance_groups_on_launch ||
launchConfig.survey_enabled || launchConfig.survey_enabled ||
launchConfig.inventory_needed_to_start || launchConfig.inventory_needed_to_start ||
launchConfig.variables_needed_to_start?.length > 0) launchConfig.variables_needed_to_start?.length > 0)
@@ -301,19 +358,6 @@ function ScheduleForm({
startTime: time, startTime: time,
timezone: schedule.timezone || now.zoneName, timezone: schedule.timezone || now.zoneName,
}; };
const submitSchedule = (
values,
launchConfiguration,
surveyConfiguration,
scheduleCredentials
) => {
handleSubmit(
values,
launchConfiguration,
surveyConfiguration,
scheduleCredentials
);
};
if (hasDaysToKeepField) { if (hasDaysToKeepField) {
let initialDaysToKeep = 30; let initialDaysToKeep = 30;
@@ -436,7 +480,14 @@ function ScheduleForm({
}, },
}} }}
onSubmit={(values) => { onSubmit={(values) => {
submitSchedule(values, launchConfig, surveyConfig, credentials); submitSchedule(
values,
launchConfig,
surveyConfig,
originalInstanceGroups.current,
originalLabels.current,
credentials
);
}} }}
validate={validate} validate={validate}
> >
@@ -463,6 +514,8 @@ function ScheduleForm({
setIsSaveDisabled(false); setIsSaveDisabled(false);
}} }}
resourceDefaultCredentials={resourceDefaultCredentials} resourceDefaultCredentials={resourceDefaultCredentials}
labels={originalLabels.current}
instanceGroups={originalInstanceGroups.current}
/> />
)} )}
<FormSubmitError error={submitError} /> <FormSubmitError error={submitError} />

View File

@@ -17,11 +17,35 @@ jest.mock('../../../api/models/Inventories');
const credentials = { const credentials = {
data: { data: {
results: [ results: [
{ id: 1, kind: 'cloud', name: 'Cred 1', url: 'www.google.com' }, {
{ id: 2, kind: 'ssh', name: 'Cred 2', url: 'www.google.com' }, id: 1,
{ id: 3, kind: 'Ansible', name: 'Cred 3', url: 'www.google.com' }, kind: 'cloud',
{ id: 4, kind: 'Machine', name: 'Cred 4', url: 'www.google.com' }, name: 'Cred 1',
{ id: 5, kind: 'Machine', name: 'Cred 5', url: 'www.google.com' }, url: 'www.google.com',
inputs: {},
},
{ id: 2, kind: 'ssh', name: 'Cred 2', url: 'www.google.com', inputs: {} },
{
id: 3,
kind: 'Ansible',
name: 'Cred 3',
url: 'www.google.com',
inputs: {},
},
{
id: 4,
kind: 'Machine',
name: 'Cred 4',
url: 'www.google.com',
inputs: {},
},
{
id: 5,
kind: 'Machine',
name: 'Cred 5',
url: 'www.google.com',
inputs: {},
},
], ],
}, },
}; };
@@ -39,6 +63,12 @@ const launchData = {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: true, ask_inventory_on_launch: true,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -153,6 +183,12 @@ describe('<ScheduleForm />', () => {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: true, ask_inventory_on_launch: true,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -208,6 +244,12 @@ describe('<ScheduleForm />', () => {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: true, ask_inventory_on_launch: true,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -275,6 +317,12 @@ describe('<ScheduleForm />', () => {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: true, ask_inventory_on_launch: true,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -406,6 +454,12 @@ describe('<ScheduleForm />', () => {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: true, ask_inventory_on_launch: true,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -465,6 +519,12 @@ describe('<ScheduleForm />', () => {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -894,7 +954,7 @@ describe('<ScheduleForm />', () => {
jest.clearAllMocks(); jest.clearAllMocks();
}); });
test('should make API calls to fetch credentials, launch configuration, and survey configuration', async () => { test('should make API calls to fetch credentials, labels, and zone info', async () => {
await act(async () => { await act(async () => {
wrapper = mountWithContexts( wrapper = mountWithContexts(
<ScheduleForm <ScheduleForm
@@ -906,6 +966,9 @@ describe('<ScheduleForm />', () => {
type: 'job_template', type: 'job_template',
name: 'Foo Job Template', name: 'Foo Job Template',
description: '', description: '',
summary_fields: {
credentials: [],
},
}} }}
launchConfig={{ launchConfig={{
can_start_without_user_input: true, can_start_without_user_input: true,
@@ -919,7 +982,13 @@ describe('<ScheduleForm />', () => {
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_credential_on_launch: false, ask_credential_on_launch: true,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: true,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -933,7 +1002,9 @@ describe('<ScheduleForm />', () => {
/> />
); );
}); });
expect(SchedulesAPI.readZoneInfo).toBeCalled();
expect(SchedulesAPI.readCredentials).toBeCalledWith(27); expect(SchedulesAPI.readCredentials).toBeCalledWith(27);
expect(SchedulesAPI.readAllLabels).toBeCalledWith(27);
}); });
test('should not call API to get credentials ', async () => { test('should not call API to get credentials ', async () => {
@@ -961,6 +1032,12 @@ describe('<ScheduleForm />', () => {
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false, survey_enabled: false,
variables_needed_to_start: [], variables_needed_to_start: [],
credential_needed_to_start: false, credential_needed_to_start: false,
@@ -991,6 +1068,30 @@ describe('<ScheduleForm />', () => {
name: 'Foo Project', name: 'Foo Project',
description: '', description: '',
}} }}
launchConfig={{
can_start_without_user_input: true,
passwords_needed_to_start: [],
ask_scm_branch_on_launch: false,
ask_variables_on_launch: false,
ask_tags_on_launch: false,
ask_diff_mode_on_launch: false,
ask_skip_tags_on_launch: false,
ask_job_type_on_launch: false,
ask_limit_on_launch: false,
ask_verbosity_on_launch: false,
ask_inventory_on_launch: false,
ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_labels_on_launch: false,
ask_forks_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_timeout_on_launch: false,
ask_instance_groups_on_launch: false,
survey_enabled: false,
variables_needed_to_start: [],
credential_needed_to_start: false,
inventory_needed_to_start: false,
}}
/> />
); );
}); });

View File

@@ -17,6 +17,8 @@ function SchedulePromptableFields({
credentials, credentials,
resource, resource,
resourceDefaultCredentials, resourceDefaultCredentials,
labels,
instanceGroups,
}) { }) {
const { setFieldTouched, values, initialValues, resetForm } = const { setFieldTouched, values, initialValues, resetForm } =
useFormikContext(); useFormikContext();
@@ -33,7 +35,9 @@ function SchedulePromptableFields({
schedule, schedule,
resource, resource,
credentials, credentials,
resourceDefaultCredentials resourceDefaultCredentials,
labels,
instanceGroups
); );
const [showDescription, setShowDescription] = useState(false); const [showDescription, setShowDescription] = useState(false);
const { error, dismissError } = useDismissableError(contentError); const { error, dismissError } = useDismissableError(contentError);

View File

@@ -3,6 +3,8 @@ import { useFormikContext } from 'formik';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import useInventoryStep from '../../LaunchPrompt/steps/useInventoryStep'; import useInventoryStep from '../../LaunchPrompt/steps/useInventoryStep';
import useCredentialsStep from '../../LaunchPrompt/steps/useCredentialsStep'; import useCredentialsStep from '../../LaunchPrompt/steps/useCredentialsStep';
import useExecutionEnvironmentStep from '../../LaunchPrompt/steps/useExecutionEnvironmentStep';
import useInstanceGroupsStep from '../../LaunchPrompt/steps/useInstanceGroupsStep';
import useOtherPromptsStep from '../../LaunchPrompt/steps/useOtherPromptsStep'; import useOtherPromptsStep from '../../LaunchPrompt/steps/useOtherPromptsStep';
import useSurveyStep from '../../LaunchPrompt/steps/useSurveyStep'; import useSurveyStep from '../../LaunchPrompt/steps/useSurveyStep';
import usePreviewStep from '../../LaunchPrompt/steps/usePreviewStep'; import usePreviewStep from '../../LaunchPrompt/steps/usePreviewStep';
@@ -12,9 +14,10 @@ export default function useSchedulePromptSteps(
launchConfig, launchConfig,
schedule, schedule,
resource, resource,
scheduleCredentials, scheduleCredentials,
resourceDefaultCredentials resourceDefaultCredentials,
labels,
instanceGroups
) { ) {
const sourceOfValues = const sourceOfValues =
(Object.keys(schedule).length > 0 && schedule) || resource; (Object.keys(schedule).length > 0 && schedule) || resource;
@@ -28,7 +31,9 @@ export default function useSchedulePromptSteps(
sourceOfValues, sourceOfValues,
resourceDefaultCredentials resourceDefaultCredentials
), ),
useOtherPromptsStep(launchConfig, sourceOfValues), useExecutionEnvironmentStep(launchConfig, resource),
useInstanceGroupsStep(launchConfig, resource, instanceGroups),
useOtherPromptsStep(launchConfig, sourceOfValues, labels),
useSurveyStep(launchConfig, surveyConfig, sourceOfValues, visited), useSurveyStep(launchConfig, surveyConfig, sourceOfValues, visited),
]; ];
@@ -37,7 +42,6 @@ export default function useSchedulePromptSteps(
steps.push( steps.push(
usePreviewStep( usePreviewStep(
launchConfig, launchConfig,
resource, resource,
surveyConfig, surveyConfig,
hasErrors, hasErrors,
@@ -130,6 +134,8 @@ export default function useSchedulePromptSteps(
setVisited({ setVisited({
inventory: true, inventory: true,
credentials: true, credentials: true,
executionEnvironment: true,
instanceGroups: true,
other: true, other: true,
survey: true, survey: true,
preview: true, preview: true,

View File

@@ -8,6 +8,7 @@ export function initReducer() {
addNodeTarget: null, addNodeTarget: null,
addingLink: false, addingLink: false,
contentError: null, contentError: null,
defaultOrganization: null,
isLoading: true, isLoading: true,
linkToDelete: null, linkToDelete: null,
linkToEdit: null, linkToEdit: null,
@@ -64,6 +65,11 @@ export default function visualizerReducer(state, action) {
...state, ...state,
contentError: action.value, contentError: action.value,
}; };
case 'SET_DEFAULT_ORGANIZATION':
return {
...state,
defaultOrganization: action.value,
};
case 'SET_IS_LOADING': case 'SET_IS_LOADING':
return { return {
...state, ...state,

View File

@@ -7,6 +7,7 @@ const defaultState = {
addNodeTarget: null, addNodeTarget: null,
addingLink: false, addingLink: false,
contentError: null, contentError: null,
defaultOrganization: null,
isLoading: true, isLoading: true,
linkToDelete: null, linkToDelete: null,
linkToEdit: null, linkToEdit: null,
@@ -1281,6 +1282,18 @@ describe('Workflow reducer', () => {
}); });
}); });
}); });
describe('SET_DEFAULT_ORGANIZATION', () => {
it('should set the state variable', () => {
const result = workflowReducer(defaultState, {
type: 'SET_DEFAULT_ORGANIZATION',
value: 1,
});
expect(result).toEqual({
...defaultState,
defaultOrganization: 1,
});
});
});
describe('SET_IS_LOADING', () => { describe('SET_IS_LOADING', () => {
it('should set the state variable', () => { it('should set the state variable', () => {
const result = workflowReducer(defaultState, { const result = workflowReducer(defaultState, {

View File

@@ -12,8 +12,8 @@ import { useState, useCallback } from 'react';
* } * }
*/ */
export default function useSelected(list = []) { export default function useSelected(list = [], defaultSelected = []) {
const [selected, setSelected] = useState([]); const [selected, setSelected] = useState(defaultSelected);
const isAllSelected = selected.length > 0 && selected.length === list.length; const isAllSelected = selected.length > 0 && selected.length === list.length;
const handleSelect = (row) => { const handleSelect = (row) => {

View File

@@ -391,6 +391,16 @@ function JobDetail({ job, inventorySourceLabels }) {
helpText={jobHelpText.forks} helpText={jobHelpText.forks}
/> />
)} )}
{typeof job.timeout === 'number' && (
<Detail
dataCy="timeout"
label={t`Timeout`}
value={
job.timeout ? t`${job.timeout} seconds` : t`No timeout specified`
}
helpText={jobHelpText.timeout}
/>
)}
{credential && ( {credential && (
<Detail <Detail
dataCy="job-machine-credential" dataCy="job-machine-credential"

View File

@@ -22,14 +22,26 @@ const jobTemplateData = {
allow_simultaneous: false, allow_simultaneous: false,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_diff_mode_on_launch: false, ask_diff_mode_on_launch: false,
ask_execution_environment_on_launch: false,
ask_forks_on_launch: false,
ask_instance_groups_on_launch: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_job_type_on_launch: false, ask_job_type_on_launch: false,
ask_labels_on_launch: false,
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_scm_branch_on_launch: false, ask_scm_branch_on_launch: false,
ask_skip_tags_on_launch: false, ask_skip_tags_on_launch: false,
ask_tags_on_launch: false, ask_tags_on_launch: false,
ask_timeout_on_launch: false,
ask_variables_on_launch: false, ask_variables_on_launch: false,
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_execution_environment_on_launch: false,
ask_forks_on_launch: false,
ask_instance_groups_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_labels_on_launch: false,
ask_timeout_on_launch: false,
become_enabled: false, become_enabled: false,
description: '', description: '',
diff_mode: false, diff_mode: false,

View File

@@ -35,14 +35,25 @@ const mockJobTemplate = {
allow_simultaneous: false, allow_simultaneous: false,
ask_scm_branch_on_launch: false, ask_scm_branch_on_launch: false,
ask_diff_mode_on_launch: false, ask_diff_mode_on_launch: false,
ask_execution_environment_on_launch: false,
ask_forks_on_launch: false,
ask_instance_groups_on_launch: false,
ask_variables_on_launch: false, ask_variables_on_launch: false,
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_tags_on_launch: false, ask_tags_on_launch: false,
ask_skip_tags_on_launch: false, ask_skip_tags_on_launch: false,
ask_job_type_on_launch: false, ask_job_type_on_launch: false,
ask_labels_on_launch: false,
ask_verbosity_on_launch: false, ask_verbosity_on_launch: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_credential_on_launch: false, ask_credential_on_launch: false,
ask_execution_environment_on_launch: false,
ask_forks_on_launch: false,
ask_instance_groups_on_launch: false,
ask_job_slice_count_on_launch: false,
ask_labels_on_launch: false,
ask_timeout_on_launch: false,
become_enabled: false, become_enabled: false,
description: 'Bar', description: 'Bar',
diff_mode: false, diff_mode: false,

View File

@@ -1,9 +1,7 @@
import React, { useState, useCallback, useEffect } from 'react'; import React, { useState, useCallback, useEffect } from 'react';
import { useHistory } from 'react-router-dom'; import { useHistory } from 'react-router-dom';
import { Card, PageSection } from '@patternfly/react-core'; import { Card, PageSection } from '@patternfly/react-core';
import { CardBody } from 'components/Card'; import { CardBody } from 'components/Card';
import { WorkflowJobTemplatesAPI, OrganizationsAPI, UsersAPI } from 'api'; import { WorkflowJobTemplatesAPI, OrganizationsAPI, UsersAPI } from 'api';
import { useConfig } from 'contexts/Config'; import { useConfig } from 'contexts/Config';
import useRequest from 'hooks/useRequest'; import useRequest from 'hooks/useRequest';
@@ -24,12 +22,16 @@ function WorkflowJobTemplateAdd() {
webhook_credential, webhook_credential,
webhook_key, webhook_key,
limit, limit,
job_tags,
skip_tags,
...templatePayload ...templatePayload
} = values; } = values;
templatePayload.inventory = inventory?.id; templatePayload.inventory = inventory?.id;
templatePayload.organization = organization?.id; templatePayload.organization = organization?.id;
templatePayload.webhook_credential = webhook_credential?.id; templatePayload.webhook_credential = webhook_credential?.id;
templatePayload.limit = limit === '' ? null : limit; templatePayload.limit = limit === '' ? null : limit;
templatePayload.job_tags = job_tags === '' ? null : job_tags;
templatePayload.skip_tags = skip_tags === '' ? null : skip_tags;
const organizationId = const organizationId =
organization?.id || inventory?.summary_fields?.organization.id; organization?.id || inventory?.summary_fields?.organization.id;
try { try {

View File

@@ -82,7 +82,7 @@ describe('<WorkflowJobTemplateAdd/>', () => {
test('calls workflowJobTemplatesAPI with correct information on submit', async () => { test('calls workflowJobTemplatesAPI with correct information on submit', async () => {
await act(async () => { await act(async () => {
wrapper.find('input#wfjt-name').simulate('change', { wrapper.find('input#wfjt-name').simulate('change', {
target: { value: 'Alex', name: 'name' }, target: { value: 'Alex Singh', name: 'name' },
}); });
wrapper.find('LabelSelect').find('SelectToggle').simulate('click'); wrapper.find('LabelSelect').find('SelectToggle').simulate('click');
@@ -104,18 +104,23 @@ describe('<WorkflowJobTemplateAdd/>', () => {
wrapper.find('form').simulate('submit'); wrapper.find('form').simulate('submit');
}); });
await expect(WorkflowJobTemplatesAPI.create).toHaveBeenCalledWith({ await expect(WorkflowJobTemplatesAPI.create).toHaveBeenCalledWith({
name: 'Alex', name: 'Alex Singh',
allow_simultaneous: false, allow_simultaneous: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_labels_on_launch: false,
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_scm_branch_on_launch: false, ask_scm_branch_on_launch: false,
ask_skip_tags_on_launch: false,
ask_tags_on_launch: false,
ask_variables_on_launch: false, ask_variables_on_launch: false,
description: '', description: '',
extra_vars: '---', extra_vars: '---',
inventory: undefined, inventory: undefined,
job_tags: null,
limit: null, limit: null,
organization: undefined, organization: undefined,
scm_branch: '', scm_branch: '',
skip_tags: null,
webhook_credential: undefined, webhook_credential: undefined,
webhook_service: '', webhook_service: '',
webhook_url: '', webhook_url: '',

View File

@@ -23,12 +23,16 @@ function WorkflowJobTemplateEdit({ template }) {
webhook_credential, webhook_credential,
webhook_key, webhook_key,
limit, limit,
job_tags,
skip_tags,
...templatePayload ...templatePayload
} = values; } = values;
templatePayload.inventory = inventory?.id || null; templatePayload.inventory = inventory?.id || null;
templatePayload.organization = organization?.id || null; templatePayload.organization = organization?.id || null;
templatePayload.webhook_credential = webhook_credential?.id || null; templatePayload.webhook_credential = webhook_credential?.id || null;
templatePayload.limit = limit === '' ? null : limit; templatePayload.limit = limit === '' ? null : limit;
templatePayload.job_tags = job_tags === '' ? null : job_tags;
templatePayload.skip_tags = skip_tags === '' ? null : skip_tags;
const formOrgId = const formOrgId =
organization?.id || inventory?.summary_fields?.organization.id || null; organization?.id || inventory?.summary_fields?.organization.id || null;

View File

@@ -161,6 +161,7 @@ describe('<WorkflowJobTemplateEdit/>', () => {
expect(WorkflowJobTemplatesAPI.update).toHaveBeenCalledWith(6, { expect(WorkflowJobTemplatesAPI.update).toHaveBeenCalledWith(6, {
name: 'Alex', name: 'Alex',
description: 'Apollo and Athena', description: 'Apollo and Athena',
skip_tags: '',
inventory: 1, inventory: 1,
organization: 1, organization: 1,
scm_branch: 'main', scm_branch: 'main',
@@ -174,6 +175,11 @@ describe('<WorkflowJobTemplateEdit/>', () => {
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_scm_branch_on_launch: false, ask_scm_branch_on_launch: false,
ask_variables_on_launch: false, ask_variables_on_launch: false,
ask_labels_on_launch: false,
ask_skip_tags_on_launch: false,
ask_tags_on_launch: false,
job_tags: null,
skip_tags: null,
}); });
wrapper.update(); wrapper.update();
await expect(WorkflowJobTemplatesAPI.disassociateLabel).toBeCalledWith(6, { await expect(WorkflowJobTemplatesAPI.disassociateLabel).toBeCalledWith(6, {
@@ -273,16 +279,21 @@ describe('<WorkflowJobTemplateEdit/>', () => {
expect(WorkflowJobTemplatesAPI.update).toBeCalledWith(6, { expect(WorkflowJobTemplatesAPI.update).toBeCalledWith(6, {
allow_simultaneous: false, allow_simultaneous: false,
ask_inventory_on_launch: false, ask_inventory_on_launch: false,
ask_labels_on_launch: false,
ask_limit_on_launch: false, ask_limit_on_launch: false,
ask_scm_branch_on_launch: false, ask_scm_branch_on_launch: false,
ask_skip_tags_on_launch: false,
ask_tags_on_launch: false,
ask_variables_on_launch: false, ask_variables_on_launch: false,
description: 'bar', description: 'bar',
extra_vars: '---', extra_vars: '---',
inventory: 1, inventory: 1,
job_tags: null,
limit: '5000', limit: '5000',
name: 'Foo', name: 'Foo',
organization: 1, organization: 1,
scm_branch: 'devel', scm_branch: 'devel',
skip_tags: null,
webhook_credential: null, webhook_credential: null,
webhook_service: '', webhook_service: '',
webhook_url: '', webhook_url: '',

View File

@@ -38,6 +38,8 @@ function NodeModalForm({
surveyConfig, surveyConfig,
isLaunchLoading, isLaunchLoading,
resourceDefaultCredentials, resourceDefaultCredentials,
labels,
instanceGroups,
}) { }) {
const history = useHistory(); const history = useHistory();
const dispatch = useContext(WorkflowDispatchContext); const dispatch = useContext(WorkflowDispatchContext);
@@ -66,7 +68,9 @@ function NodeModalForm({
surveyConfig, surveyConfig,
values.nodeResource, values.nodeResource,
askLinkType, askLinkType,
resourceDefaultCredentials resourceDefaultCredentials,
labels,
instanceGroups
); );
const handleSaveNode = () => { const handleSaveNode = () => {
@@ -241,7 +245,7 @@ const NodeModalInner = ({ title, ...rest }) => {
const { const {
request: readLaunchConfigs, request: readLaunchConfigs,
error: launchConfigError, error: launchConfigError,
result: { launchConfig, surveyConfig, resourceDefaultCredentials }, result: { launchConfig, surveyConfig, resourceDefaultCredentials, labels },
isLoading, isLoading,
} = useRequest( } = useRequest(
useCallback(async () => { useCallback(async () => {
@@ -260,9 +264,15 @@ const NodeModalInner = ({ title, ...rest }) => {
launchConfig: {}, launchConfig: {},
surveyConfig: {}, surveyConfig: {},
resourceDefaultCredentials: [], resourceDefaultCredentials: [],
labels: [],
}; };
} }
const readLabels =
values.nodeType === 'workflow_job_template'
? WorkflowJobTemplatesAPI.readAllLabels(values.nodeResource.id)
: JobTemplatesAPI.readAllLabels(values.nodeResource.id);
const { data: launch } = await readLaunch( const { data: launch } = await readLaunch(
values.nodeType, values.nodeType,
values?.nodeResource?.id values?.nodeResource?.id
@@ -291,10 +301,21 @@ const NodeModalInner = ({ title, ...rest }) => {
defaultCredentials = results; defaultCredentials = results;
} }
let defaultLabels = [];
if (launch.ask_labels_on_launch) {
const {
data: { results },
} = await readLabels;
defaultLabels = results;
}
return { return {
launchConfig: launch, launchConfig: launch,
surveyConfig: survey, surveyConfig: survey,
resourceDefaultCredentials: defaultCredentials, resourceDefaultCredentials: defaultCredentials,
labels: defaultLabels,
}; };
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
@@ -347,6 +368,8 @@ const NodeModalInner = ({ title, ...rest }) => {
resourceDefaultCredentials={resourceDefaultCredentials} resourceDefaultCredentials={resourceDefaultCredentials}
isLaunchLoading={isLoading} isLaunchLoading={isLoading}
title={wizardTitle} title={wizardTitle}
labels={labels}
instanceGroups={[]}
/> />
); );
}; };

View File

@@ -1,12 +1,10 @@
import React, { useContext, useEffect, useCallback } from 'react'; import React, { useContext, useEffect, useCallback } from 'react';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import { Button, Modal } from '@patternfly/react-core'; import { Button, Modal } from '@patternfly/react-core';
import { import {
WorkflowDispatchContext, WorkflowDispatchContext,
WorkflowStateContext, WorkflowStateContext,
} from 'contexts/Workflow'; } from 'contexts/Workflow';
import ContentError from 'components/ContentError'; import ContentError from 'components/ContentError';
import ContentLoading from 'components/ContentLoading'; import ContentLoading from 'components/ContentLoading';
import PromptDetail from 'components/PromptDetail'; import PromptDetail from 'components/PromptDetail';
@@ -21,6 +19,8 @@ function NodeViewModal({ readOnly }) {
const { const {
fullUnifiedJobTemplate, fullUnifiedJobTemplate,
originalNodeCredentials, originalNodeCredentials,
originalNodeInstanceGroups,
originalNodeLabels,
originalNodeObject, originalNodeObject,
promptValues, promptValues,
} = nodeToView; } = nodeToView;
@@ -157,6 +157,22 @@ function NodeViewModal({ readOnly }) {
if (launchConfig.ask_inventory_on_launch) { if (launchConfig.ask_inventory_on_launch) {
overrides.inventory = originalNodeObject.summary_fields.inventory; overrides.inventory = originalNodeObject.summary_fields.inventory;
} }
if (launchConfig.ask_execution_environment_on_launch) {
overrides.execution_environment =
originalNodeObject.summary_fields.execution_environment;
}
if (launchConfig.ask_labels_on_launch) {
overrides.labels = originalNodeLabels || [];
}
if (launchConfig.ask_forks_on_launch) {
overrides.forks = originalNodeObject.forks;
}
if (launchConfig.ask_job_slice_count_on_launch) {
overrides.job_slice_count = originalNodeObject.job_slice_count;
}
if (launchConfig.ask_timeout_on_launch) {
overrides.timeout = originalNodeObject.timeout;
}
if (launchConfig.ask_scm_branch_on_launch) { if (launchConfig.ask_scm_branch_on_launch) {
overrides.scm_branch = originalNodeObject.scm_branch; overrides.scm_branch = originalNodeObject.scm_branch;
} }
@@ -190,6 +206,9 @@ function NodeViewModal({ readOnly }) {
if (launchConfig.ask_credential_on_launch) { if (launchConfig.ask_credential_on_launch) {
overrides.credentials = originalNodeCredentials || []; overrides.credentials = originalNodeCredentials || [];
} }
if (launchConfig.ask_instance_groups_on_launch) {
overrides.instance_groups = originalNodeInstanceGroups || [];
}
} }
let nodeUpdatedConvergence = {}; let nodeUpdatedConvergence = {};

View File

@@ -3,9 +3,11 @@ import { useFormikContext } from 'formik';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
import useInventoryStep from 'components/LaunchPrompt/steps/useInventoryStep'; import useInventoryStep from 'components/LaunchPrompt/steps/useInventoryStep';
import useCredentialsStep from 'components/LaunchPrompt/steps/useCredentialsStep'; import useCredentialsStep from 'components/LaunchPrompt/steps/useCredentialsStep';
import useExecutionEnvironmentStep from 'components/LaunchPrompt/steps/useExecutionEnvironmentStep';
import useOtherPromptsStep from 'components/LaunchPrompt/steps/useOtherPromptsStep'; import useOtherPromptsStep from 'components/LaunchPrompt/steps/useOtherPromptsStep';
import useSurveyStep from 'components/LaunchPrompt/steps/useSurveyStep'; import useSurveyStep from 'components/LaunchPrompt/steps/useSurveyStep';
import usePreviewStep from 'components/LaunchPrompt/steps/usePreviewStep'; import usePreviewStep from 'components/LaunchPrompt/steps/usePreviewStep';
import useInstanceGroupsStep from 'components/LaunchPrompt/steps/useInstanceGroupsStep';
import { WorkflowStateContext } from 'contexts/Workflow'; import { WorkflowStateContext } from 'contexts/Workflow';
import { jsonToYaml } from 'util/yaml'; import { jsonToYaml } from 'util/yaml';
import { stringIsUUID } from 'util/strings'; import { stringIsUUID } from 'util/strings';
@@ -26,6 +28,12 @@ function showPreviewStep(nodeType, launchConfig) {
launchConfig.ask_variables_on_launch || launchConfig.ask_variables_on_launch ||
launchConfig.ask_limit_on_launch || launchConfig.ask_limit_on_launch ||
launchConfig.ask_scm_branch_on_launch || launchConfig.ask_scm_branch_on_launch ||
launchConfig.ask_execution_environment_on_launch ||
launchConfig.ask_labels_on_launch ||
launchConfig.ask_forks_on_launch ||
launchConfig.ask_job_slice_count_on_launch ||
launchConfig.ask_timeout_on_launch ||
launchConfig.ask_instance_groups_on_launch ||
launchConfig.survey_enabled || launchConfig.survey_enabled ||
(launchConfig.variables_needed_to_start && (launchConfig.variables_needed_to_start &&
launchConfig.variables_needed_to_start.length > 0) launchConfig.variables_needed_to_start.length > 0)
@@ -129,6 +137,20 @@ const getNodeToEditDefaultValues = (
} }
} }
if (launchConfig.ask_execution_environment_on_launch) {
if (nodeToEdit?.promptValues) {
initialValues.execution_environment =
nodeToEdit?.promptValues?.execution_environment;
} else if (
nodeToEdit?.originalNodeObject?.summary_fields?.execution_environment
) {
initialValues.execution_environment =
nodeToEdit?.originalNodeObject?.summary_fields?.execution_environment;
} else {
initialValues.execution_environment = null;
}
}
if (launchConfig.ask_credential_on_launch) { if (launchConfig.ask_credential_on_launch) {
if (nodeToEdit?.promptValues?.credentials) { if (nodeToEdit?.promptValues?.credentials) {
initialValues.credentials = nodeToEdit?.promptValues?.credentials; initialValues.credentials = nodeToEdit?.promptValues?.credentials;
@@ -197,6 +219,21 @@ const getNodeToEditDefaultValues = (
if (launchConfig.ask_diff_mode_on_launch) { if (launchConfig.ask_diff_mode_on_launch) {
initialValues.diff_mode = sourceOfValues?.diff_mode || false; initialValues.diff_mode = sourceOfValues?.diff_mode || false;
} }
if (launchConfig.ask_forks_on_launch) {
initialValues.forks = sourceOfValues?.forks || 0;
}
if (launchConfig.ask_job_slice_count_on_launch) {
initialValues.job_slice_count = sourceOfValues?.job_slice_count || 1;
}
if (launchConfig.ask_timeout_on_launch) {
initialValues.timeout = sourceOfValues?.timeout || 0;
}
if (launchConfig.ask_labels_on_launch) {
initialValues.labels = sourceOfValues?.labels || [];
}
if (launchConfig.ask_instance_groups_on_launch) {
initialValues.instance_groups = sourceOfValues?.instance_groups || [];
}
if (launchConfig.ask_variables_on_launch) { if (launchConfig.ask_variables_on_launch) {
const newExtraData = { ...sourceOfValues.extra_data }; const newExtraData = { ...sourceOfValues.extra_data };
@@ -242,7 +279,9 @@ export default function useWorkflowNodeSteps(
surveyConfig, surveyConfig,
resource, resource,
askLinkType, askLinkType,
resourceDefaultCredentials resourceDefaultCredentials,
labels,
instanceGroups
) { ) {
const { nodeToEdit } = useContext(WorkflowStateContext); const { nodeToEdit } = useContext(WorkflowStateContext);
const { const {
@@ -258,7 +297,9 @@ export default function useWorkflowNodeSteps(
useDaysToKeepStep(), useDaysToKeepStep(),
useInventoryStep(launchConfig, resource, visited), useInventoryStep(launchConfig, resource, visited),
useCredentialsStep(launchConfig, resource, resourceDefaultCredentials), useCredentialsStep(launchConfig, resource, resourceDefaultCredentials),
useOtherPromptsStep(launchConfig, resource), useExecutionEnvironmentStep(launchConfig, resource),
useInstanceGroupsStep(launchConfig, resource, instanceGroups),
useOtherPromptsStep(launchConfig, resource, labels),
useSurveyStep(launchConfig, surveyConfig, resource, visited), useSurveyStep(launchConfig, surveyConfig, resource, visited),
]; ];
@@ -348,6 +389,8 @@ export default function useWorkflowNodeSteps(
setVisited({ setVisited({
inventory: true, inventory: true,
credentials: true, credentials: true,
executionEnvironment: true,
instanceGroups: true,
other: true, other: true,
survey: true, survey: true,
preview: true, preview: true,

View File

@@ -1,6 +1,5 @@
import React, { useCallback, useEffect, useReducer } from 'react'; import React, { useCallback, useEffect, useReducer } from 'react';
import { useHistory } from 'react-router-dom'; import { useHistory } from 'react-router-dom';
import styled from 'styled-components'; import styled from 'styled-components';
import { shape } from 'prop-types'; import { shape } from 'prop-types';
import { t } from '@lingui/macro'; import { t } from '@lingui/macro';
@@ -18,6 +17,7 @@ import ContentLoading from 'components/ContentLoading';
import workflowReducer from 'components/Workflow/workflowReducer'; import workflowReducer from 'components/Workflow/workflowReducer';
import useRequest, { useDismissableError } from 'hooks/useRequest'; import useRequest, { useDismissableError } from 'hooks/useRequest';
import { import {
OrganizationsAPI,
WorkflowApprovalTemplatesAPI, WorkflowApprovalTemplatesAPI,
WorkflowJobTemplateNodesAPI, WorkflowJobTemplateNodesAPI,
WorkflowJobTemplatesAPI, WorkflowJobTemplatesAPI,
@@ -53,7 +53,18 @@ const Wrapper = styled.div`
`; `;
const replaceIdentifier = (node) => { const replaceIdentifier = (node) => {
if (stringIsUUID(node.originalNodeObject.identifier) || node.identifier) { if (
stringIsUUID(node.originalNodeObject.identifier) &&
typeof node.identifier === 'string' &&
node.identifier !== ''
) {
return true;
}
if (
!stringIsUUID(node.originalNodeObject.identifier) &&
node.originalNodeObject.identifier !== node.identifier
) {
return true; return true;
} }
@@ -126,6 +137,7 @@ function Visualizer({ template }) {
addNodeTarget: null, addNodeTarget: null,
addingLink: false, addingLink: false,
contentError: null, contentError: null,
defaultOrganization: null,
isLoading: true, isLoading: true,
linkToDelete: null, linkToDelete: null,
linkToEdit: null, linkToEdit: null,
@@ -148,6 +160,7 @@ function Visualizer({ template }) {
addLinkTargetNode, addLinkTargetNode,
addNodeSource, addNodeSource,
contentError, contentError,
defaultOrganization,
isLoading, isLoading,
linkToDelete, linkToDelete,
linkToEdit, linkToEdit,
@@ -261,6 +274,14 @@ function Visualizer({ template }) {
useEffect(() => { useEffect(() => {
async function fetchData() { async function fetchData() {
try { try {
const {
data: { results },
} = await OrganizationsAPI.read({ page_size: 1, page: 1 });
dispatch({
type: 'SET_DEFAULT_ORGANIZATION',
value: results[0]?.id,
});
const workflowNodes = await fetchWorkflowNodes(template.id); const workflowNodes = await fetchWorkflowNodes(template.id);
dispatch({ dispatch({
type: 'GENERATE_NODES_AND_LINKS', type: 'GENERATE_NODES_AND_LINKS',
@@ -302,6 +323,9 @@ function Visualizer({ template }) {
const deletedNodeIds = []; const deletedNodeIds = [];
const associateCredentialRequests = []; const associateCredentialRequests = [];
const disassociateCredentialRequests = []; const disassociateCredentialRequests = [];
const associateLabelRequests = [];
const disassociateLabelRequests = [];
const instanceGroupRequests = [];
const generateLinkMapAndNewLinks = () => { const generateLinkMapAndNewLinks = () => {
const linkMap = {}; const linkMap = {};
@@ -400,6 +424,8 @@ function Visualizer({ template }) {
nodeRequests.push( nodeRequests.push(
WorkflowJobTemplatesAPI.createNode(template.id, { WorkflowJobTemplatesAPI.createNode(template.id, {
...node.promptValues, ...node.promptValues,
execution_environment:
node.promptValues?.execution_environment?.id || null,
inventory: node.promptValues?.inventory?.id || null, inventory: node.promptValues?.inventory?.id || null,
unified_job_template: node.fullUnifiedJobTemplate.id, unified_job_template: node.fullUnifiedJobTemplate.id,
all_parents_must_converge: node.all_parents_must_converge, all_parents_must_converge: node.all_parents_must_converge,
@@ -423,6 +449,29 @@ function Visualizer({ template }) {
); );
}); });
} }
if (node.promptValues?.labels?.length > 0) {
node.promptValues.labels.forEach((label) => {
associateLabelRequests.push(
WorkflowJobTemplateNodesAPI.associateLabel(
data.id,
label,
node.fullUnifiedJobTemplate.organization ||
defaultOrganization
)
);
});
}
if (node.promptValues?.instance_groups?.length > 0)
/* eslint-disable no-await-in-loop, no-restricted-syntax */
for (const group of node.promptValues.instance_groups) {
instanceGroupRequests.push(
WorkflowJobTemplateNodesAPI.associateInstanceGroup(
data.id,
group.id
)
);
}
}) })
); );
} }
@@ -487,6 +536,8 @@ function Visualizer({ template }) {
nodeRequests.push( nodeRequests.push(
WorkflowJobTemplateNodesAPI.replace(node.originalNodeObject.id, { WorkflowJobTemplateNodesAPI.replace(node.originalNodeObject.id, {
...node.promptValues, ...node.promptValues,
execution_environment:
node.promptValues?.execution_environment?.id || null,
inventory: node.promptValues?.inventory?.id || null, inventory: node.promptValues?.inventory?.id || null,
unified_job_template: node.fullUnifiedJobTemplate.id, unified_job_template: node.fullUnifiedJobTemplate.id,
all_parents_must_converge: node.all_parents_must_converge, all_parents_must_converge: node.all_parents_must_converge,
@@ -503,6 +554,12 @@ function Visualizer({ template }) {
node.promptValues?.credentials node.promptValues?.credentials
); );
const { added: addedLabels, removed: removedLabels } =
getAddedAndRemoved(
node?.originalNodeLabels,
node.promptValues?.labels
);
if (addedCredentials.length > 0) { if (addedCredentials.length > 0) {
addedCredentials.forEach((cred) => { addedCredentials.forEach((cred) => {
associateCredentialRequests.push( associateCredentialRequests.push(
@@ -523,6 +580,41 @@ function Visualizer({ template }) {
) )
); );
} }
if (addedLabels.length > 0) {
addedLabels.forEach((label) => {
associateLabelRequests.push(
WorkflowJobTemplateNodesAPI.associateLabel(
node.originalNodeObject.id,
label,
node.fullUnifiedJobTemplate.organization ||
defaultOrganization
)
);
});
}
if (removedLabels?.length > 0) {
removedLabels.forEach((label) =>
disassociateLabelRequests.push(
WorkflowJobTemplateNodesAPI.disassociateLabel(
node.originalNodeObject.id,
label,
node.fullUnifiedJobTemplate.organization ||
defaultOrganization
)
)
);
}
if (node.promptValues?.instance_groups) {
instanceGroupRequests.push(
WorkflowJobTemplateNodesAPI.orderInstanceGroups(
node.originalNodeObject.id,
node.promptValues?.instance_groups,
node?.originalNodeInstanceGroups || []
)
);
}
}) })
); );
} }
@@ -539,11 +631,18 @@ function Visualizer({ template }) {
); );
await Promise.all(associateNodes(newLinks, originalLinkMap)); await Promise.all(associateNodes(newLinks, originalLinkMap));
await Promise.all(disassociateCredentialRequests); await Promise.all([
await Promise.all(associateCredentialRequests); ...disassociateCredentialRequests,
...disassociateLabelRequests,
]);
await Promise.all([
...associateCredentialRequests,
...associateLabelRequests,
...instanceGroupRequests,
]);
history.push(`/templates/workflow_job_template/${template.id}/details`); history.push(`/templates/workflow_job_template/${template.id}/details`);
}, [links, nodes, history, template.id]), }, [links, nodes, history, defaultOrganization, template.id]),
{} {}
); );

View File

@@ -1,6 +1,7 @@
import React from 'react'; import React from 'react';
import { act } from 'react-dom/test-utils'; import { act } from 'react-dom/test-utils';
import { import {
OrganizationsAPI,
WorkflowApprovalTemplatesAPI, WorkflowApprovalTemplatesAPI,
WorkflowJobTemplateNodesAPI, WorkflowJobTemplateNodesAPI,
WorkflowJobTemplatesAPI, WorkflowJobTemplatesAPI,
@@ -104,6 +105,12 @@ const mockWorkflowNodes = [
describe('Visualizer', () => { describe('Visualizer', () => {
let wrapper; let wrapper;
beforeEach(() => { beforeEach(() => {
OrganizationsAPI.read.mockResolvedValue({
data: {
count: 1,
results: [{ id: 1, name: 'Default' }],
},
});
WorkflowJobTemplatesAPI.readNodes.mockResolvedValue({ WorkflowJobTemplatesAPI.readNodes.mockResolvedValue({
data: { data: {
count: mockWorkflowNodes.length, count: mockWorkflowNodes.length,

View File

@@ -64,7 +64,6 @@ function VisualizerNode({
}) { }) {
const ref = useRef(null); const ref = useRef(null);
const [hovering, setHovering] = useState(false); const [hovering, setHovering] = useState(false);
const [credentialsError, setCredentialsError] = useState(null);
const [detailError, setDetailError] = useState(null); const [detailError, setDetailError] = useState(null);
const dispatch = useContext(WorkflowDispatchContext); const dispatch = useContext(WorkflowDispatchContext);
const { addingLink, addLinkSourceNode, nodePositions, nodes } = const { addingLink, addLinkSourceNode, nodePositions, nodes } =
@@ -72,7 +71,6 @@ function VisualizerNode({
const isAddLinkSourceNode = const isAddLinkSourceNode =
addLinkSourceNode && addLinkSourceNode.id === node.id; addLinkSourceNode && addLinkSourceNode.id === node.id;
const handleCredentialsErrorClose = () => setCredentialsError(null);
const handleDetailErrorClose = () => setDetailError(null); const handleDetailErrorClose = () => setDetailError(null);
const updateNode = async () => { const updateNode = async () => {
@@ -98,18 +96,47 @@ function VisualizerNode({
if ( if (
node?.originalNodeObject?.summary_fields?.unified_job_template node?.originalNodeObject?.summary_fields?.unified_job_template
?.unified_job_type === 'job' && ?.unified_job_type === 'job' ||
!node?.originalNodeCredentials node?.originalNodeObject?.summary_fields?.unified_job_template
?.unified_job_type === 'workflow_job'
) { ) {
try { try {
const { if (
data: { results }, node?.originalNodeObject?.summary_fields?.unified_job_template
} = await WorkflowJobTemplateNodesAPI.readCredentials( ?.unified_job_type === 'job' &&
node.originalNodeObject.id !node?.originalNodeCredentials
); ) {
updatedNode.originalNodeCredentials = results; const {
data: { results },
} = await WorkflowJobTemplateNodesAPI.readCredentials(
node.originalNodeObject.id
);
updatedNode.originalNodeCredentials = results;
}
if (
node?.originalNodeObject?.summary_fields?.unified_job_template
?.unified_job_type === 'job' &&
!node.originalNodeLabels
) {
const {
data: { results },
} = await WorkflowJobTemplateNodesAPI.readAllLabels(
node.originalNodeObject.id
);
updatedNode.originalNodeLabels = results;
updatedNode.originalNodeObject.labels = results;
}
if (!node.originalNodeInstanceGroups) {
const {
data: { results },
} = await WorkflowJobTemplateNodesAPI.readInstanceGroups(
node.originalNodeObject.id
);
updatedNode.originalNodeInstanceGroups = results;
updatedNode.originalNodeObject.instance_groups = results;
}
} catch (err) { } catch (err) {
setCredentialsError(err); setDetailError(err);
return null; return null;
} }
} }
@@ -350,17 +377,6 @@ function VisualizerNode({
<ErrorDetail error={detailError} /> <ErrorDetail error={detailError} />
</AlertModal> </AlertModal>
)} )}
{credentialsError && (
<AlertModal
isOpen={credentialsError}
variant="error"
title={t`Error!`}
onClose={handleCredentialsErrorClose}
>
{t`Failed to retrieve node credentials.`}
<ErrorDetail error={credentialsError} />
</AlertModal>
)}
</> </>
); );
} }

View File

@@ -6,7 +6,7 @@ const jtHelpTextStrings = () => ({
jobType: t`For job templates, select run to execute the playbook. Select check to only check playbook syntax, test environment setup, and report problems without executing the playbook.`, jobType: t`For job templates, select run to execute the playbook. Select check to only check playbook syntax, test environment setup, and report problems without executing the playbook.`,
inventory: t`Select the inventory containing the hosts you want this job to manage.`, inventory: t`Select the inventory containing the hosts you want this job to manage.`,
project: t`Select the project containing the playbook you want this job to execute.`, project: t`Select the project containing the playbook you want this job to execute.`,
executionEnvironmentForm: t`Select the execution environment for this job template.`, executionEnvironmentForm: t`The container image to be used for execution.`,
executionEnvironmentDetail: t`The execution environment that will be used when launching this job template. The resolved execution environment can be overridden by explicitly assigning a different one to this job template.`, executionEnvironmentDetail: t`The execution environment that will be used when launching this job template. The resolved execution environment can be overridden by explicitly assigning a different one to this job template.`,
playbook: t`Select the playbook to be executed by this job.`, playbook: t`Select the playbook to be executed by this job.`,
credentials: t`Select credentials for accessing the nodes this job will be ran against. You can only select one credential of each type. For machine credentials (SSH), checking "Prompt on launch" without selecting credentials will require you to select a machine credential at run time. If you select credentials and check "Prompt on launch", the selected credential(s) become the defaults that can be updated at run time.`, credentials: t`Select credentials for accessing the nodes this job will be ran against. You can only select one credential of each type. For machine credentials (SSH), checking "Prompt on launch" without selecting credentials will require you to select a machine credential at run time. If you select credentials and check "Prompt on launch", the selected credential(s) become the defaults that can be updated at run time.`,
@@ -24,7 +24,7 @@ const jtHelpTextStrings = () => ({
webhookURL: t`Webhook services can launch jobs with this workflow job template by making a POST request to this URL.`, webhookURL: t`Webhook services can launch jobs with this workflow job template by making a POST request to this URL.`,
webhookKey: t`Webhook services can use this as a shared secret.`, webhookKey: t`Webhook services can use this as a shared secret.`,
webhookCredential: t`Optionally select the credential to use to send status updates back to the webhook service.`, webhookCredential: t`Optionally select the credential to use to send status updates back to the webhook service.`,
sourceControlBranch: t`Select a branch for the workflow. This branch is applied to all job template nodes that prompt for a branch.`, sourceControlBranch: t`Branch to use in job run. Project default used if blank. Only allowed if project allow_override field is set to true.`,
provisioningCallbacks: (brandName = '') => provisioningCallbacks: (brandName = '') =>
t`Enables creation of a provisioning callback URL. Using the URL a host can contact ${brandName} and request a configuration update using this job template.`, t`Enables creation of a provisioning callback URL. Using the URL a host can contact ${brandName} and request a configuration update using this job template.`,
privilegeEscalation: t`If enabled, run this playbook as an administrator.`, privilegeEscalation: t`If enabled, run this playbook as an administrator.`,

Some files were not shown because too many files have changed in this diff Show More