Mirror of https://github.com/ansible/awx.git
JT param everything (#12646)
* Making almost all fields promptable on job templates and config models
* Adding EE, IG and label access checks
* Changing jobs preferred instance group function to handle the new IG cache field
* Adding new ask fields to job template modules
* Address unit/functional tests
* Adding migration file
This commit is contained in:
parent 04d0e3915c
commit 33c0fb79d6
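A minimal sketch (not part of this commit) of how the new prompt-on-launch fields could be supplied to the job template launch endpoint once the matching ask_*_on_launch flags are enabled. The host, token, and IDs below are assumptions for illustration only.

# Hedged example; adjust AWX_HOST, TOKEN, and IDs for a real controller.
import requests

AWX_HOST = "https://awx.example.com"   # assumed controller URL
TOKEN = "abc123"                       # assumed OAuth2 token
TEMPLATE_ID = 42                       # assumed job template id

payload = {
    "execution_environment": 3,        # pk, honored when ask_execution_environment_on_launch is True
    "labels": [7, 8],                  # pks, honored when ask_labels_on_launch is True
    "forks": 10,                       # honored when ask_forks_on_launch is True
    "job_slice_count": 2,              # honored when ask_job_slice_count_on_launch is True
    "timeout": 600,                    # honored when ask_timeout_on_launch is True
    "instance_groups": [5, 2],         # ordered pks, honored when ask_instance_groups_on_launch is True
}

response = requests.post(
    f"{AWX_HOST}/api/v2/job_templates/{TEMPLATE_ID}/launch/",
    headers={"Authorization": f"Bearer {TOKEN}"},
    json=payload,
    timeout=30,
)
# Fields whose ask_* flag is disabled come back in ignored_fields, as the tests below assert.
print(response.status_code, response.json().get("ignored_fields"))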
@@ -2923,6 +2923,12 @@ class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobO
'ask_verbosity_on_launch',
'ask_inventory_on_launch',
'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled',
'become_enabled',
'diff_mode',
@@ -3036,6 +3042,9 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
'webhook_service',
'webhook_credential',
'webhook_guid',
# TODO: Do we want these here or just in the summary fields?
'labels',
'instance_groups',
)

def get_related(self, obj):

@@ -3062,6 +3071,11 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
res['project_update'] = self.reverse('api:project_update_detail', kwargs={'pk': obj.project_update.pk})
except ObjectDoesNotExist:
pass
try:
if obj.instance_groups:
res['instance_groups'] = self.reverse('api:job_instance_group_list', kwargs={'pk': obj.pk})
except ObjectDoesNotExist:
pass
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
@@ -4083,7 +4097,6 @@ class SystemJobEventSerializer(AdHocCommandEventSerializer):

class JobLaunchSerializer(BaseSerializer):

# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)

@@ -4106,6 +4119,12 @@ class JobLaunchSerializer(BaseSerializer):
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
forks = serializers.IntegerField(required=False, write_only=True, default=1)
job_slice_count = serializers.IntegerField(required=False, write_only=True, default=0)
timeout = serializers.IntegerField(required=False, write_only=True, default=0)
instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True)

class Meta:
model = JobTemplate
@@ -4133,6 +4152,12 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch',
'ask_inventory_on_launch',
'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
'survey_enabled',
'variables_needed_to_start',
'credential_needed_to_start',

@@ -4140,6 +4165,12 @@ class JobLaunchSerializer(BaseSerializer):
'job_template_data',
'defaults',
'verbosity',
'execution_environment',
'labels',
'forks',
'job_slice_count',
'timeout',
'instance_groups',
)
read_only_fields = (
'ask_scm_branch_on_launch',

@@ -4152,6 +4183,12 @@ class JobLaunchSerializer(BaseSerializer):
'ask_verbosity_on_launch',
'ask_inventory_on_launch',
'ask_credential_on_launch',
'ask_execution_environment_on_launch',
'ask_labels_on_launch',
'ask_forks_on_launch',
'ask_job_slice_count_on_launch',
'ask_timeout_on_launch',
'ask_instance_groups_on_launch',
)

def get_credential_needed_to_start(self, obj):
@@ -4176,6 +4213,19 @@ class JobLaunchSerializer(BaseSerializer):
if cred.credential_type.managed and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict)
elif field_name == 'execution_environment':
if obj.execution_environment_id:
defaults_dict[field_name] = {'id': obj.execution_environment.id, 'name': obj.execution_environment.name}
else:
defaults_dict[field_name] = {}
elif field_name == 'labels':
for label in obj.labels.all():
label_dict = {'id': label.id, 'name': label.name}
defaults_dict.setdefault(field_name, []).append(label_dict)
elif field_name == 'instance_groups':
for instance_group in obj.instance_groups.all():
ig_dict = {'id': instance_group.id, 'name': instance_group.name}
defaults_dict.setdefault(field_name, []).append(ig_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
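Based on the branches above, the launch endpoint's defaults summary represents the new related fields as small id/name dictionaries (or lists of them). A hedged illustration of the resulting shape, with made-up ids and names:

# Illustrative only; values are assumptions, not output from a real template.
example_defaults = {
    "execution_environment": {"id": 3, "name": "Default EE"},   # {} when the template has no EE set
    "labels": [{"id": 7, "name": "dev"}, {"id": 8, "name": "webservers"}],
    "instance_groups": [{"id": 5, "name": "controlplane"}, {"id": 2, "name": "default"}],
    "forks": 0,
    "job_slice_count": 1,
    "timeout": 0,
}
print(example_defaults["execution_environment"]["name"])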
@@ -16,6 +16,7 @@ from awx.api.views import (
JobStdout,
JobNotificationsList,
JobLabelList,
JobInstanceGroupList,
JobHostSummaryDetail,
)

@@ -33,6 +34,7 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/stdout/$', JobStdout.as_view(), name='job_stdout'),
re_path(r'^(?P<pk>[0-9]+)/notifications/$', JobNotificationsList.as_view(), name='job_notifications_list'),
re_path(r'^(?P<pk>[0-9]+)/labels/$', JobLabelList.as_view(), name='job_label_list'),
re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', JobInstanceGroupList.as_view(), name='job_instance_group_list'),
re_path(r'^(?P<pk>[0-9]+)/$', JobHostSummaryDetail.as_view(), name='job_host_summary_detail'),
]
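The new URL pattern exposes a job's instance groups as a sub-resource. A hedged sketch of reading it back over the API; the host, token, and job id are assumptions:

import requests

AWX_HOST = "https://awx.example.com"   # assumed controller URL
TOKEN = "abc123"                       # assumed OAuth2 token
JOB_ID = 968                           # assumed job id

# List the instance groups recorded for a job via the new sub-resource endpoint.
resp = requests.get(
    f"{AWX_HOST}/api/v2/jobs/{JOB_ID}/instance_groups/",
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
for ig in resp.json().get("results", []):
    print(ig["id"], ig["name"])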
@@ -22,6 +22,7 @@ from django.conf import settings
from django.core.exceptions import FieldError, ObjectDoesNotExist
from django.db.models import Q, Sum
from django.db import IntegrityError, ProgrammingError, transaction, connection
from django.db.models.fields.related import ManyToManyField, ForeignKey
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from django.utils.timezone import now

@@ -2381,10 +2382,10 @@ class JobTemplateLaunch(RetrieveAPIView):
for field, ask_field_name in modified_ask_mapping.items():
if not getattr(obj, ask_field_name):
data.pop(field, None)
elif field == 'inventory':
elif isinstance(getattr(obj.__class__, field).field, ForeignKey):
data[field] = getattrd(obj, "%s.%s" % (field, 'id'), None)
elif field == 'credentials':
data[field] = [cred.id for cred in obj.credentials.all()]
elif isinstance(getattr(obj.__class__, field).field, ManyToManyField):
data[field] = [item.id for item in getattr(obj, field).all()]
else:
data[field] = getattr(obj, field)
return data
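The loop above decides how each promptable field is serialized for the launch view: foreign keys become a single id, credentials and other many-to-many fields become lists of ids, and plain fields pass through unchanged. A hedged illustration of what the resulting data could look like for a template with every prompt enabled (ids and values are made up):

# Illustrative only; not real output.
data = {
    "inventory": 12,                 # ForeignKey -> its id
    "execution_environment": 3,      # ForeignKey -> its id
    "credentials": [4, 9],           # credentials -> list of ids
    "labels": [7, 8],                # ManyToManyField -> list of ids
    "instance_groups": [5, 2],       # ManyToManyField -> list of ids, order preserved
    "forks": 0,                      # plain field -> raw value
    "job_slice_count": 1,
    "timeout": 0,
    "verbosity": 0,
}
print(sorted(data))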
@@ -3537,6 +3538,15 @@ class JobLabelList(SubListAPIView):
parent_key = 'job'


class JobInstanceGroupList(SubListAPIView):

model = models.InstanceGroup
serializer_class = serializers.InstanceGroupSerializer
parent_model = models.Job
relationship = 'instance_groups'
parent_key = 'job'


class WorkflowJobLabelList(JobLabelList):
parent_model = models.WorkflowJob
@@ -1833,6 +1833,9 @@ class JobLaunchConfigAccess(BaseAccess):
In order to create a new object with a copy of this launch config, I need:
- use access to related inventory (if present)
- use role to many-related credentials (if any present)
- use role to Execution Environment (if present), unless the specified EE is already in the template
- use role to many-related labels (if any present), unless the specified label is already in the template
- use role to many-related instance groups (if any present), unless the specified instance group is already in the template
"""

model = JobLaunchConfig

@@ -1850,6 +1853,7 @@ class JobLaunchConfigAccess(BaseAccess):
def can_add(self, data, template=None):
# This is a special case, we don't check related many-to-many elsewhere
# launch RBAC checks use this
permission_error = False
if 'credentials' in data and data['credentials'] or 'reference_obj' in data:
if 'reference_obj' in data:
prompted_cred_qs = data['reference_obj'].credentials.all()

@@ -1862,12 +1866,58 @@ class JobLaunchConfigAccess(BaseAccess):
cred_pks.remove(cred.pk)
prompted_cred_qs = Credential.objects.filter(pk__in=cred_pks)
if self._unusable_creds_exist(prompted_cred_qs):
return False
credential_names = [cred.name for cred in prompted_cred_qs]
logger.debug("User {} not allowed to access credentials in {}".format(self.user.username, credential_names))
permission_error = True
if 'execution_environment' in data and data['execution_environment'] or 'reference_obj' in data:
if 'reference_obj' in data:
ee = data['reference_obj'].execution_environment
else:
ee = data['execution_environment']
if ee and not self.user.can_access(ExecutionEnvironment, 'read', ee):
if not template or ee != template.execution_environment:
logger.debug("User {} not allowed access to ee {}".format(self.user.username, ee.name))
permission_error = True
else:
logger.debug(
"User {} does not have permissions to execution_environment {} but it's part of the template".format(self.user.username, ee.name)
)
if 'labels' in data and data['labels'] or 'reference_obj' in data:
if 'reference_obj' in data:
labels = data['reference_obj'].labels.all()
else:
labels = data['labels']
for a_label in labels:
if not self.user.can_access(Label, 'read', a_label):
# This allows a template admin who can see labels to specify a list and the executor to select a subset of that list
if not template or a_label not in template.labels.all():
logger.debug("User {} not allowed access to label {}".format(self.user.username, a_label.name))
permission_error = True
else:
logger.debug("User {} does not have permissions to label {} but it's part of the template".format(self.user.username, a_label.name))
if 'instance_groups' in data and data['instance_groups'] or 'reference_obj' in data:
if 'reference_obj' in data:
instance_groups = data['reference_obj'].instance_groups.all()
else:
instance_groups = data['instance_groups']
for an_ig in instance_groups:
if an_ig not in self.user.get_queryset(InstanceGroup):
# This allows a template admin who can see IGs to specify a list and the executor to select a subset of that list
if not template or an_ig not in template.instance_groups.all():
logger.debug("User {} not allowed access to instance group {}".format(self.user.username, an_ig.name))
permission_error = True
else:
logger.debug(
"User {} does not have permissions to instance_group {} but it's part of the template".format(self.user.username, an_ig.name)
)
if permission_error:
return False
return self.check_related('inventory', Inventory, data, role_field='use_role')

@check_superuser
def can_use(self, obj):
return self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True) and self.has_credentials_access(obj)
inventory_check = self.check_related('inventory', Inventory, {}, obj=obj, role_field='use_role', mandatory=True)
return inventory_check and self.has_credentials_access(obj)

def can_change(self, obj, data):
return self.check_related('inventory', Inventory, data, obj=obj, role_field='use_role')
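A hedged sketch of exercising the new checks directly, mirroring the unit tests further down in this commit. It assumes an AWX Django environment (for example an awx-manage shell) where the referenced objects already exist:

from awx.main.access import JobLaunchConfigAccess
from awx.main.models import ExecutionEnvironment, InstanceGroup, Label, User

user = User.objects.get(username='rando')          # assumed non-privileged user
access = JobLaunchConfigAccess(user)

ee = ExecutionEnvironment.objects.first()          # assumed existing objects
ig = InstanceGroup.objects.first()
label = Label.objects.first()

# Each prompt type is checked independently; any failed check flips permission_error.
print(access.can_add({'execution_environment': ee}))
print(access.can_add({'labels': [label]}))
print(access.can_add({'instance_groups': [ig]}))

# Passing the template allows overrides that are already attached to it:
# access.can_add({'instance_groups': [ig]}, template=some_job_template)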
awx/main/migrations/0167_jt_prompt_everything_on_launch.py (new file, 110 lines)
@@ -0,0 +1,110 @@
# Generated by Django 3.2.13 on 2022-08-16 11:40

import awx.main.fields
import awx.main.utils.polymorphic
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

dependencies = [
('main', '0166_alter_jobevent_host'),
]

operations = [
migrations.AddField(
model_name='joblaunchconfig',
name='execution_environment',
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=awx.main.utils.polymorphic.SET_NULL,
related_name='execution_environment',
to='main.executionenvironment',
),
),
migrations.AddField(
model_name='joblaunchconfig',
name='labels',
field=models.ManyToManyField(related_name='joblaunchconfig_labels', to='main.Label'),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_execution_environment_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_forks_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_instance_groups_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_job_slice_count_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_labels_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='jobtemplate',
name='ask_timeout_on_launch',
field=awx.main.fields.AskForField(blank=True, default=False),
),
migrations.AddField(
model_name='schedule',
name='labels',
field=models.ManyToManyField(related_name='schedule_labels', to='main.Label'),
),
migrations.AddField(
model_name='workflowjobnode',
name='labels',
field=models.ManyToManyField(related_name='workflowjobnode_labels', to='main.Label'),
),
migrations.AddField(
model_name='workflowjobtemplatenode',
name='labels',
field=models.ManyToManyField(related_name='workflowjobtemplatenode_labels', to='main.Label'),
),
migrations.CreateModel(
name='JobLaunchConfigInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('joblaunchconfig', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.joblaunchconfig')),
],
),
migrations.CreateModel(
name='JobInstanceGroupMembership',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(db_index=True, default=None, null=True)),
('instancegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.instancegroup')),
('unifiedjob', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.job')),
],
),
migrations.AddField(
model_name='job',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True, editable=False, related_name='job_instance_groups', through='main.JobInstanceGroupMembership', to='main.InstanceGroup'
),
),
migrations.AddField(
model_name='joblaunchconfig',
name='instance_groups',
field=awx.main.fields.OrderedManyToManyField(
blank=True, editable=False, related_name='joblaunchconfigs', through='main.JobLaunchConfigInstanceGroupMembership', to='main.InstanceGroup'
),
),
]
@@ -434,3 +434,25 @@ class InventoryInstanceGroupMembership(models.Model):
default=None,
db_index=True,
)


class JobInstanceGroupMembership(models.Model):

unifiedjob = models.ForeignKey('Job', on_delete=models.CASCADE)
instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)


class JobLaunchConfigInstanceGroupMembership(models.Model):

joblaunchconfig = models.ForeignKey('JobLaunchConfig', on_delete=models.CASCADE)
instancegroup = models.ForeignKey('InstanceGroup', on_delete=models.CASCADE)
position = models.PositiveIntegerField(
null=True,
default=None,
db_index=True,
)
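These through models carry a position column, which is what lets the OrderedManyToManyField keep instance group assignments in the order they were added. A hedged ORM sketch (assumes an AWX shell with a job and two instance groups already present):

from awx.main.models import InstanceGroup, Job

job = Job.objects.first()                              # assumed existing job
ig_a = InstanceGroup.objects.get(name='controlplane')  # assumed group names
ig_b = InstanceGroup.objects.get(name='default')

job.instance_groups.add(ig_a)
job.instance_groups.add(ig_b)

# Read back in insertion order, driven by position on JobInstanceGroupMembership.
print([ig.name for ig in job.instance_groups.all()])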
@@ -43,8 +43,8 @@ from awx.main.models.notifications import (
NotificationTemplate,
JobNotificationMixin,
)
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob
from awx.main.utils import parse_yaml_or_json, getattr_dne, NullablePromptPseudoField, polymorphic
from awx.main.fields import ImplicitRoleField, AskForField, JSONBlob, OrderedManyToManyField
from awx.main.models.mixins import (
ResourceMixin,
SurveyJobTemplateMixin,

@@ -250,6 +250,30 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
)
ask_credential_on_launch = AskForField(blank=True, default=False, allows_field='credentials')
ask_scm_branch_on_launch = AskForField(blank=True, default=False, allows_field='scm_branch')
ask_execution_environment_on_launch = AskForField(
blank=True,
default=False,
)
ask_labels_on_launch = AskForField(
blank=True,
default=False,
)
ask_forks_on_launch = AskForField(
blank=True,
default=False,
)
ask_job_slice_count_on_launch = AskForField(
blank=True,
default=False,
)
ask_timeout_on_launch = AskForField(
blank=True,
default=False,
)
ask_instance_groups_on_launch = AskForField(
blank=True,
default=False,
)
job_slice_count = models.PositiveIntegerField(
blank=True,
default=1,

@@ -276,7 +300,18 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
@classmethod
def _get_unified_job_field_names(cls):
return set(f.name for f in JobOptions._meta.fields) | set(
['name', 'description', 'organization', 'survey_passwords', 'labels', 'credentials', 'job_slice_number', 'job_slice_count', 'execution_environment']
[
'name',
'description',
'organization',
'survey_passwords',
'labels',
'credentials',
'job_slice_number',
'job_slice_count',
'execution_environment',
'instance_groups',
]
)

@property

@@ -314,10 +349,13 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
actual_inventory = self.inventory
if self.ask_inventory_on_launch and 'inventory' in kwargs:
actual_inventory = kwargs['inventory']
actual_slice_count = self.job_slice_count
if self.ask_job_slice_count_on_launch and 'slice_count' in kwargs:
actual_slice_count = kwargs['slice_count']
if actual_inventory:
return min(self.job_slice_count, actual_inventory.hosts.count())
return min(actual_slice_count, actual_inventory.hosts.count())
else:
return self.job_slice_count
return actual_slice_count

def save(self, *args, **kwargs):
update_fields = kwargs.get('update_fields', [])

@@ -425,10 +463,15 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour

field = self._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField):
old_value = set(old_value.all())
new_value = set(kwargs[field_name]) - old_value
if not new_value:
continue
if field_name == 'instance_groups':
# Instance groups are ordered so we can't make a set out of them
old_value = old_value.all()
elif field_name == 'credentials':
# Credentials have a weird pattern because of how they are layered
old_value = set(old_value.all())
new_value = set(kwargs[field_name]) - old_value
if not new_value:
continue

if new_value == old_value:
# no-op case: Fields the same as template's value
@@ -577,6 +620,13 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana
default=1,
help_text=_("If ran as part of sliced jobs, the total number of slices. " "If 1, job is not part of a sliced job."),
)
instance_groups = OrderedManyToManyField(
'InstanceGroup',
related_name='job_instance_groups',
blank=True,
editable=False,
through='JobInstanceGroupMembership',
)

def _get_parent_field_name(self):
return 'job_template'

@@ -767,6 +817,8 @@ class Job(UnifiedJob, JobOptions, SurveyJobMixin, JobNotificationMixin, TaskMana

@property
def preferred_instance_groups(self):
# If the user specified instance groups those will be handled by the unified_job.create_unified_job
# This function handles only the defaults for a template w/o user specification
if self.organization is not None:
organization_groups = [x for x in self.organization.instance_groups.all()]
else:
@@ -919,7 +971,9 @@ class LaunchTimeConfigBase(BaseModel):
continue  # unsaved object can't have related many-to-many
prompt_val = set(getattr(self, prompt_name).all())
if len(prompt_val) > 0:
data[prompt_name] = prompt_val
# We used to return a set but that will cause issues with order for ordered fields (like instance_groups)
# So instead we will return an array of items
data[prompt_name] = [item for item in getattr(self, prompt_name).all()]
elif prompt_name == 'extra_vars':
if self.extra_vars:
if display:

@@ -968,6 +1022,9 @@ class LaunchTimeConfig(LaunchTimeConfigBase):
# Credentials needed for non-unified job / unified JT models
credentials = models.ManyToManyField('Credential', related_name='%(class)ss')

# Labels needed for non-unified job / unified JT models
labels = models.ManyToManyField('Label', related_name='%(class)s_labels')

@property
def extra_vars(self):
return self.extra_data

@@ -1010,6 +1067,15 @@ class JobLaunchConfig(LaunchTimeConfig):
editable=False,
)

# Instance Groups needed for non-unified job / unified JT models
instance_groups = OrderedManyToManyField(
'InstanceGroup', related_name='%(class)ss', blank=True, editable=False, through='JobLaunchConfigInstanceGroupMembership'
)

execution_environment = models.ForeignKey(
'ExecutionEnvironment', null=True, blank=True, default=None, on_delete=polymorphic.SET_NULL, related_name='execution_environment'
)

def has_user_prompts(self, template):
"""
Returns True if any fields exist in the launch config that are
@@ -382,7 +382,10 @@ class UnifiedJobTemplate(PolymorphicModel, CommonModelNameNotUnique, ExecutionEn
unified_job.survey_passwords = new_job_passwords
kwargs['survey_passwords'] = new_job_passwords  # saved in config object for relaunch

unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache()
if 'instance_groups' in kwargs:
unified_job.preferred_instance_groups_cache = [ig.id for ig in kwargs['instance_groups']]
else:
unified_job.preferred_instance_groups_cache = unified_job._get_preferred_instance_group_cache()

unified_job._set_default_dependencies_processed()
unified_job.task_impact = unified_job._get_task_impact()

@@ -973,10 +976,16 @@ class UnifiedJob(
valid_fields.extend(['survey_passwords', 'extra_vars'])
else:
kwargs.pop('survey_passwords', None)
many_to_many_fields = []
for field_name, value in kwargs.items():
if field_name not in valid_fields:
raise Exception('Unrecognized launch config field {}.'.format(field_name))
if field_name == 'credentials':
if isinstance(getattr(self.__class__, field_name).field, models.ManyToManyField):
many_to_many_fields.append(field_name)
continue
if isinstance(getattr(self.__class__, field_name).field, (models.ForeignKey)):
if value:
setattr(config, "{}_id".format(field_name), value.id)
continue
key = field_name
if key == 'extra_vars':

@@ -984,11 +993,22 @@ class UnifiedJob(
setattr(config, key, value)
config.save()

job_creds = set(kwargs.get('credentials', []))
if 'credentials' in [field.name for field in parent._meta.get_fields()]:
job_creds = job_creds - set(parent.credentials.all())
if job_creds:
config.credentials.add(*job_creds)
for field_name in many_to_many_fields:
if field_name == 'credentials':
# Credentials are a special case of many-to-many because of how they function
# (i.e. you can't have > 1 machine cred)
job_item = set(kwargs.get(field_name, []))
if field_name in [field.name for field in parent._meta.get_fields()]:
job_item = job_item - set(getattr(parent, field_name).all())
if job_item:
getattr(config, field_name).add(*job_item)
else:
# Here we are doing a loop to make sure we preserve order in case this is an Ordered field
job_item = kwargs.get(field_name, [])
if job_item:
for item in job_item:
getattr(config, field_name).add(item)

return config

@property
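A hedged sketch of how launch-time prompts end up on the saved JobLaunchConfig after the change above: foreign-key prompts are written as <field>_id, and many-to-many prompts are added one item at a time so their order survives. It assumes an AWX shell where a template-backed job, an execution environment, and two instance groups already exist; names are assumptions.

from awx.main.models import ExecutionEnvironment, InstanceGroup, Job

job = Job.objects.filter(job_template__isnull=False).first()  # assumed job created from a template
ee = ExecutionEnvironment.objects.first()
ig_first = InstanceGroup.objects.get(name='foo')              # assumed instance group names
ig_second = InstanceGroup.objects.get(name='bar')

config = job.create_config_from_prompts(
    {
        'execution_environment': ee,                # ForeignKey prompt: stored as execution_environment_id
        'instance_groups': [ig_first, ig_second],   # many-to-many prompt: added one by one to keep order
    }
)
print(config.execution_environment_id)
print([ig.name for ig in config.instance_groups.all()])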
@@ -4,8 +4,7 @@ import yaml
import json

from awx.api.serializers import JobLaunchSerializer
from awx.main.models.credential import Credential
from awx.main.models.inventory import Inventory, Host
from awx.main.models import Credential, Inventory, Host, ExecutionEnvironment, Label, InstanceGroup
from awx.main.models.jobs import Job, JobTemplate, UnifiedJobTemplate

from awx.api.versioning import reverse

@@ -15,6 +14,9 @@ from awx.api.versioning import reverse
def runtime_data(organization, credentialtype_ssh):
cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'})
inv_obj = organization.inventories.create(name="runtime-inv")
ee_obj = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
ig_obj = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
labels_obj = Label.objects.create(name='foo', description='bar', organization=organization)
return dict(
extra_vars='{"job_launch_var": 4}',
limit='test-servers',

@@ -25,6 +27,12 @@ def runtime_data(organization, credentialtype_ssh):
credentials=[cred_obj.pk],
diff_mode=True,
verbosity=2,
execution_environment=ee_obj.pk,
labels=[labels_obj.pk],
forks=7,
job_slice_count=12,
timeout=10,
instance_groups=[ig_obj.pk],
)

@@ -54,6 +62,12 @@ def job_template_prompts(project, inventory, machine_credential):
ask_credential_on_launch=on_off,
ask_diff_mode_on_launch=on_off,
ask_verbosity_on_launch=on_off,
ask_execution_environment_on_launch=on_off,
ask_labels_on_launch=on_off,
ask_forks_on_launch=on_off,
ask_job_slice_count_on_launch=on_off,
ask_timeout_on_launch=on_off,
ask_instance_groups_on_launch=on_off,
)
jt.credentials.add(machine_credential)
return jt

@@ -77,6 +91,12 @@ def job_template_prompts_null(project):
ask_credential_on_launch=True,
ask_diff_mode_on_launch=True,
ask_verbosity_on_launch=True,
ask_execution_environment_on_launch=True,
ask_labels_on_launch=True,
ask_forks_on_launch=True,
ask_job_slice_count_on_launch=True,
ask_timeout_on_launch=True,
ask_instance_groups_on_launch=True,
)

@@ -92,6 +112,12 @@ def data_to_internal(data):
internal['credentials'] = set(Credential.objects.get(pk=_id) for _id in data['credentials'])
if 'inventory' in data:
internal['inventory'] = Inventory.objects.get(pk=data['inventory'])
if 'execution_environment' in data:
internal['execution_environment'] = ExecutionEnvironment.objects.get(pk=data['execution_environment'])
if 'labels' in data:
internal['labels'] = [Label.objects.get(pk=_id) for _id in data['labels']]
if 'instance_groups' in data:
internal['instance_groups'] = [InstanceGroup.objects.get(pk=_id) for _id in data['instance_groups']]
return internal

@@ -124,6 +150,12 @@ def test_job_ignore_unprompted_vars(runtime_data, job_template_prompts, post, ad
assert 'credentials' in response.data['ignored_fields']
assert 'job_tags' in response.data['ignored_fields']
assert 'skip_tags' in response.data['ignored_fields']
assert 'execution_environment' in response.data['ignored_fields']
assert 'labels' in response.data['ignored_fields']
assert 'forks' in response.data['ignored_fields']
assert 'job_slice_count' in response.data['ignored_fields']
assert 'timeout' in response.data['ignored_fields']
assert 'instance_groups' in response.data['ignored_fields']


@pytest.mark.django_db

@@ -157,11 +189,28 @@ def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'job_tags': '', 'skip_tags': ''}, admin_user, expect=201)
assert JobTemplate.create_unified_job.called
assert JobTemplate.create_unified_job.call_args == ({'job_tags': '', 'skip_tags': ''},)
assert JobTemplate.create_unified_job.call_args == ({'job_tags': '', 'skip_tags': '', 'forks': 1, 'job_slice_count': 0},)

mock_job.signal_start.assert_called_once()


@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_slice_timeout_forks_need_int(job_template_prompts, post, admin_user, mocker):
job_template = job_template_prompts(True)

mock_job = mocker.MagicMock(spec=Job, id=968)

with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):
with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
response = post(
reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'timeout': '', 'job_slice_count': '', 'forks': ''}, admin_user, expect=400
)
assert 'forks' in response.data and response.data['forks'][0] == 'A valid integer is required.'
assert 'job_slice_count' in response.data and response.data['job_slice_count'][0] == 'A valid integer is required.'
assert 'timeout' in response.data and response.data['timeout'][0] == 'A valid integer is required.'


@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker):

@@ -176,6 +225,10 @@ def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null,
inventory = Inventory.objects.get(pk=runtime_data['inventory'])
inventory.use_role.members.add(rando)

# Instance groups and labels cannot currently easily be used by rando, so remove them from the runtime data
runtime_data.pop('instance_groups')
runtime_data.pop('labels')

mock_job = mocker.MagicMock(spec=Job, id=968, **runtime_data)

with mocker.patch.object(JobTemplate, 'create_unified_job', return_value=mock_job):

@@ -243,12 +296,59 @@ def test_job_launch_fails_without_inventory_access(job_template_prompts, runtime

@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_fails_without_credential_access(job_template_prompts, runtime_data, post, rando):
def test_job_launch_works_without_access_to_ig_if_ig_in_template(job_template_prompts, runtime_data, post, rando, mocker):
job_template = job_template_prompts(True)
job_template.instance_groups.add(InstanceGroup.objects.get(id=runtime_data['instance_groups'][0]))
job_template.instance_groups.add(InstanceGroup.objects.create(name='foo'))
job_template.save()
job_template.execute_role.members.add(rando)

# Make sure we get a 201 instead of a 403 since we are providing an override that is just a subset of the instance groups already added to the template
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(instance_groups=runtime_data['instance_groups']), rando, expect=201)


@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_works_without_access_to_label_if_label_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
job_template = job_template_prompts(True)
job_template.labels.add(Label.objects.get(id=runtime_data['labels'][0]))
job_template.labels.add(Label.objects.create(name='baz', description='faz', organization=organization))
job_template.save()
job_template.execute_role.members.add(rando)

# Make sure we get a 201 instead of a 403 since we are providing an override that is just a subset of the labels already added to the template
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(labels=runtime_data['labels']), rando, expect=201)


@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_works_without_access_to_ee_if_ee_in_template(job_template_prompts, runtime_data, post, rando, mocker, organization):
job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando)

# Make sure we get a 201 instead of a 403 since we are providing an override that is already in the template
post(
reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), dict(execution_environment=runtime_data['execution_environment']), rando, expect=201
)


@pytest.mark.parametrize(
'item_type',
[
('credentials'),
('labels'),
('instance_groups'),
],
)
@pytest.mark.django_db
@pytest.mark.job_runtime_vars
def test_job_launch_fails_without_access(job_template_prompts, runtime_data, post, rando, item_type):
job_template = job_template_prompts(True)
job_template.execute_role.members.add(rando)

# Assure that providing a related item without access blocks the launch
data = {item_type: runtime_data[item_type]}
post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), data, rando, expect=403)


@pytest.mark.django_db
@@ -64,3 +64,18 @@ class TestSlicingModels:
inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
[inventory2.hosts.create(name='foo{}'.format(i)) for i in range(3)]
assert job_template.get_effective_slice_ct({'inventory': inventory2})

def test_effective_slice_count_prompt(self, job_template, inventory, organization):
job_template.inventory = inventory
# Add our prompt fields to the JT to allow overrides
job_template.ask_job_slice_count_on_launch = True
job_template.ask_inventory_on_launch = True
# Set a default value of the slice count to something low
job_template.job_slice_count = 2
# Create an inventory with 4 nodes
inventory2 = Inventory.objects.create(organization=organization, name='fooinv')
[inventory2.hosts.create(name='foo{}'.format(i)) for i in range(4)]
# The inventory slice count will be the min of the number of nodes (4) or the job slice (2)
assert job_template.get_effective_slice_ct({'inventory': inventory2}) == 2
# Now we are going to pass in an override (like the prompt would) and as long as that is < host count we expect that back
assert job_template.get_effective_slice_ct({'inventory': inventory2, 'slice_count': 3}) == 3
@@ -1,7 +1,7 @@
import pytest

# AWX
from awx.main.models import JobTemplate, JobLaunchConfig
from awx.main.models import JobTemplate, JobLaunchConfig, ExecutionEnvironment


@pytest.fixture

@@ -61,7 +61,17 @@ class TestConfigReversibility:
config = config_factory({'limit': 'foobar'})
assert config.prompts_dict() == {'limit': 'foobar'}

def test_related_objects(self, config_factory, inventory, credential):
prompts = {'limit': 'foobar', 'inventory': inventory, 'credentials': set([credential])}
def test_related_objects(self, config_factory, inventory, credential, label, default_instance_group):
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
prompts = {
'limit': 'foobar',
'inventory': inventory,
'credentials': [credential],
'execution_environment': ee,
'labels': [label],
'instance_groups': [default_instance_group],
}
config = config_factory(prompts)
print(prompts)
print(config.prompts_dict())
assert config.prompts_dict() == prompts
@@ -417,3 +417,31 @@ class TestInstanceGroupOrdering:
assert job.preferred_instance_groups == [ig_inv, ig_org]
job.job_template.instance_groups.add(ig_tmp)
assert job.preferred_instance_groups == [ig_tmp, ig_inv, ig_org]

def test_job_instance_groups_cache_default(self, instance_group_factory, inventory, project, default_instance_group):
jt = JobTemplate.objects.create(inventory=inventory, project=project)
job = jt.create_unified_job()
print(job.preferred_instance_groups_cache)
print(default_instance_group)
assert job.preferred_instance_groups_cache == [default_instance_group.id]

def test_job_instance_groups_cache_default_additional_items(self, instance_group_factory, inventory, project, default_instance_group):
ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
project.organization.instance_groups.add(ig_org)
inventory.instance_groups.add(ig_inv)
jt = JobTemplate.objects.create(inventory=inventory, project=project)
jt.instance_groups.add(ig_tmp)
job = jt.create_unified_job()
assert job.preferred_instance_groups_cache == [ig_tmp.id, ig_inv.id, ig_org.id]

def test_job_instance_groups_cache_prompt(self, instance_group_factory, inventory, project, default_instance_group):
ig_org = instance_group_factory("OrgIstGrp", [default_instance_group.instances.first()])
ig_inv = instance_group_factory("InvIstGrp", [default_instance_group.instances.first()])
ig_tmp = instance_group_factory("TmpIstGrp", [default_instance_group.instances.first()])
project.organization.instance_groups.add(ig_org)
inventory.instance_groups.add(ig_inv)
jt = JobTemplate.objects.create(inventory=inventory, project=project)
job = jt.create_unified_job(instance_groups=[ig_tmp])
assert job.preferred_instance_groups_cache == [ig_tmp.id]
@@ -3,7 +3,20 @@ import pytest
from unittest import mock
import json

from awx.main.models import Job, Instance, JobHostSummary, InventoryUpdate, InventorySource, Project, ProjectUpdate, SystemJob, AdHocCommand
from awx.main.models import (
Job,
Instance,
JobHostSummary,
InventoryUpdate,
InventorySource,
Project,
ProjectUpdate,
SystemJob,
AdHocCommand,
InstanceGroup,
Label,
ExecutionEnvironment,
)
from awx.main.tasks.system import cluster_node_heartbeat
from django.test.utils import override_settings

@@ -103,14 +116,87 @@ def test_job_notification_host_data(inventory, machine_credential, project, job_
class TestLaunchConfig:
def test_null_creation_from_prompts(self):
job = Job.objects.create()
data = {"credentials": [], "extra_vars": {}, "limit": None, "job_type": None}
data = {
"credentials": [],
"extra_vars": {},
"limit": None,
"job_type": None,
"execution_environment": None,
"instance_groups": None,
"labels": None,
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)
assert config is None

def test_only_limit_defined(self, job_template):
job = Job.objects.create(job_template=job_template)
data = {"credentials": [], "extra_vars": {}, "job_tags": None, "limit": ""}
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": "",
"execution_environment": None,
"instance_groups": None,
"labels": None,
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)
assert config.char_prompts == {"limit": ""}
assert not config.credentials.exists()
assert config.prompts_dict() == {"limit": ""}

def test_many_to_many_fields(self, job_template, organization):
job = Job.objects.create(job_template=job_template)
ig1 = InstanceGroup.objects.create(name='bar')
ig2 = InstanceGroup.objects.create(name='foo')
label1 = Label.objects.create(name='foo', description='bar', organization=organization)
label2 = Label.objects.create(name='faz', description='baz', organization=organization)
# Order should matter here which is why we do 2 and then 1
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": None,
"execution_environment": None,
"instance_groups": [ig2, ig1],
"labels": [label2, label1],
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)

assert config.instance_groups.exists()
config_instance_group_ids = [item.id for item in config.instance_groups.all()]
assert config_instance_group_ids == [ig2.id, ig1.id]

assert config.labels.exists()
config_label_ids = [item.id for item in config.labels.all()]
assert config_label_ids == [label2.id, label1.id]

def test_pk_field(self, job_template, organization):
job = Job.objects.create(job_template=job_template)
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
data = {
"credentials": [],
"extra_vars": {},
"job_tags": None,
"limit": None,
"execution_environment": ee,
"instance_groups": [],
"labels": [],
"forks": None,
"timeout": None,
"job_slice_count": None,
}
config = job.create_config_from_prompts(data)

assert config.execution_environment
# We just write the PK instead of trying to assign an item, that happens on the save
assert config.execution_environment_id == ee.id
@@ -3,7 +3,20 @@ import pytest
from rest_framework.exceptions import PermissionDenied

from awx.main.access import JobAccess, JobLaunchConfigAccess, AdHocCommandAccess, InventoryUpdateAccess, ProjectUpdateAccess
from awx.main.models import Job, JobLaunchConfig, JobTemplate, AdHocCommand, InventoryUpdate, InventorySource, ProjectUpdate, User, Credential
from awx.main.models import (
Job,
JobLaunchConfig,
JobTemplate,
AdHocCommand,
InventoryUpdate,
InventorySource,
ProjectUpdate,
User,
Credential,
ExecutionEnvironment,
InstanceGroup,
Label,
)

from crum import impersonate

@@ -310,6 +323,26 @@ class TestLaunchConfigAccess:
cred2.use_role.members.add(rando)
assert access.has_credentials_access(config)  # has access to both

def test_new_execution_environment_access(self, rando):
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
access = JobLaunchConfigAccess(rando)

assert access.can_add({'execution_environment': ee})  # can add because access to ee will be granted

def test_new_label_access(self, rando, organization):
label = Label.objects.create(name='foo', description='bar', organization=organization)
access = JobLaunchConfigAccess(rando)

assert not access.can_add({'labels': [label]})  # can't add because no access to label
# We assert in JT unit tests that the access will be granted if label is in JT

def test_new_instance_group_access(self, rando):
ig = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
access = JobLaunchConfigAccess(rando)

assert not access.can_add({'instance_groups': [ig]})  # can't add because no access to ig
# We assert in JT unit tests that the access will be granted if instance group is in JT

def test_can_use_minor(self, rando):
# Config object only has flat-field overrides, no RBAC restrictions
job = Job.objects.create()
@@ -55,6 +55,7 @@ class TestJobSerializerGetRelated:
'job_events',
'relaunch',
'labels',
'instance_groups',
],
)
def test_get_related(self, test_get_related, job, related_resource_name):
@@ -8,9 +8,17 @@ from rest_framework.exceptions import ValidationError
from awx.api.serializers import JobLaunchSerializer


def test_primary_key_related_field():
@pytest.mark.parametrize(
"param",
[
('credentials'),
('instance_groups'),
('labels'),
],
)
def test_primary_key_related_field(param):
# We are testing whether the PrimaryKeyRelatedField in this serializer can take a dictionary.
# PrimaryKeyRelatedField should not be able to take a dictionary as input, and should raise a ValidationError.
data = {'credentials': {'1': '2', '3': '4'}}
data = {param: {'1': '2', '3': '4'}}
with pytest.raises(ValidationError):
JobLaunchSerializer(data=data)
@@ -531,6 +531,12 @@ def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
src_field_value = getattr(obj1, field_name)
if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name]
# TODO: Should we spike this out or just put the for loop inside the next if and make everything respect order?
if field_name == 'instance_groups':
# instance_groups are a list but we need to preserve the order
for ig_id in override_field_val:
getattr(obj2, field_name).add(ig_id)
continue
if isinstance(override_field_val, (set, list, QuerySet)):
getattr(obj2, field_name).add(*override_field_val)
continue
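The instance_groups branch above adds items one at a time rather than expanding a collection, because the caller's ordering matters for instance groups. A standalone illustration of the difference:

# Iterating a list preserves the caller's order; expanding a set does not guarantee it.
ordered_ids = [5, 2, 9]
added_in_order = []
for ig_id in ordered_ids:          # mirrors the per-item .add() loop for instance_groups
    added_in_order.append(ig_id)

added_from_set = list({5, 2, 9})   # set expansion may come back in any order
print(added_in_order)   # always [5, 2, 9]
print(added_from_set)   # order not guaranteed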
@@ -86,6 +86,33 @@ options:
description:
- Passwords for credentials which are set to prompt on launch
type: dict
execution_environment:
description:
- Execution environment to use for the job, only used if prompt for execution environment is set.
type: str
forks:
description:
- Forks to use for the job, only used if prompt for forks is set.
type: int
instance_groups:
description:
- Instance groups to use for the job, only used if prompt for instance groups is set.
type: list
elements: str
job_slice_count:
description:
- Job slice count to use for the job, only used if prompt for job slice count is set.
type: int
labels:
description:
- Labels to use for the job, only used if prompt for labels is set.
type: list
elements: str
job_timeout:
description:
- Timeout to use for the job, only used if prompt for timeout is set.
- This parameter is sent through the API to the job.
type: int
wait:
description:
- Wait for the job to complete.

@@ -100,7 +127,7 @@ options:
timeout:
description:
- If waiting for the job to complete this will abort after this
amount of seconds
amount of seconds. This happens on the module side.
type: int
extends_documentation_fragment: awx.awx.auth
'''
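A hedged example (not part of this commit) of how the new options might be passed to the job_launch module through the collection's pytest harness; the template, execution environment, label, and instance group names are assumptions.

module_args = {
    'name': 'Demo Job Template',                  # assumed job template name
    'execution_environment': 'my-custom-ee',      # assumed EE name, resolved to an id by the module
    'forks': 10,
    'instance_groups': ['controlplane', 'default'],
    'job_slice_count': 2,
    'labels': ['dev', 'webservers'],
    'job_timeout': 600,    # becomes the job's timeout; 'timeout' itself is the module-side wait limit
    'wait': False,
}
# result = run_module('job_launch', module_args, admin_user)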
@@ -165,6 +192,12 @@ def main():
verbosity=dict(type='int', choices=[0, 1, 2, 3, 4, 5]),
diff_mode=dict(type='bool'),
credential_passwords=dict(type='dict', no_log=False),
execution_environment=dict(),
forks=dict(type='int'),
instance_groups=dict(type='list', elements='str'),
job_slice_count=dict(type='int'),
labels=dict(type='list', elements='str'),
job_timeout=dict(type='int'),
wait=dict(default=False, type='bool'),
interval=dict(default=2.0, type='float'),
timeout=dict(default=None, type='int'),

@@ -179,6 +212,9 @@ def main():
inventory = module.params.get('inventory')
organization = module.params.get('organization')
credentials = module.params.get('credentials')
execution_environment = module.params.get('execution_environment')
instance_groups = module.params.get('instance_groups')
labels = module.params.get('labels')
wait = module.params.get('wait')
interval = module.params.get('interval')
timeout = module.params.get('timeout')

@@ -191,6 +227,9 @@ def main():
'verbosity',
'diff_mode',
'credential_passwords',
'forks',
'job_slice_count',
'job_timeout',
):
field_val = module.params.get(field_name)
if field_val is not None:

@@ -204,6 +243,11 @@ def main():
if skip_tags is not None:
optional_args['skip_tags'] = ",".join(skip_tags)

# job_timeout is special because it's actually timeout, but we already had a timeout variable
job_timeout = module.params.get('job_timeout')
if job_timeout is not None:
optional_args['timeout'] = job_timeout

# Create a data structure to pass into our job launch
post_data = {}
for arg_name, arg_value in optional_args.items():

@@ -213,11 +257,21 @@ def main():
# Attempt to look up the related items the user specified (these will fail the module if not found)
if inventory:
post_data['inventory'] = module.resolve_name_to_id('inventories', inventory)
if execution_environment:
post_data['execution_environment'] = module.resolve_name_to_id('execution_environments', execution_environment)

if credentials:
post_data['credentials'] = []
for credential in credentials:
post_data['credentials'].append(module.resolve_name_to_id('credentials', credential))
if labels:
post_data['labels'] = []
for label in labels:
post_data['labels'].append(module.resolve_name_to_id('labels', label))
if instance_groups:
post_data['instance_groups'] = []
for instance_group in instance_groups:
post_data['instance_groups'].append(module.resolve_name_to_id('instance_groups', instance_group))

# Attempt to look up job_template based on the provided name
lookup_data = {}
|
||||
type: bool
|
||||
aliases:
|
||||
- ask_credential
|
||||
ask_execution_environment_on_launch:
|
||||
description:
|
||||
- Prompt user for execution environment on launch.
|
||||
type: bool
|
||||
aliases:
|
||||
- ask_execution_environment
|
||||
ask_forks_on_launch:
|
||||
description:
|
||||
- Prompt user for forks on launch.
|
||||
type: bool
|
||||
aliases:
|
||||
- ask_forks
|
||||
ask_instance_groups_on_launch:
|
||||
description:
|
||||
- Prompt user for instance groups on launch.
|
||||
type: bool
|
||||
aliases:
|
||||
- ask_instance_groups
|
||||
ask_job_slice_count_on_launch:
|
||||
description:
|
||||
- Prompt user for job slice count on launch.
|
||||
type: bool
|
||||
aliases:
|
||||
- ask_job_slice_count
|
||||
ask_labels_on_launch:
|
||||
description:
|
||||
- Prompt user for labels on launch.
|
||||
type: bool
|
||||
aliases:
|
||||
- ask_labels
|
||||
ask_timeout_on_launch:
|
||||
description:
|
||||
- Prompt user for timeout on launch.
|
||||
type: bool
|
||||
aliases:
|
||||
- ask_timeout
|
||||
survey_enabled:
|
||||
description:
|
||||
- Enable a survey on the job template.
|
||||
@ -385,6 +421,12 @@ def main():
|
||||
ask_verbosity_on_launch=dict(type='bool', aliases=['ask_verbosity']),
|
||||
ask_inventory_on_launch=dict(type='bool', aliases=['ask_inventory']),
|
||||
ask_credential_on_launch=dict(type='bool', aliases=['ask_credential']),
|
||||
ask_execution_environment_on_launch=dict(type='bool', aliases=['ask_execution_environment']),
|
||||
ask_forks_on_launch=dict(type='bool', aliases=['ask_forks']),
|
||||
ask_instance_groups_on_launch=dict(type='bool', aliases=['ask_instance_groups']),
|
||||
ask_job_slice_count_on_launch=dict(type='bool', aliases=['ask_job_slice_count']),
|
||||
ask_labels_on_launch=dict(type='bool', aliases=['ask_labels']),
|
||||
ask_timeout_on_launch=dict(type='bool', aliases=['ask_timeout']),
|
||||
survey_enabled=dict(type='bool'),
|
||||
survey_spec=dict(type="dict"),
|
||||
become_enabled=dict(type='bool'),
|
||||
@ -484,6 +526,12 @@ def main():
|
||||
'ask_verbosity_on_launch',
|
||||
'ask_inventory_on_launch',
|
||||
'ask_credential_on_launch',
|
||||
'ask_execution_environment_on_launch',
|
||||
'ask_forks_on_launch',
|
||||
'ask_instance_groups_on_launch',
|
||||
'ask_job_slice_count_on_launch',
|
||||
'ask_labels_on_launch',
|
||||
'ask_timeout_on_launch',
|
||||
'survey_enabled',
|
||||
'become_enabled',
|
||||
'diff_mode',
|
||||
|
||||
@@ -46,6 +46,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory):
'timeout': 50,
'allow_simultaneous': True,
'ask_limit_on_launch': True,
'ask_execution_environment_on_launch': True,
'ask_forks_on_launch': True,
'ask_instance_groups_on_launch': True,
'ask_job_slice_count_on_launch': True,
'ask_labels_on_launch': True,
'ask_timeout_on_launch': True,
}

result = run_module('job_template', module_args, admin_user)

@@ -55,6 +61,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory):
assert jt.timeout == 50
assert jt.allow_simultaneous
assert jt.ask_limit_on_launch
assert jt.ask_execution_environment_on_launch
assert jt.ask_forks_on_launch
assert jt.ask_instance_groups_on_launch
assert jt.ask_job_slice_count_on_launch
assert jt.ask_labels_on_launch
assert jt.ask_timeout_on_launch

module_args = {
'name': 'foo',

@@ -68,6 +80,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory):
'timeout': 0,
'allow_simultaneous': False,
'ask_limit_on_launch': False,
'ask_execution_environment_on_launch': False,
'ask_forks_on_launch': False,
'ask_instance_groups_on_launch': False,
'ask_job_slice_count_on_launch': False,
'ask_labels_on_launch': False,
'ask_timeout_on_launch': False,
}

result = run_module('job_template', module_args, admin_user)

@@ -78,6 +96,12 @@ def test_resets_job_template_values(run_module, admin_user, project, inventory):
assert jt.timeout == 0
assert not jt.allow_simultaneous
assert not jt.ask_limit_on_launch
assert not jt.ask_execution_environment_on_launch
assert not jt.ask_forks_on_launch
assert not jt.ask_instance_groups_on_launch
assert not jt.ask_job_slice_count_on_launch
assert not jt.ask_labels_on_launch
assert not jt.ask_timeout_on_launch


@pytest.mark.django_db