Mirror of https://github.com/ansible/awx.git
Fix bug with missing parent field and diff with parent

Remove corresponding views for job instance_groups.
Validate job_slice_count in API.
Remove defaults from some job launch view prompts; the null default is preferable.
parent e069150fbf
commit e231e08869
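For context on the "missing parent field" part of the title: the old copy logic (see the UnifiedJob hunk below) did `getattr(parent, field_name, []).all()`, which blows up when the parent model lacks the prompted relation, since the fallback is a plain list. A minimal sketch of that failure mode, using a hypothetical stand-in class rather than the real models:

    # Illustrative only: a stand-in for a parent model missing the prompted field.
    class Parent:
        pass

    parent = Parent()
    items = getattr(parent, 'instance_groups', [])
    # items is a plain list here, so the old code's items.all() raised AttributeError;
    # the fix below checks parent._meta.get_fields() before touching the relation.
    print(hasattr(items, 'all'))  # False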
@@ -3068,7 +3068,6 @@ class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
             res['project_update'] = self.reverse('api:project_update_detail', kwargs={'pk': obj.project_update.pk})
         except ObjectDoesNotExist:
             pass
-        res['instance_groups'] = self.reverse('api:job_instance_group_list', kwargs={'pk': obj.pk})
         res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
         return res
@@ -4141,9 +4140,9 @@ class JobLaunchSerializer(BaseSerializer):
     verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
     execution_environment = serializers.PrimaryKeyRelatedField(queryset=ExecutionEnvironment.objects.all(), required=False, write_only=True)
     labels = serializers.PrimaryKeyRelatedField(many=True, queryset=Label.objects.all(), required=False, write_only=True)
-    forks = serializers.IntegerField(required=False, write_only=True, min_value=0, default=1)
-    job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0, default=0)
-    timeout = serializers.IntegerField(required=False, write_only=True, default=0)
+    forks = serializers.IntegerField(required=False, write_only=True, min_value=0)
+    job_slice_count = serializers.IntegerField(required=False, write_only=True, min_value=0)
+    timeout = serializers.IntegerField(required=False, write_only=True)
     instance_groups = serializers.PrimaryKeyRelatedField(many=True, queryset=InstanceGroup.objects.all(), required=False, write_only=True)

     class Meta:
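Why dropping `default=` matters here: in DRF, a field with `required=False` and no default is simply absent from `validated_data` when the client omits it, so the launch view can tell "not prompted" apart from "prompted with 0". A minimal sketch of that behavior, assuming bare DRF; the serializer names are illustrative only:

    # Minimal sketch, plain rest_framework; not code from this commit.
    from rest_framework import serializers

    class WithDefault(serializers.Serializer):
        forks = serializers.IntegerField(required=False, min_value=0, default=1)

    class WithoutDefault(serializers.Serializer):
        forks = serializers.IntegerField(required=False, min_value=0)

    s1 = WithDefault(data={})
    s1.is_valid()
    print(s1.validated_data)  # {'forks': 1} -- the default masks "not provided"

    s2 = WithoutDefault(data={})
    s2.is_valid()
    print(s2.validated_data)  # {} -- omission is preserved, the template value wins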
@@ -16,7 +16,6 @@ from awx.api.views import (
     JobStdout,
     JobNotificationsList,
     JobLabelList,
-    JobInstanceGroupList,
     JobHostSummaryDetail,
 )
@@ -34,7 +33,6 @@ urls = [
     re_path(r'^(?P<pk>[0-9]+)/stdout/$', JobStdout.as_view(), name='job_stdout'),
     re_path(r'^(?P<pk>[0-9]+)/notifications/$', JobNotificationsList.as_view(), name='job_notifications_list'),
     re_path(r'^(?P<pk>[0-9]+)/labels/$', JobLabelList.as_view(), name='job_label_list'),
-    re_path(r'^(?P<pk>[0-9]+)/instance_groups/$', JobInstanceGroupList.as_view(), name='job_instance_group_list'),
     re_path(r'^(?P<pk>[0-9]+)/$', JobHostSummaryDetail.as_view(), name='job_host_summary_detail'),
 ]
@@ -3568,15 +3568,6 @@ class JobLabelList(SubListAPIView):
     parent_key = 'job'


-class JobInstanceGroupList(SubListAPIView):
-
-    model = models.InstanceGroup
-    serializer_class = serializers.InstanceGroupSerializer
-    parent_model = models.Job
-    relationship = 'instance_groups'
-    parent_key = 'job'
-
-
 class WorkflowJobLabelList(JobLabelList):
     parent_model = models.WorkflowJob
@@ -333,9 +333,6 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
         actual_slice_count = self.job_slice_count
         if self.ask_job_slice_count_on_launch and 'job_slice_count' in kwargs:
             actual_slice_count = kwargs['job_slice_count']
-            # Set the eager fields if its there as well
-            if '_eager_fields' in kwargs and 'job_slice_count' in kwargs['_eager_fields']:
-                kwargs['_eager_fields']['job_slice_count'] = actual_slice_count
         if actual_inventory:
             return min(actual_slice_count, actual_inventory.hosts.count())
         else:
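The surviving logic clamps the slice count by the inventory's host count. A quick illustrative sketch (my own function, not code from the repo) of how the prompt, the template value, and the host count interact:

    # Illustrative only: mirrors the min(slice_count, host_count) clamp above.
    def effective_slice_ct(template_slice_count, prompted, host_count):
        actual = prompted if prompted is not None else template_slice_count
        return min(actual, host_count)

    print(effective_slice_ct(1, 3, 2))     # 2 -- a 2-host inventory caps the prompt of 3
    print(effective_slice_ct(1, None, 5))  # 1 -- no prompt, so the template value wins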
@@ -476,6 +473,10 @@ class JobTemplate(UnifiedJobTemplate, JobOptions, SurveyJobTemplateMixin, Resour
                 rejected_data[field_name] = new_value
                 errors_dict[field_name] = _('Project does not allow override of branch.')
                 continue
+            elif field_name == 'job_slice_count' and (new_value > 1) and (self.get_effective_slice_ct(kwargs) <= 1):
+                rejected_data[field_name] = new_value
+                errors_dict[field_name] = _('Job inventory does not have enough hosts for slicing')
+                continue
             # accepted prompt
             prompted_data[field_name] = new_value
         else:
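The new branch feeds the accept/reject pattern used throughout prompt validation: rejected values land in `rejected_data` plus `errors_dict`, which the API surfaces as a 400 (see test_slice_count_not_supported below). A condensed sketch of the pattern, with a hypothetical helper name:

    # Illustrative only: validate_prompts is a made-up name for the loop above.
    def validate_prompts(kwargs, effective_slice_ct):
        prompted, rejected, errors = {}, {}, {}
        for field_name, new_value in kwargs.items():
            if field_name == 'job_slice_count' and new_value > 1 and effective_slice_ct <= 1:
                rejected[field_name] = new_value
                errors[field_name] = 'Job inventory does not have enough hosts for slicing'
                continue
            prompted[field_name] = new_value  # accepted prompt
        return prompted, rejected, errors

    print(validate_prompts({'job_slice_count': 8}, effective_slice_ct=1))
    # ({}, {'job_slice_count': 8}, {'job_slice_count': 'Job inventory does not ...'})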
@@ -985,37 +985,33 @@ class UnifiedJob(
         for field_name, value in kwargs.items():
             if field_name not in valid_fields:
                 raise Exception('Unrecognized launch config field {}.'.format(field_name))
-            if isinstance(getattr(self.__class__, field_name).field, models.ManyToManyField):
+            field = None
+            # may use extra_data as a proxy for extra_vars
+            if field_name in config.SUBCLASS_FIELDS and field_name != 'extra_vars':
+                field = config._meta.get_field(field_name)
+            if isinstance(field, models.ManyToManyField):
                 many_to_many_fields.append(field_name)
                 continue
-            if isinstance(getattr(self.__class__, field_name).field, (models.ForeignKey)):
-                if value:
-                    setattr(config, "{}_id".format(field_name), value.id)
-                continue
-            key = field_name
-            if key == 'extra_vars':
-                key = 'extra_data'
-            setattr(config, key, value)
+            if isinstance(field, (models.ForeignKey)) and (value is None):
+                continue  # the null value indicates not-provided for ForeignKey case
+            setattr(config, field_name, value)
         config.save()

         for field_name in many_to_many_fields:
-            if field_name == 'credentials':
-                # Credentials are a special case of many to many because of how they function
-                # (i.e. you can't have > 1 machine cred)
-                job_item = set(kwargs.get(field_name, []))
-                if field_name in [field.name for field in parent._meta.get_fields()]:
-                    job_item = job_item - set(getattr(parent, field_name).all())
-                if job_item:
-                    getattr(config, field_name).add(*job_item)
+            prompted_items = kwargs.get(field_name, [])
+            if not prompted_items:
+                continue
+            if field_name == 'instance_groups':
+                # Here we are doing a loop to make sure we preserve order for this Ordered field
+                # also do not merge IGs with parent, so this saves the literal list
+                for item in prompted_items:
+                    getattr(config, field_name).add(item)
             else:
-                # Here we are doing a loop to make sure we preserve order in case this is a Ordered field
-                job_item = kwargs.get(field_name, [])
-                if job_item:
-                    parent_items = list(getattr(parent, field_name, []).all())
-                    for item in job_item:
-                        # Do not include this item in the config if its in the parent
-                        if item not in parent_items:
-                            getattr(config, field_name).add(item)
+                # Assuming this field merges prompts with parent, save just the diff
+                if field_name in [field.name for field in parent._meta.get_fields()]:
+                    prompted_items = set(prompted_items) - set(getattr(parent, field_name).all())
+                if prompted_items:
+                    getattr(config, field_name).add(*prompted_items)

         return config
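To make the merge-vs-literal distinction above concrete: fields that merge prompts with the parent (labels, credentials) store only the diff against the parent template, while instance_groups stores the prompted list verbatim, preserving order. An illustrative plain-Python stand-in for that ORM logic:

    # Illustrative stand-in; plain sets and lists instead of related managers.
    parent_labels = {'foo'}
    prompted_labels = ['foo', 'faz']
    saved_labels = set(prompted_labels) - parent_labels
    print(saved_labels)  # {'faz'} -- only the diff with the parent is stored

    prompted_igs = ['ig2', 'ig1']
    saved_igs = list(prompted_igs)  # literal, order-preserving; never merged with parent
    print(saved_igs)  # ['ig2', 'ig1']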
@@ -14,6 +14,8 @@ from awx.api.versioning import reverse
 def runtime_data(organization, credentialtype_ssh):
     cred_obj = Credential.objects.create(name='runtime-cred', credential_type=credentialtype_ssh, inputs={'username': 'test_user2', 'password': 'pas4word2'})
     inv_obj = organization.inventories.create(name="runtime-inv")
+    inv_obj.hosts.create(name='foo1')
+    inv_obj.hosts.create(name='foo2')
     ee_obj = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
     ig_obj = InstanceGroup.objects.create(name='bar', policy_instance_percentage=100, policy_instance_minimum=2)
     labels_obj = Label.objects.create(name='foo', description='bar', organization=organization)
@@ -30,7 +32,7 @@ def runtime_data(organization, credentialtype_ssh):
         execution_environment=ee_obj.pk,
         labels=[labels_obj.pk],
         forks=7,
-        job_slice_count=12,
+        job_slice_count=2,
         timeout=10,
         instance_groups=[ig_obj.pk],
     )
@@ -189,7 +191,7 @@ def test_job_accept_empty_tags(job_template_prompts, post, admin_user, mocker):
     with mocker.patch('awx.api.serializers.JobSerializer.to_representation'):
         post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'job_tags': '', 'skip_tags': ''}, admin_user, expect=201)
         assert JobTemplate.create_unified_job.called
-        assert JobTemplate.create_unified_job.call_args == ({'job_tags': '', 'skip_tags': '', 'forks': 1, 'job_slice_count': 0},)
+        assert JobTemplate.create_unified_job.call_args == ({'job_tags': '', 'skip_tags': ''},)

     mock_job.signal_start.assert_called_once()
@@ -211,6 +213,17 @@ def test_slice_timeout_forks_need_int(job_template_prompts, post, admin_user, mo
     assert 'timeout' in response.data and response.data['timeout'][0] == 'A valid integer is required.'


+@pytest.mark.django_db
+@pytest.mark.job_runtime_vars
+def test_slice_count_not_supported(job_template_prompts, post, admin_user):
+    job_template = job_template_prompts(True)
+    assert job_template.inventory.hosts.count() == 0
+    job_template.inventory.hosts.create(name='foo')
+
+    response = post(reverse('api:job_template_launch', kwargs={'pk': job_template.pk}), {'job_slice_count': 8}, admin_user, expect=400)
+    assert response.data['job_slice_count'][0] == 'Job inventory does not have enough hosts for slicing'
+
+
 @pytest.mark.django_db
 @pytest.mark.job_runtime_vars
 def test_job_accept_prompted_vars_null(runtime_data, job_template_prompts_null, post, rando, mocker):
@@ -79,3 +79,11 @@ class TestSlicingModels:
         assert job_template.get_effective_slice_ct({'inventory': inventory2}) == 2
         # Now we are going to pass in an override (like the prompt would) and as long as that is < host count we expect that back
         assert job_template.get_effective_slice_ct({'inventory': inventory2, 'job_slice_count': 3}) == 3
+
+    def test_slice_count_prompt_limited_by_inventory(self, job_template, inventory, organization):
+        assert inventory.hosts.count() == 0
+        job_template.inventory = inventory
+        inventory.hosts.create(name='foo')
+
+        unified_job = job_template.create_unified_job(job_slice_count=2)
+        assert isinstance(unified_job, Job)
@@ -154,6 +154,7 @@ class TestLaunchConfig:
         job = Job.objects.create(job_template=job_template)
         ig1 = InstanceGroup.objects.create(name='bar')
         ig2 = InstanceGroup.objects.create(name='foo')
+        job_template.instance_groups.add(ig2)
         label1 = Label.objects.create(name='foo', description='bar', organization=organization)
         label2 = Label.objects.create(name='faz', description='baz', organization=organization)
         # Order should matter here which is why we do 2 and then 1
@@ -55,7 +55,6 @@ class TestJobSerializerGetRelated:
         'job_events',
         'relaunch',
         'labels',
-        'instance_groups',
     ],
 )
 def test_get_related(self, test_get_related, job, related_resource_name):