Comment out everything related to instance groups; will add back once PR 13584 goes in

linting

linting again

Use the correct role on org permission check

Co-authored-by: Elijah DeLee <kdelee@redhat.com>

Update docs/bulk_api.md

Co-authored-by: Elijah DeLee <kdelee@redhat.com>

Update docs/bulk_api.md

Co-authored-by: Elijah DeLee <kdelee@redhat.com>

Update awx/main/access.py

Co-authored-by: Elijah DeLee <kdelee@redhat.com>

Update awx/main/access.py

Co-authored-by: Elijah DeLee <kdelee@redhat.com>

Update docs/bulk_api.md

Co-authored-by: Alan Rominger <arominge@redhat.com>

fix collection test (#19)

improve readability of through model object creation (#18)

lower num jobs/hosts in tests (#20)

We can test query scaling at lower numbers to reduce load in tests. We suspect
the higher counts were causing some flakiness in the tests on PRs.
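A minimal sketch of the query-count pattern these tests rely on, assuming pytest-django's `django_assert_max_num_queries` fixture and AWX-style conftest fixtures (`post`, `admin`, `inventory`) whose exact signatures are assumptions here; the numbers are illustrative:

import pytest


# Sketch only: asserts that the query count stays flat as the input grows, so
# a per-host N+1 query pattern fails fast instead of causing slow, flaky runs.
@pytest.mark.django_db
@pytest.mark.parametrize('num_hosts, max_queries', [(1, 15), (10, 15)])
def test_bulk_host_create_query_scaling(post, admin, inventory, num_hosts, max_queries, django_assert_max_num_queries):
    payload = {
        'inventory': inventory.id,
        'hosts': [{'name': f'host-{i}'} for i in range(num_hosts)],
    }
    with django_assert_max_num_queries(max_queries):
        # Assumed AWX conftest helper signature: post(url, data, user, expect=...)
        post('/api/v2/bulk/host_create/', payload, admin, expect=201)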

adjust the num of queries
Authored by jainnikhil30 on 2023-03-07 19:10:19 +05:30; committed by Elijah DeLee
parent ac99708952
commit 0de7551477
6 changed files with 51 additions and 26 deletions

View File

@@ -4544,11 +4544,12 @@ class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
# many-to-many fields
credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
# TODO: Use instance group role added via PR 13584(once merged), for now everything related to instance group is commented
# instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credentials', 'labels', 'instance_groups') # m2m fields are not canonical for WJ nodes
fields = ('*', 'credentials', 'labels') # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported
def validate(self, attrs):
return super(LaunchConfigurationBaseSerializer, self).validate(attrs)
@@ -4608,21 +4609,21 @@ class BulkJobLaunchSerializer(serializers.Serializer):
requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
requested_use_credentials = set()
requested_use_labels = set()
requested_use_instance_groups = set()
# requested_use_instance_groups = set()
for job in attrs['jobs']:
for cred in job.get('credentials', []):
requested_use_credentials.add(cred)
for label in job.get('labels', []):
requested_use_labels.add(label)
for instance_group in job.get('instance_groups', []):
requested_use_instance_groups.add(instance_group)
# for instance_group in job.get('instance_groups', []):
# requested_use_instance_groups.add(instance_group)
key_to_obj_map = {
"unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
"inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
"credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
"labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
"instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
# "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
"execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
}
@@ -4649,7 +4650,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
self.check_list_permission(Credential, requested_use_credentials, 'use_role')
self.check_list_permission(Label, requested_use_labels)
self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
# self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced
jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)
@@ -4696,7 +4697,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
node_m2m_object_types_to_through_model = {
'credentials': WorkflowJobNode.credentials.through,
'labels': WorkflowJobNode.labels.through,
'instance_groups': WorkflowJobNode.instance_groups.through,
# 'instance_groups': WorkflowJobNode.instance_groups.through,
}
node_deferred_attr_names = (
'limit',
@@ -4740,20 +4741,20 @@ class BulkJobLaunchSerializer(serializers.Serializer):
WorkflowJobNode.objects.bulk_create(nodes)
# Deal with the m2m objects we have to create once the node exists
for obj_type, obj_through_model in node_m2m_object_types_to_through_model.items():
through_models = []
for field_name, through_model in node_m2m_object_types_to_through_model.items():
through_model_objects = []
for node_identifier in node_m2m_objects.keys():
if obj_type in node_m2m_objects[node_identifier] and obj_type == 'credentials':
for cred in node_m2m_objects[node_identifier][obj_type]:
through_models.append(obj_through_model(credential=cred, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if obj_type in node_m2m_objects[node_identifier] and obj_type == 'labels':
for label in node_m2m_objects[node_identifier][obj_type]:
through_models.append(obj_through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
for instance_group in node_m2m_objects[node_identifier][obj_type]:
through_models.append(obj_through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if through_models:
obj_through_model.objects.bulk_create(through_models)
if field_name in node_m2m_objects[node_identifier] and field_name == 'credentials':
for cred in node_m2m_objects[node_identifier][field_name]:
through_model_objects.append(through_model(credential=cred, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
for label in node_m2m_objects[node_identifier][field_name]:
through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
# if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
# for instance_group in node_m2m_objects[node_identifier][obj_type]:
# through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
if through_model_objects:
through_model.objects.bulk_create(through_model_objects)
wfj.save()
wfj.signal_start()
@@ -4765,7 +4766,7 @@ class BulkJobLaunchSerializer(serializers.Serializer):
# - If the orgs is not set, set it to the org of the launching user
# - If the user is part of multiple orgs, throw a validation error saying user is part of multiple orgs, please provide one
if not request.user.is_superuser:
read_org_qs = Organization.accessible_objects(request.user, 'read_role')
read_org_qs = Organization.accessible_objects(request.user, 'member_role')
if 'organization' not in attrs or attrs['organization'] == None or attrs['organization'] == '':
read_org_ct = read_org_qs.count()
if read_org_ct == 1:

View File

@@ -2004,6 +2004,7 @@ class WorkflowJobNodeAccess(BaseAccess):
)
def can_read(self, obj):
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
if obj.workflow_job.is_bulk_job and obj.workflow_job.created_by_id == self.user.id:
return True
return super().can_read(obj)
@@ -2138,6 +2139,7 @@ class WorkflowJobAccess(BaseAccess):
)
def can_read(self, obj):
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
if obj.is_bulk_job and obj.created_by_id == self.user.id:
return True
return super().can_read(obj)

View File

@@ -10,7 +10,7 @@ from awx.main.scheduler import TaskManager
@pytest.mark.django_db
@pytest.mark.parametrize('num_hosts, num_queries', [(9, 15), (99, 20)])
@pytest.mark.parametrize('num_hosts, num_queries', [(1, 15), (10, 15)])
def test_bulk_host_create_num_queries(organization, inventory, post, get, user, num_hosts, num_queries, django_assert_max_num_queries):
'''
If I am a...
@@ -80,7 +80,7 @@ def test_bulk_host_create_rbac(organization, inventory, post, get, user):
@pytest.mark.django_db
@pytest.mark.parametrize('num_jobs, num_queries', [(9, 30), (99, 35)])
@pytest.mark.parametrize('num_jobs, num_queries', [(1, 25), (10, 25)])
def test_bulk_job_launch_queries(job_template, organization, inventory, project, post, get, user, num_jobs, num_queries, django_assert_max_num_queries):
'''
if I have access to the unified job template

View File

@@ -68,7 +68,6 @@ Notable releases of the `awx.awx` collection:
- 7.0.0 is intended to be identical to the content prior to the migration, aside from changes necessary to function as a collection.
- 11.0.0 has no non-deprecated modules that depend on the deprecated `tower-cli` [PyPI](https://pypi.org/project/ansible-tower-cli/).
- 19.2.1 large renaming purged "tower" names (like options and module names), adding redirects for old names
- 21.11.0 "tower" modules deprecated and symlinks removed.
- 0.0.1-devel is the version you should see if installing from source, which is intended for development and expected to be unstable.
The following notes are changes that may require changes to playbooks:

View File

@@ -48,7 +48,7 @@
skip_tags: "skipbaz"
job_tags: "Hello World"
limit: "localhost"
wait: False
wait: True
inventory: Demo Inventory
organization: Default
register: result

View File

@@ -4,6 +4,10 @@ Bulk API endpoints allows to perform bulk operations in single web request. Ther
- /api/v2/bulk/job_launch
- /api/v2/bulk/host_create
Making individual API calls in rapid succession or at high concurrency can overwhelm AWX's ability to serve web requests. When the application's ability to serve is exhausted, clients often receive 504 timeout errors.
Allowing the client to combine actions into fewer requests makes it possible to launch more jobs or add more hosts in less time, without exhausting the Controller's ability to serve requests, making excessive and repetitive database queries, or using excessive database connections (each web request opens a separate database connection).
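As an illustration of that intent, a single bulk request can replace many individual host-create calls. A minimal sketch using Python's `requests` library; the hostname, token, and inventory id are placeholders, and the payload shape follows the host_create format described in this document:

import requests

AWX_URL = "https://awx.example.com"   # placeholder
TOKEN = "<oauth2-token>"              # placeholder

# One request that creates 100 hosts, instead of 100 separate POSTs.
payload = {
    "inventory": 1,                   # placeholder inventory id
    "hosts": [{"name": f"host-{i}.example.com"} for i in range(100)],
}

resp = requests.post(
    f"{AWX_URL}/api/v2/bulk/host_create/",
    json=payload,
    headers={"Authorization": f"Bearer {TOKEN}"},
)
resp.raise_for_status()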
## Bulk Job Launch
Provides a feature in the API that allows a single web request to launch multiple jobs. It creates a workflow job with the individual jobs as nodes within it. It also supports providing promptable fields like inventory, credentials, etc.
@@ -50,11 +54,30 @@ Prompted field value can also be provided at the top level. For example:
In the above example, `inventory: 2` will be used for the job templates (11, 12 and 13) in which inventory is marked as prompt on launch.
*Note:* The `instance_groups` relationship is not supported for node-level prompts, unlike `credentials` in the above example, and will be ignored if provided. See the OPTIONS response for `/api/v2/bulk/job_launch/` for the fields accepted at the workflow and node level; that is the ultimate source of truth for what the API will accept.
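A quick way to consult that source of truth is an OPTIONS request against the endpoint; a small sketch with `requests` (URL and token are placeholders, and the `actions`/`POST` keys follow the standard DRF metadata layout):

import requests

resp = requests.options(
    "https://awx.example.com/api/v2/bulk/job_launch/",    # placeholder URL
    headers={"Authorization": "Bearer <oauth2-token>"},   # placeholder token
)
resp.raise_for_status()
# The POST section of the metadata lists the fields the launch endpoint accepts.
print(sorted(resp.json().get("actions", {}).get("POST", {}).keys()))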
### RBAC For Bulk Job Launch
#### Who can bulk launch?
Anyone who is logged in can view the launch point. In order to launch a unified_job_template, you need either the `update` or `execute` role, depending on the type of unified job (job template, project update, etc.).
Launching via the bulk endpoint results in a workflow job being launched. For auditing purposes, we generally require an organization to be assigned to the resulting workflow job. The logic for assigning this organization is as follows:
- Superusers may assign any organization or none. If they do not assign one, they will be the only user able to see the parent workflow job.
- Users that are members of exactly one organization do not need to specify an organization; their single organization will be assigned to the resulting workflow job.
- Users that are members of multiple organizations must specify the organization to assign to the resulting workflow job. If they do not, an error is returned indicating this requirement.
Example of specifying the organization:
{
"name": "Bulk Job Launch with org specified",
"jobs": [
{"unified_job_template": 12},
{"unified_job_template": 13}
],
"organization": 2
}
#### Who can see bulk jobs that have been run?
System admins and organization admins will see bulk jobs in the workflow jobs list and the unified jobs list. They can additionally view the individual workflow job details.
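For example, a bulk launch surfaces as a regular workflow job, so users with visibility can retrieve it from the standard workflow jobs listing; a small sketch (placeholder URL and token):

import requests

# Bulk launches appear in the normal workflow jobs listing, subject to the
# visibility rules above (admins, or the user who launched the bulk job).
resp = requests.get(
    "https://awx.example.com/api/v2/workflow_jobs/",      # placeholder URL
    headers={"Authorization": "Bearer <oauth2-token>"},   # placeholder token
)
resp.raise_for_status()
for wf_job in resp.json()["results"]:
    print(wf_job["id"], wf_job["name"], wf_job["status"])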