Mirror of https://github.com/ansible/awx.git (synced 2026-02-05 11:34:43 -03:30)

Compare commits: 21.13.0...avoid_reap (75 commits)
| SHA1 |
|---|
| 3f83647600 |
| 6461ecc762 |
| 3e6e0463b9 |
| ededc61a71 |
| 3747f5b097 |
| 790ccd984c |
| 5d0849d746 |
| 7f1750324f |
| a63067da38 |
| 7a45048463 |
| 97a5e87448 |
| 11475590e7 |
| 7e88a735ad |
| 2f3e65d4ef |
| cc18c1220a |
| d2aa1b94e3 |
| a97c1b46c0 |
| 6a3282a689 |
| be27d89895 |
| 160508c907 |
| 5a3900a927 |
| f2bfaf7aca |
| d1cf7245f7 |
| 0de7551477 |
| ac99708952 |
| 47b7bbeda7 |
| bca0f2dd47 |
| 3efc7d5bc4 |
| 4b9ca3deee |
| f622d3a1e6 |
| ede1b9af92 |
| 2becc5dda9 |
| 7aad16964c |
| b1af27c4f6 |
| 7cb16ef91d |
| 9358d59f20 |
| 9e037f1a02 |
| 266ebe5501 |
| ce5270434c |
| 34834252ff |
| 861ba8a727 |
| 02e5ba5f94 |
| 81ba6c0234 |
| 5c47c24e28 |
| 752289e175 |
| a24aaba6bc |
| 349785550c |
| ab6511a833 |
| a7b4c03188 |
| a5f9506f49 |
| 8e6f4fae80 |
| a952ab0a75 |
| 7cca6c4cd9 |
| 3945db60eb |
| 252b0dda9f |
| 0a2f1622f6 |
| 00817d6b89 |
| 06808ef4c4 |
| 3aba5b5a04 |
| 5c19efdc32 |
| 217dc57c24 |
| 90f54b98cd |
| b143df3183 |
| 6fa22f5be2 |
| d5de1f9d11 |
| 7cca39d069 |
| a6a9d3427c |
| f0c91bb1f3 |
| b1dceefac3 |
| bb65945b4f |
| 1b8f6630bf |
| 5157838d83 |
| ebabea54e1 |
| 0eaa7816e9 |
| 83149519f8 |
.github/workflows/devel_images.yml (vendored): 22 lines changed
@@ -7,6 +7,7 @@ on:
branches:
- devel
- release_*
- feature_*
jobs:
push:
if: endsWith(github.repository, '/awx') || startsWith(github.ref, 'refs/heads/release_')
@@ -20,6 +21,12 @@ jobs:
- name: Get python version from Makefile
run: echo py_version=`make PYTHON_VERSION` >> $GITHUB_ENV

- name: Set lower case owner name
run: |
echo "OWNER_LC=${OWNER,,}" >>${GITHUB_ENV}
env:
OWNER: '${{ github.repository_owner }}'

- name: Install python ${{ env.py_version }}
uses: actions/setup-python@v2
with:
@@ -31,15 +38,18 @@ jobs:

- name: Pre-pull image to warm build cache
run: |
docker pull ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/} || :
docker pull ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/} || :

- name: Build images
run: |
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${{ github.repository_owner }} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make docker-compose-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-dev-build
DEV_DOCKER_TAG_BASE=ghcr.io/${OWNER_LC} COMPOSE_TAG=${GITHUB_REF##*/} make awx-kube-build

- name: Push image
run: |
docker push ghcr.io/${{ github.repository_owner }}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${{ github.repository_owner }}/awx_kube_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx_kube_devel:${GITHUB_REF##*/}
docker push ghcr.io/${OWNER_LC}/awx:${GITHUB_REF##*/}
Makefile: 21 lines changed
@@ -411,12 +411,14 @@ ui-release: $(UI_BUILD_FLAG_FILE)

ui-devel: awx/ui/node_modules
@$(MAKE) -B $(UI_BUILD_FLAG_FILE)
mkdir -p /var/lib/awx/public/static/css
mkdir -p /var/lib/awx/public/static/js
mkdir -p /var/lib/awx/public/static/media
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media
@if [ -d "/var/lib/awx" ] ; then \
mkdir -p /var/lib/awx/public/static/css; \
mkdir -p /var/lib/awx/public/static/js; \
mkdir -p /var/lib/awx/public/static/media; \
cp -r awx/ui/build/static/css/* /var/lib/awx/public/static/css; \
cp -r awx/ui/build/static/js/* /var/lib/awx/public/static/js; \
cp -r awx/ui/build/static/media/* /var/lib/awx/public/static/media; \
fi

ui-devel-instrumented: awx/ui/node_modules
$(NPM_BIN) --prefix awx/ui --loglevel warn run start-instrumented
@@ -536,10 +538,8 @@ docker-compose-build:
--cache-from=$(DEV_DOCKER_TAG_BASE)/awx_devel:$(COMPOSE_TAG) .

docker-clean:
$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
if [ "$(shell docker images | grep awx_devel)" ]; then \
docker images | grep awx_devel | awk '{print $$3}' | xargs docker rmi --force; \
fi
-$(foreach container_id,$(shell docker ps -f name=tools_awx -aq && docker ps -f name=tools_receptor -aq),docker stop $(container_id); docker rm -f $(container_id);)
-$(foreach image_id,$(shell docker images --filter=reference='*awx_devel*' -aq),docker rmi --force $(image_id);)

docker-clean-volumes: docker-compose-clean docker-compose-container-group-clean
docker volume rm -f tools_awx_db tools_grafana_storage tools_prometheus_storage $(docker volume ls --filter name=tools_redis_socket_ -q)
@@ -573,6 +573,7 @@ VERSION:
PYTHON_VERSION:
@echo "$(PYTHON)" | sed 's:python::'

.PHONY: Dockerfile
Dockerfile: tools/ansible/roles/dockerfile/templates/Dockerfile.j2
ansible-playbook tools/ansible/dockerfile.yml -e receptor_image=$(RECEPTOR_IMAGE)
@@ -8,6 +8,7 @@ import logging
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from datetime import timedelta
|
||||
from uuid import uuid4
|
||||
|
||||
# OAuth2
|
||||
from oauthlib import oauth2
|
||||
@@ -109,11 +110,14 @@ from awx.main.utils import (
|
||||
encrypt_dict,
|
||||
prefetch_page_capabilities,
|
||||
truncate_stdout,
|
||||
get_licenser,
|
||||
)
|
||||
from awx.main.utils.filters import SmartFilter
|
||||
from awx.main.utils.named_url_graph import reset_counters
|
||||
from awx.main.scheduler.task_manager_models import TaskManagerModels
|
||||
from awx.main.redact import UriCleaner, REPLACE_STR
|
||||
from awx.main.signals import update_inventory_computed_fields
|
||||
|
||||
|
||||
from awx.main.validators import vars_validate_or_raise
|
||||
|
||||
@@ -156,7 +160,7 @@ SUMMARIZABLE_FK_FIELDS = {
|
||||
'default_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
|
||||
'execution_environment': DEFAULT_SUMMARY_FIELDS + ('image',),
|
||||
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type', 'allow_override'),
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
|
||||
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type', 'allow_override'),
|
||||
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed'),
|
||||
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'kubernetes', 'credential_type_id'),
|
||||
'signature_validation_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'credential_type_id'),
|
||||
@@ -1853,7 +1857,7 @@ class HostSerializer(BaseSerializerWithVariables):
|
||||
vars_dict = parse_yaml_or_json(variables)
|
||||
vars_dict['ansible_ssh_port'] = port
|
||||
attrs['variables'] = json.dumps(vars_dict)
|
||||
if Group.objects.filter(name=name, inventory=inventory).exists():
|
||||
if inventory and Group.objects.filter(name=name, inventory=inventory).exists():
|
||||
raise serializers.ValidationError(_('A Group with that name already exists.'))
|
||||
|
||||
return super(HostSerializer, self).validate(attrs)
|
||||
@@ -1945,6 +1949,130 @@ class GroupSerializer(BaseSerializerWithVariables):
|
||||
return ret
|
||||
|
||||
|
||||
class BulkHostSerializer(HostSerializer):
|
||||
class Meta:
|
||||
model = Host
|
||||
fields = (
|
||||
'name',
|
||||
'enabled',
|
||||
'instance_id',
|
||||
'description',
|
||||
'variables',
|
||||
)
|
||||
|
||||
|
||||
class BulkHostCreateSerializer(serializers.Serializer):
|
||||
inventory = serializers.PrimaryKeyRelatedField(
|
||||
queryset=Inventory.objects.all(), required=True, write_only=True, help_text=_('Primary Key ID of inventory to add hosts to.')
|
||||
)
|
||||
hosts = serializers.ListField(
|
||||
child=BulkHostSerializer(),
|
||||
allow_empty=False,
|
||||
max_length=100000,
|
||||
write_only=True,
|
||||
help_text=_('List of hosts to be created, JSON. e.g. [{"name": "example.com"}, {"name": "127.0.0.1"}]'),
|
||||
)
|
||||
|
||||
class Meta:
|
||||
model = Inventory
|
||||
fields = ('inventory', 'hosts')
|
||||
read_only_fields = ()
|
||||
|
||||
def raise_if_host_counts_violated(self, attrs):
|
||||
validation_info = get_licenser().validate()
|
||||
|
||||
org = attrs['inventory'].organization
|
||||
|
||||
if org:
|
||||
org_active_count = Host.objects.org_active_count(org.id)
|
||||
new_hosts = [h['name'] for h in attrs['hosts']]
|
||||
org_net_new_host_count = len(new_hosts) - Host.objects.filter(inventory__organization=1, name__in=new_hosts).values('name').distinct().count()
|
||||
if org.max_hosts > 0 and org_active_count + org_net_new_host_count > org.max_hosts:
|
||||
raise PermissionDenied(
|
||||
_(
|
||||
"You have already reached the maximum number of %s hosts"
|
||||
" allowed for your organization. Contact your System Administrator"
|
||||
" for assistance." % org.max_hosts
|
||||
)
|
||||
)
|
||||
|
||||
# Don't check license if it is open license
|
||||
if validation_info.get('license_type', 'UNLICENSED') == 'open':
|
||||
return
|
||||
|
||||
sys_free_instances = validation_info.get('free_instances', 0)
|
||||
system_net_new_host_count = Host.objects.exclude(name__in=new_hosts).count()
|
||||
|
||||
if system_net_new_host_count > sys_free_instances:
|
||||
hard_error = validation_info.get('trial', False) is True or validation_info['instance_count'] == 10
|
||||
if hard_error:
|
||||
# Only raise permission error for trial, otherwise just log a warning as we do in other inventory import situations
|
||||
raise PermissionDenied(_("Host count exceeds available instances."))
|
||||
logger.warning(_("Number of hosts allowed by license has been exceeded."))
|
||||
|
||||
def validate(self, attrs):
|
||||
request = self.context.get('request', None)
|
||||
inv = attrs['inventory']
|
||||
if inv.kind != '':
|
||||
raise serializers.ValidationError(_('Hosts can only be created in manual inventories (not smart or constructed types).'))
|
||||
if len(attrs['hosts']) > settings.BULK_HOST_MAX_CREATE:
|
||||
raise serializers.ValidationError(_('Number of hosts exceeds system setting BULK_HOST_MAX_CREATE'))
|
||||
if request and not request.user.is_superuser:
|
||||
if request.user not in inv.admin_role:
|
||||
raise serializers.ValidationError(_(f'Inventory with id {inv.id} not found or lack permissions to add hosts.'))
|
||||
current_hostnames = set(inv.hosts.values_list('name', flat=True))
|
||||
new_names = [host['name'] for host in attrs['hosts']]
|
||||
duplicate_new_names = [n for n in new_names if n in current_hostnames or new_names.count(n) > 1]
|
||||
if duplicate_new_names:
|
||||
raise serializers.ValidationError(_(f'Hostnames must be unique in an inventory. Duplicates found: {duplicate_new_names}'))
|
||||
|
||||
self.raise_if_host_counts_violated(attrs)
|
||||
|
||||
_now = now()
|
||||
for host in attrs['hosts']:
|
||||
host['created'] = _now
|
||||
host['modified'] = _now
|
||||
host['inventory'] = inv
|
||||
return attrs
|
||||
|
||||
def create(self, validated_data):
|
||||
# This assumes total_hosts is up to date, and it can get out of date if the inventory computed fields have not been updated lately.
|
||||
# If we wanted to side step this we could query Hosts.objects.filter(inventory...)
|
||||
old_total_hosts = validated_data['inventory'].total_hosts
|
||||
result = [Host(**attrs) for attrs in validated_data['hosts']]
|
||||
try:
|
||||
Host.objects.bulk_create(result)
|
||||
except Exception as e:
|
||||
raise serializers.ValidationError({"detail": _(f"cannot create host, host creation error {e}")})
|
||||
new_total_hosts = old_total_hosts + len(result)
|
||||
request = self.context.get('request', None)
|
||||
changes = {'total_hosts': [old_total_hosts, new_total_hosts]}
|
||||
activity_entry = ActivityStream.objects.create(
|
||||
operation='update',
|
||||
object1='inventory',
|
||||
changes=json.dumps(changes),
|
||||
actor=request.user,
|
||||
)
|
||||
activity_entry.inventory.add(validated_data['inventory'])
|
||||
|
||||
# This actually updates the cached "total_hosts" field on the inventory
|
||||
update_inventory_computed_fields.delay(validated_data['inventory'].id)
|
||||
return_keys = [k for k in BulkHostSerializer().fields.keys()] + ['id']
|
||||
return_data = {}
|
||||
host_data = []
|
||||
for r in result:
|
||||
item = {k: getattr(r, k) for k in return_keys}
|
||||
if not settings.IS_TESTING_MODE:
|
||||
# sqlite acts different with bulk_create -- it doesn't return the id of the objects
|
||||
# to get it, you have to do an additional query, which is not useful for our tests
|
||||
item['url'] = reverse('api:host_detail', kwargs={'pk': r.id})
|
||||
item['inventory'] = reverse('api:inventory_detail', kwargs={'pk': validated_data['inventory'].id})
|
||||
host_data.append(item)
|
||||
return_data['url'] = reverse('api:inventory_detail', kwargs={'pk': validated_data['inventory'].id})
|
||||
return_data['hosts'] = host_data
|
||||
return return_data
|
||||
|
||||
|
||||
class GroupTreeSerializer(GroupSerializer):
|
||||
children = serializers.SerializerMethodField()
|
||||
|
||||
@@ -2000,6 +2128,7 @@ class InventorySourceOptionsSerializer(BaseSerializer):
|
||||
'source',
|
||||
'source_path',
|
||||
'source_vars',
|
||||
'scm_branch',
|
||||
'credential',
|
||||
'enabled_var',
|
||||
'enabled_value',
|
||||
@@ -2164,10 +2293,14 @@ class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOpt
|
||||
if ('source' in attrs or 'source_project' in attrs) and get_field_from_model_or_attrs('source_project') is None:
|
||||
raise serializers.ValidationError({"source_project": _("Project required for scm type sources.")})
|
||||
else:
|
||||
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path']))
|
||||
redundant_scm_fields = list(filter(lambda x: attrs.get(x, None), ['source_project', 'source_path', 'scm_branch']))
|
||||
if redundant_scm_fields:
|
||||
raise serializers.ValidationError({"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))})
|
||||
|
||||
project = get_field_from_model_or_attrs('source_project')
|
||||
if get_field_from_model_or_attrs('scm_branch') and not project.allow_override:
|
||||
raise serializers.ValidationError({'scm_branch': _('Project does not allow overriding branch.')})
|
||||
|
||||
attrs = super(InventorySourceSerializer, self).validate(attrs)
|
||||
|
||||
# Check type consistency of source and cloud credential, if provided
|
||||
@@ -4405,6 +4538,271 @@ class WorkflowJobLaunchSerializer(BaseSerializer):
|
||||
return accepted
|
||||
|
||||
|
||||
class BulkJobNodeSerializer(WorkflowJobNodeSerializer):
|
||||
# We don't do a PrimaryKeyRelatedField for unified_job_template and others, because that increases the number
|
||||
# of database queries, rather we take them as integer and later convert them to objects in get_objectified_jobs
|
||||
unified_job_template = serializers.IntegerField(
|
||||
required=True, min_value=1, help_text=_('Primary key of the template for this job, can be a job template or inventory source.')
|
||||
)
|
||||
inventory = serializers.IntegerField(required=False, min_value=1)
|
||||
execution_environment = serializers.IntegerField(required=False, min_value=1)
|
||||
# many-to-many fields
|
||||
credentials = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
labels = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
# TODO: Use instance group role added via PR 13584(once merged), for now everything related to instance group is commented
|
||||
# instance_groups = serializers.ListField(child=serializers.IntegerField(min_value=1), required=False)
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJobNode
|
||||
fields = ('*', 'credentials', 'labels') # m2m fields are not canonical for WJ nodes, TODO: add instance_groups once supported
|
||||
|
||||
def validate(self, attrs):
|
||||
return super(LaunchConfigurationBaseSerializer, self).validate(attrs)
|
||||
|
||||
def get_validation_exclusions(self, obj=None):
|
||||
ret = super().get_validation_exclusions(obj)
|
||||
ret.extend(['unified_job_template', 'inventory', 'execution_environment'])
|
||||
return ret
|
||||
|
||||
|
||||
class BulkJobLaunchSerializer(serializers.Serializer):
|
||||
name = serializers.CharField(default='Bulk Job Launch', max_length=512, write_only=True, required=False, allow_blank=True) # limited by max name of jobs
|
||||
jobs = BulkJobNodeSerializer(
|
||||
many=True,
|
||||
allow_empty=False,
|
||||
write_only=True,
|
||||
max_length=100000,
|
||||
help_text=_('List of jobs to be launched, JSON. e.g. [{"unified_job_template": 7}, {"unified_job_template": 10}]'),
|
||||
)
|
||||
description = serializers.CharField(write_only=True, required=False, allow_blank=False)
|
||||
extra_vars = serializers.JSONField(write_only=True, required=False)
|
||||
organization = serializers.PrimaryKeyRelatedField(
|
||||
queryset=Organization.objects.all(),
|
||||
required=False,
|
||||
default=None,
|
||||
allow_null=True,
|
||||
write_only=True,
|
||||
help_text=_('Inherit permissions from this organization. If not provided, a organization the user is a member of will be selected automatically.'),
|
||||
)
|
||||
inventory = serializers.PrimaryKeyRelatedField(queryset=Inventory.objects.all(), required=False, write_only=True)
|
||||
limit = serializers.CharField(write_only=True, required=False, allow_blank=False)
|
||||
scm_branch = serializers.CharField(write_only=True, required=False, allow_blank=False)
|
||||
skip_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)
|
||||
job_tags = serializers.CharField(write_only=True, required=False, allow_blank=False)
|
||||
|
||||
class Meta:
|
||||
model = WorkflowJob
|
||||
fields = ('name', 'jobs', 'description', 'extra_vars', 'organization', 'inventory', 'limit', 'scm_branch', 'skip_tags', 'job_tags')
|
||||
read_only_fields = ()
|
||||
|
||||
def validate(self, attrs):
|
||||
request = self.context.get('request', None)
|
||||
identifiers = set()
|
||||
if len(attrs['jobs']) > settings.BULK_JOB_MAX_LAUNCH:
|
||||
raise serializers.ValidationError(_('Number of requested jobs exceeds system setting BULK_JOB_MAX_LAUNCH'))
|
||||
|
||||
for node in attrs['jobs']:
|
||||
if 'identifier' in node:
|
||||
if node['identifier'] in identifiers:
|
||||
raise serializers.ValidationError(_(f"Identifier {node['identifier']} not unique"))
|
||||
identifiers.add(node['identifier'])
|
||||
else:
|
||||
node['identifier'] = str(uuid4())
|
||||
|
||||
requested_ujts = {j['unified_job_template'] for j in attrs['jobs']}
|
||||
requested_use_inventories = {job['inventory'] for job in attrs['jobs'] if 'inventory' in job}
|
||||
requested_use_execution_environments = {job['execution_environment'] for job in attrs['jobs'] if 'execution_environment' in job}
|
||||
requested_use_credentials = set()
|
||||
requested_use_labels = set()
|
||||
# requested_use_instance_groups = set()
|
||||
for job in attrs['jobs']:
|
||||
for cred in job.get('credentials', []):
|
||||
requested_use_credentials.add(cred)
|
||||
for label in job.get('labels', []):
|
||||
requested_use_labels.add(label)
|
||||
# for instance_group in job.get('instance_groups', []):
|
||||
# requested_use_instance_groups.add(instance_group)
|
||||
|
||||
key_to_obj_map = {
|
||||
"unified_job_template": {obj.id: obj for obj in UnifiedJobTemplate.objects.filter(id__in=requested_ujts)},
|
||||
"inventory": {obj.id: obj for obj in Inventory.objects.filter(id__in=requested_use_inventories)},
|
||||
"credentials": {obj.id: obj for obj in Credential.objects.filter(id__in=requested_use_credentials)},
|
||||
"labels": {obj.id: obj for obj in Label.objects.filter(id__in=requested_use_labels)},
|
||||
# "instance_groups": {obj.id: obj for obj in InstanceGroup.objects.filter(id__in=requested_use_instance_groups)},
|
||||
"execution_environment": {obj.id: obj for obj in ExecutionEnvironment.objects.filter(id__in=requested_use_execution_environments)},
|
||||
}
|
||||
|
||||
ujts = {}
|
||||
for ujt in key_to_obj_map['unified_job_template'].values():
|
||||
ujts.setdefault(type(ujt), [])
|
||||
ujts[type(ujt)].append(ujt)
|
||||
|
||||
unallowed_types = set(ujts.keys()) - set([JobTemplate, Project, InventorySource, WorkflowJobTemplate])
|
||||
if unallowed_types:
|
||||
type_names = ' '.join([cls._meta.verbose_name.title() for cls in unallowed_types])
|
||||
raise serializers.ValidationError(_("Template types {type_names} not allowed in bulk jobs").format(type_names=type_names))
|
||||
|
||||
for model, obj_list in ujts.items():
|
||||
role_field = 'execute_role' if issubclass(model, (JobTemplate, WorkflowJobTemplate)) else 'update_role'
|
||||
self.check_list_permission(model, set([obj.id for obj in obj_list]), role_field)
|
||||
|
||||
self.check_organization_permission(attrs, request)
|
||||
|
||||
if 'inventory' in attrs:
|
||||
requested_use_inventories.add(attrs['inventory'].id)
|
||||
|
||||
self.check_list_permission(Inventory, requested_use_inventories, 'use_role')
|
||||
|
||||
self.check_list_permission(Credential, requested_use_credentials, 'use_role')
|
||||
self.check_list_permission(Label, requested_use_labels)
|
||||
# self.check_list_permission(InstanceGroup, requested_use_instance_groups) # TODO: change to use_role for conflict
|
||||
self.check_list_permission(ExecutionEnvironment, requested_use_execution_environments) # TODO: change if roles introduced
|
||||
|
||||
jobs_object = self.get_objectified_jobs(attrs, key_to_obj_map)
|
||||
|
||||
attrs['jobs'] = jobs_object
|
||||
if 'extra_vars' in attrs:
|
||||
extra_vars_dict = parse_yaml_or_json(attrs['extra_vars'])
|
||||
attrs['extra_vars'] = json.dumps(extra_vars_dict)
|
||||
attrs = super().validate(attrs)
|
||||
return attrs
|
||||
|
||||
def check_list_permission(self, model, id_list, role_field=None):
|
||||
if not id_list:
|
||||
return
|
||||
user = self.context['request'].user
|
||||
if role_field is None: # implies "read" level permission is required
|
||||
access_qs = user.get_queryset(model)
|
||||
else:
|
||||
access_qs = model.accessible_objects(user, role_field)
|
||||
|
||||
not_allowed = set(id_list) - set(access_qs.filter(id__in=id_list).values_list('id', flat=True))
|
||||
if not_allowed:
|
||||
raise serializers.ValidationError(
|
||||
_("{model_name} {not_allowed} not found or you don't have permissions to access it").format(
|
||||
model_name=model._meta.verbose_name_plural.title(), not_allowed=not_allowed
|
||||
)
|
||||
)
|
||||
|
||||
def create(self, validated_data):
|
||||
request = self.context.get('request', None)
|
||||
launch_user = request.user if request else None
|
||||
job_node_data = validated_data.pop('jobs')
|
||||
wfj_deferred_attr_names = ('skip_tags', 'limit', 'job_tags')
|
||||
wfj_deferred_vals = {}
|
||||
for item in wfj_deferred_attr_names:
|
||||
wfj_deferred_vals[item] = validated_data.pop(item, None)
|
||||
|
||||
wfj = WorkflowJob.objects.create(**validated_data, is_bulk_job=True, launch_type='manual', created_by=launch_user)
|
||||
for key, val in wfj_deferred_vals.items():
|
||||
if val:
|
||||
setattr(wfj, key, val)
|
||||
nodes = []
|
||||
node_m2m_objects = {}
|
||||
node_m2m_object_types_to_through_model = {
|
||||
'credentials': WorkflowJobNode.credentials.through,
|
||||
'labels': WorkflowJobNode.labels.through,
|
||||
# 'instance_groups': WorkflowJobNode.instance_groups.through,
|
||||
}
|
||||
node_deferred_attr_names = (
|
||||
'limit',
|
||||
'scm_branch',
|
||||
'verbosity',
|
||||
'forks',
|
||||
'diff_mode',
|
||||
'job_tags',
|
||||
'job_type',
|
||||
'skip_tags',
|
||||
'job_slice_count',
|
||||
'timeout',
|
||||
)
|
||||
node_deferred_attrs = {}
|
||||
for node_attrs in job_node_data:
|
||||
# we need to add any m2m objects after creation via the through model
|
||||
node_m2m_objects[node_attrs['identifier']] = {}
|
||||
node_deferred_attrs[node_attrs['identifier']] = {}
|
||||
for item in node_m2m_object_types_to_through_model.keys():
|
||||
if item in node_attrs:
|
||||
node_m2m_objects[node_attrs['identifier']][item] = node_attrs.pop(item)
|
||||
|
||||
# Some attributes are not accepted by WorkflowJobNode __init__, we have to set them after
|
||||
for item in node_deferred_attr_names:
|
||||
if item in node_attrs:
|
||||
node_deferred_attrs[node_attrs['identifier']][item] = node_attrs.pop(item)
|
||||
|
||||
# Create the node objects
|
||||
node_obj = WorkflowJobNode(workflow_job=wfj, created=wfj.created, modified=wfj.modified, **node_attrs)
|
||||
|
||||
# we can set the deferred attrs now
|
||||
for item, value in node_deferred_attrs[node_attrs['identifier']].items():
|
||||
setattr(node_obj, item, value)
|
||||
|
||||
# the node is now ready to be bulk created
|
||||
nodes.append(node_obj)
|
||||
|
||||
# we'll need this later when we do the m2m through model bulk create
|
||||
node_m2m_objects[node_attrs['identifier']]['node'] = node_obj
|
||||
|
||||
WorkflowJobNode.objects.bulk_create(nodes)
|
||||
|
||||
# Deal with the m2m objects we have to create once the node exists
|
||||
for field_name, through_model in node_m2m_object_types_to_through_model.items():
|
||||
through_model_objects = []
|
||||
for node_identifier in node_m2m_objects.keys():
|
||||
if field_name in node_m2m_objects[node_identifier] and field_name == 'credentials':
|
||||
for cred in node_m2m_objects[node_identifier][field_name]:
|
||||
through_model_objects.append(through_model(credential=cred, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
if field_name in node_m2m_objects[node_identifier] and field_name == 'labels':
|
||||
for label in node_m2m_objects[node_identifier][field_name]:
|
||||
through_model_objects.append(through_model(label=label, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
# if obj_type in node_m2m_objects[node_identifier] and obj_type == 'instance_groups':
|
||||
# for instance_group in node_m2m_objects[node_identifier][obj_type]:
|
||||
# through_model_objects.append(through_model(instancegroup=instance_group, workflowjobnode=node_m2m_objects[node_identifier]['node']))
|
||||
if through_model_objects:
|
||||
through_model.objects.bulk_create(through_model_objects)
|
||||
|
||||
wfj.save()
|
||||
wfj.signal_start()
|
||||
|
||||
return WorkflowJobSerializer().to_representation(wfj)
|
||||
|
||||
def check_organization_permission(self, attrs, request):
|
||||
# validate Organization
|
||||
# - If the orgs is not set, set it to the org of the launching user
|
||||
# - If the user is part of multiple orgs, throw a validation error saying user is part of multiple orgs, please provide one
|
||||
if not request.user.is_superuser:
|
||||
read_org_qs = Organization.accessible_objects(request.user, 'member_role')
|
||||
if 'organization' not in attrs or attrs['organization'] == None or attrs['organization'] == '':
|
||||
read_org_ct = read_org_qs.count()
|
||||
if read_org_ct == 1:
|
||||
attrs['organization'] = read_org_qs.first()
|
||||
elif read_org_ct > 1:
|
||||
raise serializers.ValidationError("User has permission to multiple Organizations, please set one of them in the request")
|
||||
else:
|
||||
raise serializers.ValidationError("User not part of any organization, please assign an organization to assign to the bulk job")
|
||||
else:
|
||||
allowed_orgs = set(read_org_qs.values_list('id', flat=True))
|
||||
requested_org = attrs['organization']
|
||||
if requested_org.id not in allowed_orgs:
|
||||
raise ValidationError(_(f"Organization {requested_org.id} not found or you don't have permissions to access it"))
|
||||
|
||||
def get_objectified_jobs(self, attrs, key_to_obj_map):
|
||||
objectified_jobs = []
|
||||
# This loop is generalized so we should only have to add related items to the key_to_obj_map
|
||||
for job in attrs['jobs']:
|
||||
objectified_job = {}
|
||||
for key, value in job.items():
|
||||
if key in key_to_obj_map:
|
||||
if isinstance(value, int):
|
||||
objectified_job[key] = key_to_obj_map[key][value]
|
||||
elif isinstance(value, list):
|
||||
objectified_job[key] = [key_to_obj_map[key][item] for item in value]
|
||||
else:
|
||||
objectified_job[key] = value
|
||||
objectified_jobs.append(objectified_job)
|
||||
return objectified_jobs
|
||||
|
||||
|
||||
class NotificationTemplateSerializer(BaseSerializer):
|
||||
show_capabilities = ['edit', 'delete', 'copy']
|
||||
capabilities_prefetch = [{'copy': 'organization.admin'}]
|
||||
@@ -5073,6 +5471,8 @@ class InstanceGroupSerializer(BaseSerializer):
|
||||
res = super(InstanceGroupSerializer, self).get_related(obj)
|
||||
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
|
||||
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
|
||||
res['access_list'] = self.reverse('api:instance_group_access_list', kwargs={'pk': obj.pk})
|
||||
res['object_roles'] = self.reverse('api:instance_group_object_role_list', kwargs={'pk': obj.pk})
|
||||
if obj.credential:
|
||||
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})
|
||||
|
||||
|
||||
@@ -1,4 +0,0 @@
Version 1 of the Ansible Tower REST API.

Make a GET request to this resource to obtain a list of all child resources
available via the API.
@@ -7,10 +7,12 @@ the following fields (some fields may not be visible to all users):
* `project_base_dir`: Path on the server where projects and playbooks are \
stored.
* `project_local_paths`: List of directories beneath `project_base_dir` to
use when creating/editing a project.
use when creating/editing a manual project.
* `time_zone`: The configured time zone for the server.
* `license_info`: Information about the current license.
* `version`: Version of Ansible Tower package installed.
* `custom_virtualenvs`: Deprecated venv locations from before migration to
execution environments. Export tooling is in `awx-manage` commands.
* `eula`: The current End-User License Agreement
{% endifmeth %}
awx/api/templates/api/bulk_host_create_view.md: 41 lines (new file)
@@ -0,0 +1,41 @@
# Bulk Host Create

This endpoint allows the client to create multiple hosts and associate them with an inventory in a single request, by providing the inventory ID and a list of the JSON objects that would normally be posted to create hosts individually.

Example:

    {
        "inventory": 1,
        "hosts": [
            {"name": "example1.com", "variables": "ansible_connection: local"},
            {"name": "example2.com"}
        ]
    }

Return data:

    {
        "url": "/api/v2/inventories/3/hosts/",
        "hosts": [
            {
                "name": "example1.com",
                "enabled": true,
                "instance_id": "",
                "description": "",
                "variables": "ansible_connection: local",
                "id": 1255,
                "url": "/api/v2/hosts/1255/",
                "inventory": "/api/v2/inventories/3/"
            },
            {
                "name": "example2.com",
                "enabled": true,
                "instance_id": "",
                "description": "",
                "variables": "",
                "id": 1256,
                "url": "/api/v2/hosts/1256/",
                "inventory": "/api/v2/inventories/3/"
            }
        ]
    }
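A minimal client-side sketch of exercising this endpoint. The controller URL `https://awx.example.com` and the `AWX_TOKEN` environment variable are placeholders, not part of this change; the endpoint path comes from the `bulk/host_create/` route added in urls.py below.

    import os

    import requests

    AWX_URL = "https://awx.example.com"  # placeholder controller URL (assumption)
    TOKEN = os.environ["AWX_TOKEN"]  # placeholder OAuth2/personal access token (assumption)

    payload = {
        "inventory": 1,
        "hosts": [
            {"name": "example1.com", "variables": "ansible_connection: local"},
            {"name": "example2.com"},
        ],
    }

    # POST to the bulk host create endpoint added by this change
    resp = requests.post(
        f"{AWX_URL}/api/v2/bulk/host_create/",
        json=payload,
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    resp.raise_for_status()
    print(resp.json()["hosts"])  # created host records, shaped like the return data above

Per BulkHostCreateView below, a successful request returns 201 with the created hosts, while serializer errors (duplicate names, more hosts than BULK_HOST_MAX_CREATE, missing inventory permissions) come back as 400.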
awx/api/templates/api/bulk_job_launch_view.md: 13 lines (new file)
@@ -0,0 +1,13 @@
# Bulk Job Launch

This endpoint allows the client to launch multiple UnifiedJobTemplates at once, alongside any parameters they would normally set at launch time.

Example:

    {
        "name": "my bulk job",
        "jobs": [
            {"unified_job_template": 7, "inventory": 2},
            {"unified_job_template": 7, "credentials": [3]}
        ]
    }
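A companion sketch for launching a bulk job from Python, reusing the same placeholder URL and token as in the host-create example; the payload mirrors the JSON above.

    import os

    import requests

    AWX_URL = "https://awx.example.com"  # placeholder controller URL (assumption)
    TOKEN = os.environ["AWX_TOKEN"]  # placeholder access token (assumption)

    payload = {
        "name": "my bulk job",
        "jobs": [
            {"unified_job_template": 7, "inventory": 2},
            {"unified_job_template": 7, "credentials": [3]},
        ],
    }

    # POST to the bulk job launch endpoint added by this change
    resp = requests.post(
        f"{AWX_URL}/api/v2/bulk/job_launch/",
        json=payload,
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    resp.raise_for_status()
    workflow_job = resp.json()
    print(workflow_job["id"], workflow_job["status"])  # bulk jobs run as nodes of one WorkflowJob

As BulkJobLaunchSerializer.create in this diff shows, the response is the serialized WorkflowJob that wraps the requested jobs, and requests with more entries than BULK_JOB_MAX_LAUNCH are rejected during validation.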
awx/api/templates/api/bulk_view.md: 3 lines (new file)
@@ -0,0 +1,3 @@
# Bulk Actions

This endpoint lists available bulk action APIs.
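Based on BulkView.get in awx/api/views/bulk.py below, the index can be used to discover the individual bulk endpoints instead of hard-coding their paths; a small sketch with the same placeholder URL and token as the examples above.

    import os

    import requests

    AWX_URL = "https://awx.example.com"  # placeholder controller URL (assumption)
    TOKEN = os.environ["AWX_TOKEN"]  # placeholder access token (assumption)

    headers = {"Authorization": f"Bearer {TOKEN}"}
    bulk_index = requests.get(f"{AWX_URL}/api/v2/bulk/", headers=headers).json()

    # BulkView.get returns 'host_create' and 'job_launch' entries, each a fully qualified URL
    print(bulk_index["host_create"])
    print(bulk_index["job_launch"])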
@@ -1,11 +0,0 @@
# List Fact Scans for a Host Specific Host Scan

Make a GET request to this resource to retrieve system tracking data for a particular scan

You may filter by datetime:

`?datetime=2015-06-01`

and module

`?datetime=2015-06-01&module=ansible`
@@ -1,11 +0,0 @@
# List Fact Scans for a Host by Module and Date

Make a GET request to this resource to retrieve system tracking scans by module and date/time

You may filter scan runs using the `from` and `to` properties:

`?from=2015-06-01%2012:00:00&to=2015-06-03`

You may also filter by module

`?module=packages`
@@ -1 +0,0 @@
# List Red Hat Insights for a Host
@@ -1,21 +0,0 @@
{% ifmeth GET %}
# Determine if a Job can be started

Make a GET request to this resource to determine if the job can be started and
whether any passwords are required to start the job. The response will include
the following fields:

* `can_start`: Flag indicating if this job can be started (boolean, read-only)
* `passwords_needed_to_start`: Password names required to start the job (array,
read-only)
{% endifmeth %}

{% ifmeth POST %}
# Start a Job
Make a POST request to this resource to start the job. If any passwords are
required, they must be passed via POST data.

If successful, the response status code will be 202. If any required passwords
are not provided, a 400 status code will be returned. If the job cannot be
started, a 405 status code will be returned.
{% endifmeth %}
@@ -3,7 +3,14 @@

from django.urls import re_path

from awx.api.views import InstanceGroupList, InstanceGroupDetail, InstanceGroupUnifiedJobsList, InstanceGroupInstanceList
from awx.api.views import (
InstanceGroupList,
InstanceGroupDetail,
InstanceGroupUnifiedJobsList,
InstanceGroupInstanceList,
InstanceGroupAccessList,
InstanceGroupObjectRolesList,
)


urls = [
@@ -11,6 +18,8 @@ urls = [
re_path(r'^(?P<pk>[0-9]+)/$', InstanceGroupDetail.as_view(), name='instance_group_detail'),
re_path(r'^(?P<pk>[0-9]+)/jobs/$', InstanceGroupUnifiedJobsList.as_view(), name='instance_group_unified_jobs_list'),
re_path(r'^(?P<pk>[0-9]+)/instances/$', InstanceGroupInstanceList.as_view(), name='instance_group_instance_list'),
re_path(r'^(?P<pk>[0-9]+)/access_list/$', InstanceGroupAccessList.as_view(), name='instance_group_access_list'),
re_path(r'^(?P<pk>[0-9]+)/object_roles/$', InstanceGroupObjectRolesList.as_view(), name='instance_group_object_role_list'),
]

__all__ = ['urls']


@@ -31,6 +31,13 @@ from awx.api.views import (
ApplicationOAuth2TokenList,
OAuth2ApplicationDetail,
)

from awx.api.views.bulk import (
BulkView,
BulkHostCreateView,
BulkJobLaunchView,
)

from awx.api.views.mesh_visualizer import MeshVisualizer

from awx.api.views.metrics import MetricsView
@@ -136,6 +143,9 @@ v2_urls = [
re_path(r'^activity_stream/', include(activity_stream_urls)),
re_path(r'^workflow_approval_templates/', include(workflow_approval_template_urls)),
re_path(r'^workflow_approvals/', include(workflow_approval_urls)),
re_path(r'^bulk/$', BulkView.as_view(), name='bulk'),
re_path(r'^bulk/host_create/$', BulkHostCreateView.as_view(), name='bulk_host_create'),
re_path(r'^bulk/job_launch/$', BulkJobLaunchView.as_view(), name='bulk_job_launch'),
]
@@ -466,6 +466,23 @@ class InstanceGroupUnifiedJobsList(SubListAPIView):
|
||||
relationship = "unifiedjob_set"
|
||||
|
||||
|
||||
class InstanceGroupAccessList(ResourceAccessList):
|
||||
model = models.User # needs to be User for AccessLists
|
||||
parent_model = models.InstanceGroup
|
||||
|
||||
|
||||
class InstanceGroupObjectRolesList(SubListAPIView):
|
||||
model = models.Role
|
||||
serializer_class = serializers.RoleSerializer
|
||||
parent_model = models.InstanceGroup
|
||||
search_fields = ('role_field', 'content_type__model')
|
||||
|
||||
def get_queryset(self):
|
||||
po = self.get_parent_object()
|
||||
content_type = ContentType.objects.get_for_model(self.parent_model)
|
||||
return models.Role.objects.filter(content_type=content_type, object_id=po.pk)
|
||||
|
||||
|
||||
class InstanceGroupInstanceList(InstanceGroupMembershipMixin, SubListAttachDetachAPIView):
|
||||
name = _("Instance Group's Instances")
|
||||
model = models.Instance
|
||||
@@ -3078,7 +3095,9 @@ class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView):
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
return super(WorkflowJobTemplateWorkflowNodesList, self).get_queryset().order_by('id')
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
|
||||
|
||||
class WorkflowJobTemplateJobsList(SubListAPIView):
|
||||
@@ -3172,7 +3191,9 @@ class WorkflowJobWorkflowNodesList(SubListAPIView):
|
||||
search_fields = ('unified_job_template__name', 'unified_job_template__description')
|
||||
|
||||
def get_queryset(self):
|
||||
return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id')
|
||||
parent = self.get_parent_object()
|
||||
self.check_parent_access(parent)
|
||||
return getattr(parent, self.relationship).order_by('id')
|
||||
|
||||
|
||||
class WorkflowJobCancel(GenericCancelView):
|
||||
|
||||
awx/api/views/bulk.py: 69 lines (new file)
@@ -0,0 +1,69 @@
from collections import OrderedDict

from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.response import Response

from awx.main.models import UnifiedJob, Host
from awx.api.generics import (
    GenericAPIView,
    APIView,
)
from awx.api import (
    serializers,
    renderers,
)


class BulkView(APIView):
    permission_classes = [IsAuthenticated]
    renderer_classes = [
        renderers.BrowsableAPIRenderer,
        JSONRenderer,
    ]
    allowed_methods = ['GET', 'OPTIONS']

    def get(self, request, format=None):
        '''List top level resources'''
        data = OrderedDict()
        data['host_create'] = reverse('api:bulk_host_create', request=request)
        data['job_launch'] = reverse('api:bulk_job_launch', request=request)
        return Response(data)


class BulkJobLaunchView(GenericAPIView):
    permission_classes = [IsAuthenticated]
    model = UnifiedJob
    serializer_class = serializers.BulkJobLaunchSerializer
    allowed_methods = ['GET', 'POST', 'OPTIONS']

    def get(self, request):
        data = OrderedDict()
        data['detail'] = "Specify a list of unified job templates to launch alongside their launchtime parameters"
        return Response(data, status=status.HTTP_200_OK)

    def post(self, request):
        bulkjob_serializer = serializers.BulkJobLaunchSerializer(data=request.data, context={'request': request})
        if bulkjob_serializer.is_valid():
            result = bulkjob_serializer.create(bulkjob_serializer.validated_data)
            return Response(result, status=status.HTTP_201_CREATED)
        return Response(bulkjob_serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class BulkHostCreateView(GenericAPIView):
    permission_classes = [IsAuthenticated]
    model = Host
    serializer_class = serializers.BulkHostCreateSerializer
    allowed_methods = ['GET', 'POST', 'OPTIONS']

    def get(self, request):
        return Response({"detail": "Bulk create hosts with this endpoint"}, status=status.HTTP_200_OK)

    def post(self, request):
        serializer = serializers.BulkHostCreateSerializer(data=request.data, context={'request': request})
        if serializer.is_valid():
            result = serializer.create(serializer.validated_data)
            return Response(result, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@@ -121,6 +121,7 @@ class ApiVersionRootView(APIView):
data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list', request=request)
data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)
data['mesh_visualizer'] = reverse('api:mesh_visualizer_view', request=request)
data['bulk'] = reverse('api:bulk', request=request)
return Response(data)
@@ -588,17 +588,39 @@ class InstanceAccess(BaseAccess):
|
||||
|
||||
|
||||
class InstanceGroupAccess(BaseAccess):
|
||||
"""
|
||||
I can see Instance Groups when I am:
|
||||
- a superuser(system administrator)
|
||||
- at least read_role on the instance group
|
||||
I can edit Instance Groups when I am:
|
||||
- a superuser
|
||||
- admin role on the Instance group
|
||||
I can add/delete Instance Groups:
|
||||
- a superuser(system administrator)
|
||||
I can use Instance Groups when I have:
|
||||
- use_role on the instance group
|
||||
"""
|
||||
|
||||
model = InstanceGroup
|
||||
prefetch_related = ('instances',)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return InstanceGroup.objects.filter(organization__in=Organization.accessible_pk_qs(self.user, 'admin_role')).distinct()
|
||||
return self.model.accessible_objects(self.user, 'read_role')
|
||||
|
||||
@check_superuser
|
||||
def can_use(self, obj):
|
||||
return self.user in obj.use_role
|
||||
|
||||
def can_add(self, data):
|
||||
return self.user.is_superuser
|
||||
|
||||
@check_superuser
|
||||
def can_change(self, obj, data):
|
||||
return self.user.is_superuser
|
||||
return self.can_admin(obj)
|
||||
|
||||
@check_superuser
|
||||
def can_admin(self, obj):
|
||||
return self.user in obj.admin_role
|
||||
|
||||
def can_delete(self, obj):
|
||||
if obj.name in [settings.DEFAULT_EXECUTION_QUEUE_NAME, settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME]:
|
||||
@@ -845,7 +867,7 @@ class OrganizationAccess(NotificationAttachMixin, BaseAccess):
|
||||
return RoleAccess(self.user).can_attach(rel_role, sub_obj, 'members', *args, **kwargs)
|
||||
|
||||
if relationship == "instance_groups":
|
||||
if self.user.is_superuser:
|
||||
if self.user in obj.admin_role and self.user in sub_obj.use_role:
|
||||
return True
|
||||
return False
|
||||
return super(OrganizationAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
@@ -934,7 +956,7 @@ class InventoryAccess(BaseAccess):
|
||||
|
||||
def can_attach(self, obj, sub_obj, relationship, *args, **kwargs):
|
||||
if relationship == "instance_groups":
|
||||
if self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role:
|
||||
if self.user in sub_obj.use_role and self.user in obj.admin_role:
|
||||
return True
|
||||
return False
|
||||
return super(InventoryAccess, self).can_attach(obj, sub_obj, relationship, *args, **kwargs)
|
||||
@@ -1671,11 +1693,12 @@ class JobTemplateAccess(NotificationAttachMixin, UnifiedCredentialsMixin, BaseAc
|
||||
return self.user.is_superuser or self.user in obj.admin_role
|
||||
|
||||
@check_superuser
|
||||
# object here is the job template. sub_object here is what is being attached
|
||||
def can_attach(self, obj, sub_obj, relationship, data, skip_sub_obj_read_check=False):
|
||||
if relationship == "instance_groups":
|
||||
if not obj.organization:
|
||||
return False
|
||||
return self.user.can_access(type(sub_obj), "read", sub_obj) and self.user in obj.organization.admin_role
|
||||
return self.user in sub_obj.use_role and self.user in obj.admin_role
|
||||
return super(JobTemplateAccess, self).can_attach(obj, sub_obj, relationship, data, skip_sub_obj_read_check=skip_sub_obj_read_check)
|
||||
|
||||
@check_superuser
|
||||
@@ -1852,8 +1875,6 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
|
||||
def _related_filtered_queryset(self, cls):
|
||||
if cls is Label:
|
||||
return LabelAccess(self.user).filtered_queryset()
|
||||
elif cls is InstanceGroup:
|
||||
return InstanceGroupAccess(self.user).filtered_queryset()
|
||||
else:
|
||||
return cls._accessible_pk_qs(cls, self.user, 'use_role')
|
||||
|
||||
@@ -1865,6 +1886,7 @@ class JobLaunchConfigAccess(UnifiedCredentialsMixin, BaseAccess):
|
||||
|
||||
@check_superuser
|
||||
def can_add(self, data, template=None):
|
||||
# WARNING: duplicated with BulkJobLaunchSerializer, check when changing permission levels
|
||||
# This is a special case, we don't check related many-to-many elsewhere
|
||||
# launch RBAC checks use this
|
||||
if 'reference_obj' in data:
|
||||
@@ -1997,7 +2019,16 @@ class WorkflowJobNodeAccess(BaseAccess):
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return self.model.objects.filter(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
|
||||
return self.model.objects.filter(
|
||||
Q(workflow_job__unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
|
||||
| Q(workflow_job__organization__in=Organization.objects.filter(Q(admin_role__members=self.user)))
|
||||
)
|
||||
|
||||
def can_read(self, obj):
|
||||
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
|
||||
if obj.workflow_job.is_bulk_job and obj.workflow_job.created_by_id == self.user.id:
|
||||
return True
|
||||
return super().can_read(obj)
|
||||
|
||||
@check_superuser
|
||||
def can_add(self, data):
|
||||
@@ -2123,7 +2154,16 @@ class WorkflowJobAccess(BaseAccess):
|
||||
)
|
||||
|
||||
def filtered_queryset(self):
|
||||
return WorkflowJob.objects.filter(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
|
||||
return WorkflowJob.objects.filter(
|
||||
Q(unified_job_template__in=UnifiedJobTemplate.accessible_pk_qs(self.user, 'read_role'))
|
||||
| Q(organization__in=Organization.objects.filter(Q(admin_role__members=self.user)), is_bulk_job=True)
|
||||
)
|
||||
|
||||
def can_read(self, obj):
|
||||
"""Overriding this opens up detail view access for bulk jobs, where the workflow job has no associated workflow job template."""
|
||||
if obj.is_bulk_job and obj.created_by_id == self.user.id:
|
||||
return True
|
||||
return super().can_read(obj)
|
||||
|
||||
def can_add(self, data):
|
||||
# Old add-start system for launching jobs is being depreciated, and
|
||||
|
||||
@@ -233,11 +233,13 @@ def projects_by_scm_type(since, **kwargs):
|
||||
return counts
|
||||
|
||||
|
||||
@register('instance_info', '1.2', description=_('Cluster topology and capacity'))
|
||||
@register('instance_info', '1.3', description=_('Cluster topology and capacity'))
|
||||
def instance_info(since, include_hostnames=False, **kwargs):
|
||||
info = {}
|
||||
# Use same method that the TaskManager does to compute consumed capacity without querying all running jobs for each Instance
|
||||
tm_models = TaskManagerModels.init_with_consumed_capacity(instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled'])
|
||||
tm_models = TaskManagerModels.init_with_consumed_capacity(
|
||||
instance_fields=['uuid', 'version', 'capacity', 'cpu', 'memory', 'managed_by_policy', 'enabled', 'node_type']
|
||||
)
|
||||
for tm_instance in tm_models.instances.instances_by_hostname.values():
|
||||
instance = tm_instance.obj
|
||||
instance_info = {
|
||||
|
||||
@@ -282,6 +282,16 @@ register(
|
||||
placeholder={'HTTP_PROXY': 'myproxy.local:8080'},
|
||||
)
|
||||
|
||||
register(
|
||||
'AWX_RUNNER_KEEPALIVE_SECONDS',
|
||||
field_class=fields.IntegerField,
|
||||
label=_('K8S Ansible Runner Keep-Alive Message Interval'),
|
||||
help_text=_('Only applies to jobs running in a Container Group. If not 0, send a message every so-many seconds to keep connection open.'),
|
||||
category=_('Jobs'),
|
||||
category_slug='jobs',
|
||||
placeholder=240, # intended to be under common 5 minute idle timeout
|
||||
)
|
||||
|
||||
register(
|
||||
'GALAXY_TASK_ENV',
|
||||
field_class=fields.KeyValueField,
|
||||
@@ -765,6 +775,26 @@ register(
|
||||
help_text=_('Indicates whether the instance is part of a kubernetes-based deployment.'),
|
||||
)
|
||||
|
||||
register(
|
||||
'BULK_JOB_MAX_LAUNCH',
|
||||
field_class=fields.IntegerField,
|
||||
default=100,
|
||||
label=_('Max jobs to allow bulk jobs to launch'),
|
||||
help_text=_('Max jobs to allow bulk jobs to launch'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
)
|
||||
|
||||
register(
|
||||
'BULK_HOST_MAX_CREATE',
|
||||
field_class=fields.IntegerField,
|
||||
default=100,
|
||||
label=_('Max number of hosts to allow to be created in a single bulk action'),
|
||||
help_text=_('Max number of hosts to allow to be created in a single bulk action'),
|
||||
category=_('Bulk Actions'),
|
||||
category_slug='bulk',
|
||||
)
|
||||
|
||||
|
||||
def logging_validate(serializer, attrs):
|
||||
if not serializer.instance or not hasattr(serializer.instance, 'LOG_AGGREGATOR_HOST') or not hasattr(serializer.instance, 'LOG_AGGREGATOR_TYPE'):
|
||||
|
||||
@@ -49,7 +49,10 @@ def tss_backend(**kwargs):
secret_dict = secret_server.get_secret(kwargs['secret_id'])
secret = ServerSecret(**secret_dict)

return secret.fields[kwargs['secret_field']].value
if isinstance(secret.fields[kwargs['secret_field']].value, str) == False:
return secret.fields[kwargs['secret_field']].value.text
else:
return secret.fields[kwargs['secret_field']].value


tss_plugin = CredentialPlugin(
@@ -70,7 +70,10 @@ def reap_waiting(instance=None, status='failed', job_explanation=None, grace_per
reap_job(j, status, job_explanation=job_explanation)


def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None):
def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=None, ref_time=None):
"""
Reap all jobs in running for this instance.
"""
@@ -80,7 +80,7 @@ def reap(instance=None, status='failed', job_explanation=None, excluded_uuids=No
hostname = instance.hostname
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter(
Q(status='running') & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
Q(status='running', modified__lte=ref_time) & (Q(execution_node=hostname) | Q(controller_node=hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id)
)
if excluded_uuids:
jobs = jobs.exclude(celery_task_id__in=excluded_uuids)
@@ -851,6 +851,7 @@ class Command(BaseCommand):
|
||||
logger.info('Updating inventory %d: %s' % (inventory.pk, inventory.name))
|
||||
|
||||
# Create ad-hoc inventory source and inventory update objects
|
||||
ee = get_default_execution_environment()
|
||||
with ignore_inventory_computed_fields():
|
||||
source = Command.get_source_absolute_path(raw_source)
|
||||
|
||||
@@ -860,14 +861,22 @@ class Command(BaseCommand):
|
||||
source_path=os.path.abspath(source),
|
||||
overwrite=bool(options.get('overwrite', False)),
|
||||
overwrite_vars=bool(options.get('overwrite_vars', False)),
|
||||
execution_environment=ee,
|
||||
)
|
||||
inventory_update = inventory_source.create_inventory_update(
|
||||
_eager_fields=dict(status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd())
|
||||
_eager_fields=dict(
|
||||
status='running', job_args=json.dumps(sys.argv), job_env=dict(os.environ.items()), job_cwd=os.getcwd(), execution_environment=ee
|
||||
)
|
||||
)
|
||||
|
||||
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
|
||||
try:
|
||||
data = AnsibleInventoryLoader(source=source, verbosity=verbosity).load()
|
||||
logger.debug('Finished loading from source: %s', source)
|
||||
|
||||
logger.debug('Finished loading from source: %s', source)
|
||||
except SystemExit:
|
||||
logger.debug("Error occurred while running ansible-inventory")
|
||||
inventory_update.cancel()
|
||||
sys.exit(1)
|
||||
|
||||
status, tb, exc = 'error', '', None
|
||||
try:
|
||||
|
||||
awx/main/migrations/0175_workflowjob_is_bulk_job.py: 17 lines (new file)
@@ -0,0 +1,17 @@
|
||||
# Generated by Django 3.2.16 on 2023-01-05 15:39
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0174_ensure_org_ee_admin_roles'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='workflowjob',
|
||||
name='is_bulk_job',
|
||||
field=models.BooleanField(default=False),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0176_inventorysource_scm_branch.py: 32 lines (new file)
@@ -0,0 +1,32 @@
|
||||
# Generated by Django 3.2.16 on 2023-03-03 20:44
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0175_workflowjob_is_bulk_job'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='inventorysource',
|
||||
name='scm_branch',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text='Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.',
|
||||
max_length=1024,
|
||||
),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='inventoryupdate',
|
||||
name='scm_branch',
|
||||
field=models.CharField(
|
||||
blank=True,
|
||||
default='',
|
||||
help_text='Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.',
|
||||
max_length=1024,
|
||||
),
|
||||
),
|
||||
]
|
||||
awx/main/migrations/0177_instance_group_role_addition.py: 48 lines (new file)
@@ -0,0 +1,48 @@
|
||||
# Generated by Django 3.2.16 on 2023-02-17 02:45
|
||||
|
||||
import awx.main.fields
|
||||
from django.db import migrations
|
||||
import django.db.models.deletion
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
dependencies = [
|
||||
('main', '0176_inventorysource_scm_branch'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='instancegroup',
|
||||
name='admin_role',
|
||||
field=awx.main.fields.ImplicitRoleField(
|
||||
editable=False,
|
||||
null='True',
|
||||
on_delete=django.db.models.deletion.CASCADE,
|
||||
parent_role=['singleton:system_administrator'],
|
||||
related_name='+',
|
||||
to='main.role',
|
||||
),
|
||||
preserve_default='True',
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='instancegroup',
|
||||
name='read_role',
|
||||
field=awx.main.fields.ImplicitRoleField(
|
||||
editable=False,
|
||||
null='True',
|
||||
on_delete=django.db.models.deletion.CASCADE,
|
||||
parent_role=['singleton:system_auditor', 'use_role', 'admin_role'],
|
||||
related_name='+',
|
||||
to='main.role',
|
||||
),
|
||||
preserve_default='True',
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='instancegroup',
|
||||
name='use_role',
|
||||
field=awx.main.fields.ImplicitRoleField(
|
||||
editable=False, null='True', on_delete=django.db.models.deletion.CASCADE, parent_role=['admin_role'], related_name='+', to='main.role'
|
||||
),
|
||||
preserve_default='True',
|
||||
),
|
||||
]
|
||||
18
awx/main/migrations/0178_instance_group_admin_migration.py
Normal file
@@ -0,0 +1,18 @@
# Generated by Django 3.2.16 on 2023-02-17 02:45

from django.db import migrations
from awx.main.migrations import _rbac as rbac
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations import _OrgAdmin_to_use_ig as oamigrate
from awx.main.migrations import ActivityStreamDisabledMigration


class Migration(ActivityStreamDisabledMigration):
dependencies = [
('main', '0177_instance_group_role_addition'),
]
operations = [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(rbac.create_roles),
migrations.RunPython(oamigrate.migrate_org_admin_to_use),
]
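One hedged aside on the RunPython calls above: as written the data migration has no reverse callable, so it cannot be unapplied. If reversibility matters, Django accepts a second argument; a generic, hypothetical sketch (not the committed migration) looks like this:

    from django.db import migrations


    def forward(apps, schema_editor):
        # data changes go here
        pass


    class Migration(migrations.Migration):
        dependencies = []

        operations = [
            # second argument is the reverse callable; noop keeps `migrate` able to go backwards
            migrations.RunPython(forward, migrations.RunPython.noop),
        ]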
20
awx/main/migrations/_OrgAdmin_to_use_ig.py
Normal file
@@ -0,0 +1,20 @@
import logging

from awx.main.models import Organization

logger = logging.getLogger('awx.main.migrations')


def migrate_org_admin_to_use(apps, schema_editor):
logger.info('Initiated migration from Org admin to use role')
roles_added = 0
for org in Organization.objects.prefetch_related('admin_role__members').iterator():
igs = list(org.instance_groups.all())
if not igs:
continue
for admin in org.admin_role.members.filter(is_superuser=False):
for ig in igs:
ig.use_role.members.add(admin)
roles_added += 1
if roles_added:
logger.info(f'Migration converted {roles_added} from organization admin to use role')
@@ -29,6 +29,7 @@ def create_roles(apps, schema_editor):
'Project',
'Credential',
'JobTemplate',
'InstanceGroup',
]
]

@@ -17,15 +17,20 @@ from django.db.models import Sum
import redis
from solo.models import SingletonModel

# AWX
from awx import __version__ as awx_application_version
from awx.api.versioning import reverse
from awx.main.fields import JSONBlob
from awx.main.fields import JSONBlob, ImplicitRoleField
from awx.main.managers import InstanceManager, UUID_DEFAULT
from awx.main.constants import JOB_FOLDER_PREFIX
from awx.main.models.base import BaseModel, HasEditsMixin, prevent_search
from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.models.unified_jobs import UnifiedJob
from awx.main.utils.common import get_corrected_cpu, get_cpu_effective_capacity, get_corrected_memory, get_mem_effective_capacity
from awx.main.models.mixins import RelatedJobsMixin
from awx.main.models.mixins import RelatedJobsMixin, ResourceMixin

# ansible-runner
from ansible_runner.utils.capacity import get_cpu_count, get_mem_in_bytes
@@ -352,7 +357,7 @@ class Instance(HasPolicyEditsMixin, BaseModel):
self.save_health_data(awx_application_version, get_cpu_count(), get_mem_in_bytes(), update_last_seen=True, errors=errors)


class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin, ResourceMixin):
"""A model representing a Queue/Group of AWX Instances."""

name = models.CharField(max_length=250, unique=True)
@@ -379,6 +384,24 @@ class InstanceGroup(HasPolicyEditsMixin, BaseModel, RelatedJobsMixin):
default='',
)
)
admin_role = ImplicitRoleField(
parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
]
)
use_role = ImplicitRoleField(
parent_role=[
'admin_role',
]
)
read_role = ImplicitRoleField(
parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
'use_role',
'admin_role',
]
)

max_concurrent_jobs = models.IntegerField(default=0, help_text=_("Maximum number of concurrent jobs to run on this group. Zero means no limit."))
max_forks = models.IntegerField(default=0, help_text=_("Max forks to execute on this group. Zero means no limit."))
policy_instance_percentage = models.IntegerField(default=0, help_text=_("Percentage of Instances to automatically assign to this group"))
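With admin_role, use_role and read_role now declared on InstanceGroup, access follows the usual ImplicitRoleField ancestry: admin implies use, and admin, use and the system auditor singleton all imply read. A hedged sketch of granting and checking membership through these fields, assuming a Django shell against a development AWX database (names are illustrative):

    from django.contrib.auth.models import User
    from awx.main.models import InstanceGroup

    ig = InstanceGroup.objects.create(name='example-group')
    user = User.objects.create(username='example-user')

    # use_role is enough to attach the group to resources the user administers
    ig.use_role.members.add(user)

    # role ancestry: membership in use_role also satisfies read_role checks
    assert user in ig.use_role
    assert user in ig.read_role
    assert user not in ig.admin_role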
@@ -872,6 +872,12 @@ class InventorySourceOptions(BaseModel):
default='',
help_text=_('Inventory source variables in YAML or JSON format.'),
)
scm_branch = models.CharField(
max_length=1024,
default='',
blank=True,
help_text=_('Inventory source SCM branch. Project default used if blank. Only allowed if project allow_override field is set to true.'),
)
enabled_var = models.TextField(
blank=True,
default='',
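Per the help text, the new scm_branch field should only take effect when the source project's allow_override flag is set; that rule would be enforced in validation rather than by the field itself. A small, hypothetical sketch of the check in plain Python (not the actual AWX serializer code):

    def validate_scm_branch(scm_branch, source_project):
        """Reject a branch override unless the project explicitly allows it."""
        if scm_branch and not getattr(source_project, 'allow_override', False):
            raise ValueError('Project does not allow override of branch; enable allow_override to set scm_branch.')
        return scm_branch


    class _Project:
        allow_override = False


    # validate_scm_branch('devel', _Project())  # raises ValueError
    # validate_scm_branch('', _Project())       # fine: blank means "use project default"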
@@ -650,6 +650,7 @@ class WorkflowJob(UnifiedJob, WorkflowJobOptions, SurveyJobMixin, JobNotificatio
help_text=_("If automatically created for a sliced job run, the job template " "the workflow job was created from."),
)
is_sliced_job = models.BooleanField(default=False)
is_bulk_job = models.BooleanField(default=False)

def _set_default_dependencies_processed(self):
self.dependencies_processed = True
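The new is_bulk_job flag makes workflows created by the bulk launch endpoint easy to tell apart from ordinary workflow jobs; a quick illustrative query (assuming a Django shell):

    from awx.main.models import WorkflowJob

    # workflows spawned via the bulk job launch API, newest first
    bulk_jobs = WorkflowJob.objects.filter(is_bulk_job=True).order_by('-created')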
@@ -85,6 +85,8 @@ class RunnerCallback:
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if event_data.get('event') == 'keepalive':
return

if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
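Keepalive events exist only to hold the connection open, so the callback drops them before any further processing. A minimal standalone sketch of the same guard in an event-handling loop (function names are illustrative, not the AWX callback API):

    def handle_events(events, dispatch):
        """Forward runner events to dispatch(), skipping keepalive noise."""
        for event_data in events:
            if event_data.get('event') == 'keepalive':
                continue  # heartbeat only; nothing to persist or display
            dispatch(event_data)


    handled = []
    handle_events(
        [{'event': 'keepalive'}, {'event': 'runner_on_ok', 'host': 'web1'}],
        handled.append,
    )
    assert handled == [{'event': 'runner_on_ok', 'host': 'web1'}]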
@@ -759,7 +759,7 @@ class SourceControlMixin(BaseTask):

def sync_and_copy(self, project, private_data_dir, scm_branch=None):
self.acquire_lock(project, self.instance.id)

is_commit = False
try:
original_branch = None
failed_reason = project.get_reason_if_failed()
@@ -771,6 +771,7 @@ class SourceControlMixin(BaseTask):
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
is_commit = True
original_branch = git_repo.head.commit
else:
original_branch = git_repo.active_branch
@@ -782,7 +783,11 @@ class SourceControlMixin(BaseTask):
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
original_branch.checkout()
if is_commit:
git_repo.head.set_commit(original_branch)
git_repo.head.reset(index=True, working_tree=True)
else:
original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception(f'Failed to restore project repo to prior state after {self.instance.id}')
@@ -1581,7 +1586,7 @@ class RunInventoryUpdate(SourceControlMixin, BaseTask):
if inventory_update.source == 'scm':
if not source_project:
raise RuntimeError('Could not find project to run SCM inventory update from.')
self.sync_and_copy(source_project, private_data_dir)
self.sync_and_copy(source_project, private_data_dir, scm_branch=inventory_update.inventory_source.scm_branch)
else:
# If source is not SCM make an empty project directory, content is built inside inventory folder
super(RunInventoryUpdate, self).build_project_dir(inventory_update, private_data_dir)
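The sync_and_copy change records whether the repo started on a detached HEAD and, once the run finishes, restores either that commit or the original branch. A hedged GitPython sketch of the same save/restore dance (assumes GitPython is installed; error handling is deliberately thin):

    import git


    def remember_position(project_path):
        repo = git.Repo(project_path)
        if repo.head.is_detached:
            return repo, True, repo.head.commit      # remember the exact commit
        return repo, False, repo.active_branch       # remember the branch reference


    def restore_position(repo, is_commit, original):
        try:
            if is_commit:
                repo.head.set_commit(original)       # point HEAD back at the commit
                repo.head.reset(index=True, working_tree=True)
            else:
                original.checkout()                  # switch back to the branch
        except Exception:
            # a dirty working tree (among other things) can make the restore fail
            pass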
@@ -526,6 +526,10 @@ class AWXReceptorJob:
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']

if settings.AWX_RUNNER_KEEPALIVE_SECONDS:
pod_spec['spec']['containers'][0].setdefault('env', [])
pod_spec['spec']['containers'][0]['env'].append({'name': 'ANSIBLE_RUNNER_KEEPALIVE_SECONDS', 'value': str(settings.AWX_RUNNER_KEEPALIVE_SECONDS)})

# Enforce EE Pull Policy
pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
if self.task and self.task.instance.execution_environment:
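For container groups the keepalive interval has to reach ansible-runner inside the pod, which is why the hunk appends an ANSIBLE_RUNNER_KEEPALIVE_SECONDS env var to the job container. A standalone sketch of the same dict manipulation on a toy pod spec (plain Python, no Kubernetes client required):

    def apply_keepalive(pod_spec, keepalive_seconds):
        """Inject the runner keepalive interval into the first container, if enabled."""
        if not keepalive_seconds:
            return pod_spec
        container = pod_spec['spec']['containers'][0]
        container.setdefault('env', [])
        container['env'].append({'name': 'ANSIBLE_RUNNER_KEEPALIVE_SECONDS', 'value': str(keepalive_seconds)})
        return pod_spec


    spec = {'spec': {'containers': [{'image': 'quay.io/example/ee:latest'}]}}
    apply_keepalive(spec, 240)
    assert spec['spec']['containers'][0]['env'][0]['value'] == '240'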
@@ -581,7 +581,7 @@ def cluster_node_heartbeat(dispatch_time=None, worker_tasks=None):
active_task_ids = []
for task_list in worker_tasks.values():
active_task_ids.extend(task_list)
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids)
reaper.reap(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))
if max(len(task_list) for task_list in worker_tasks.values()) <= 1:
reaper.reap_waiting(instance=this_inst, excluded_uuids=active_task_ids, ref_time=datetime.fromisoformat(dispatch_time))

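Passing ref_time pins the reaper to the dispatcher's own timestamp instead of "now", so jobs touched after the heartbeat was computed are not swept up, which is the point of this branch. A hedged sketch of a cutoff check in that spirit; the grace period and function name are illustrative, not the reaper's actual signature:

    from datetime import datetime, timedelta, timezone

    GRACE = timedelta(seconds=60)


    def should_reap(job_modified, ref_time=None):
        """Reap only jobs whose last modification predates ref_time by the grace period."""
        ref = ref_time or datetime.now(timezone.utc)
        return job_modified < ref - GRACE


    dispatch_time = datetime.now(timezone.utc).isoformat()
    ref = datetime.fromisoformat(dispatch_time)
    assert not should_reap(datetime.now(timezone.utc), ref_time=ref)  # freshly modified job survives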
@@ -14,7 +14,7 @@ from awx.main.constants import JOB_VARIABLE_PREFIXES


@pytest.mark.django_db
def test_subclass_types(rando):
def test_subclass_types():
assert set(UnifiedJobTemplate._submodels_with_roles()) == set(
[
ContentType.objects.get_for_model(JobTemplate).id,

311
awx/main/tests/functional/test_bulk.py
Normal file
@@ -0,0 +1,311 @@
|
||||
import pytest
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
from awx.api.versioning import reverse
|
||||
|
||||
from awx.main.models.jobs import JobTemplate
|
||||
from awx.main.models import Organization, Inventory, WorkflowJob, ExecutionEnvironment, Host
|
||||
from awx.main.scheduler import TaskManager
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('num_hosts, num_queries', [(1, 15), (10, 15)])
|
||||
def test_bulk_host_create_num_queries(organization, inventory, post, get, user, num_hosts, num_queries, django_assert_max_num_queries):
|
||||
'''
|
||||
If I am a...
|
||||
org admin
|
||||
inventory admin at org level
|
||||
admin of a particular inventory
|
||||
superuser
|
||||
|
||||
Bulk Host create should take under a certain number of queries
|
||||
'''
|
||||
inventory.organization = organization
|
||||
inventory_admin = user('inventory_admin', False)
|
||||
org_admin = user('org_admin', False)
|
||||
org_inv_admin = user('org_admin', False)
|
||||
superuser = user('admin', True)
|
||||
for u in [org_admin, org_inv_admin, inventory_admin]:
|
||||
organization.member_role.members.add(u)
|
||||
organization.admin_role.members.add(org_admin)
|
||||
organization.inventory_admin_role.members.add(org_inv_admin)
|
||||
inventory.admin_role.members.add(inventory_admin)
|
||||
|
||||
for u in [org_admin, inventory_admin, org_inv_admin, superuser]:
|
||||
hosts = [{'name': uuid4()} for i in range(num_hosts)]
|
||||
with django_assert_max_num_queries(num_queries):
|
||||
bulk_host_create_response = post(reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': hosts}, u, expect=201).data
|
||||
assert len(bulk_host_create_response['hosts']) == len(hosts), f"unexpected number of hosts created for user {u}"
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_host_create_rbac(organization, inventory, post, get, user):
|
||||
'''
|
||||
If I am a...
|
||||
org admin
|
||||
inventory admin at org level
|
||||
admin of a particular inventory
|
||||
... I can bulk add hosts
|
||||
|
||||
Everyone else cannot
|
||||
'''
|
||||
inventory.organization = organization
|
||||
inventory_admin = user('inventory_admin', False)
|
||||
org_admin = user('org_admin', False)
|
||||
org_inv_admin = user('org_admin', False)
|
||||
auditor = user('auditor', False)
|
||||
member = user('member', False)
|
||||
use_inv_member = user('member', False)
|
||||
for u in [org_admin, org_inv_admin, auditor, member, inventory_admin, use_inv_member]:
|
||||
organization.member_role.members.add(u)
|
||||
organization.admin_role.members.add(org_admin)
|
||||
organization.inventory_admin_role.members.add(org_inv_admin)
|
||||
inventory.admin_role.members.add(inventory_admin)
|
||||
inventory.use_role.members.add(use_inv_member)
|
||||
organization.auditor_role.members.add(auditor)
|
||||
|
||||
for indx, u in enumerate([org_admin, inventory_admin, org_inv_admin]):
|
||||
bulk_host_create_response = post(
|
||||
reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar-{indx}'}]}, u, expect=201
|
||||
).data
|
||||
assert len(bulk_host_create_response['hosts']) == 1, f"unexpected number of hosts created for user {u}"
|
||||
assert Host.objects.filter(inventory__id=inventory.id)[0].name == 'foobar-0'
|
||||
|
||||
for indx, u in enumerate([member, auditor, use_inv_member]):
|
||||
bulk_host_create_response = post(
|
||||
reverse('api:bulk_host_create'), {'inventory': inventory.id, 'hosts': [{'name': f'foobar2-{indx}'}]}, u, expect=400
|
||||
).data
|
||||
assert bulk_host_create_response['__all__'][0] == f'Inventory with id {inventory.id} not found or lack permissions to add hosts.'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize('num_jobs, num_queries', [(1, 25), (10, 25)])
|
||||
def test_bulk_job_launch_queries(job_template, organization, inventory, project, post, get, user, num_jobs, num_queries, django_assert_max_num_queries):
|
||||
'''
|
||||
if I have access to the unified job template
|
||||
... I can launch the bulk job
|
||||
... and the number of queries should NOT scale with the number of jobs
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
org_admin = user('org_admin', False)
|
||||
jt = JobTemplate.objects.create(name='my-jt', ask_inventory_on_launch=True, project=project, playbook='helloworld.yml')
|
||||
organization.member_role.members.add(normal_user)
|
||||
organization.admin_role.members.add(org_admin)
|
||||
jt.execute_role.members.add(normal_user)
|
||||
inventory.use_role.members.add(normal_user)
|
||||
jt.save()
|
||||
inventory.save()
|
||||
jobs = [{'unified_job_template': jt.id, 'inventory': inventory.id} for _ in range(num_jobs)]
|
||||
|
||||
# This is not working, we need to figure that out if we want to include tests for more jobs
|
||||
# with mock.patch('awx.api.serializers.settings.BULK_JOB_MAX_LAUNCH', num_jobs + 1):
|
||||
with django_assert_max_num_queries(num_queries):
|
||||
bulk_job_launch_response = post(reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': jobs}, normal_user, expect=201).data
|
||||
|
||||
# Run task manager so the workflow job nodes actually spawn
|
||||
TaskManager().schedule()
|
||||
|
||||
for u in (org_admin, normal_user):
|
||||
bulk_job = get(bulk_job_launch_response['url'], u, expect=200).data
|
||||
assert organization.id == bulk_job['summary_fields']['organization']['id']
|
||||
resp = get(bulk_job_launch_response['related']['workflow_nodes'], u)
|
||||
assert resp.data['count'] == num_jobs
|
||||
for item in resp.data['results']:
|
||||
assert item["unified_job_template"] == jt.id
|
||||
assert item["inventory"] == inventory.id
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_job_launch_no_access_to_job_template(job_template, organization, inventory, project, credential, post, get, user):
|
||||
'''
|
||||
if I don't have access to the unified job template
|
||||
... I can't launch the bulk job
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
|
||||
jt.save()
|
||||
organization.member_role.members.add(normal_user)
|
||||
bulk_job_launch_response = post(
|
||||
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}]}, normal_user, expect=400
|
||||
).data
|
||||
assert bulk_job_launch_response['__all__'][0] == f'Job Templates {{{jt.id}}} not found or you don\'t have permissions to access it'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_job_launch_no_org_assigned(job_template, organization, inventory, project, credential, post, get, user):
|
||||
'''
|
||||
if I am not part of any organization...
|
||||
... I can't launch the bulk job
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
|
||||
jt.save()
|
||||
jt.execute_role.members.add(normal_user)
|
||||
bulk_job_launch_response = post(
|
||||
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}]}, normal_user, expect=400
|
||||
).data
|
||||
assert bulk_job_launch_response['__all__'][0] == 'User not part of any organization, please assign an organization to assign to the bulk job'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_job_launch_multiple_org_assigned(job_template, organization, inventory, project, credential, post, get, user):
|
||||
'''
|
||||
if I am part of multiple organization...
|
||||
and if I do not provide org at the launch time
|
||||
... I can't launch the bulk job
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
org1 = Organization.objects.create(name='foo1')
|
||||
org2 = Organization.objects.create(name='foo2')
|
||||
org1.member_role.members.add(normal_user)
|
||||
org2.member_role.members.add(normal_user)
|
||||
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
|
||||
jt.save()
|
||||
jt.execute_role.members.add(normal_user)
|
||||
bulk_job_launch_response = post(
|
||||
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}]}, normal_user, expect=400
|
||||
).data
|
||||
assert bulk_job_launch_response['__all__'][0] == 'User has permission to multiple Organizations, please set one of them in the request'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_job_launch_specific_org(job_template, organization, inventory, project, credential, post, get, user):
|
||||
'''
|
||||
if I am part of multiple organization...
|
||||
and if I provide org at the launch time
|
||||
... I can launch the bulk job
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
org1 = Organization.objects.create(name='foo1')
|
||||
org2 = Organization.objects.create(name='foo2')
|
||||
org1.member_role.members.add(normal_user)
|
||||
org2.member_role.members.add(normal_user)
|
||||
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
|
||||
jt.save()
|
||||
jt.execute_role.members.add(normal_user)
|
||||
bulk_job_launch_response = post(
|
||||
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id}], 'organization': org1.id}, normal_user, expect=201
|
||||
).data
|
||||
bulk_job_id = bulk_job_launch_response['id']
|
||||
bulk_job_obj = WorkflowJob.objects.filter(id=bulk_job_id, is_bulk_job=True).first()
|
||||
assert org1.id == bulk_job_obj.organization.id
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_job_launch_inventory_no_access(job_template, organization, inventory, project, credential, post, get, user):
|
||||
'''
|
||||
if I don't have access to the inventory...
|
||||
and if I try to use it at the launch time
|
||||
... I can't launch the bulk job
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
org1 = Organization.objects.create(name='foo1')
|
||||
org2 = Organization.objects.create(name='foo2')
|
||||
jt = JobTemplate.objects.create(name='my-jt', inventory=inventory, project=project, playbook='helloworld.yml')
|
||||
jt.save()
|
||||
org1.member_role.members.add(normal_user)
|
||||
inv = Inventory.objects.create(name='inv1', organization=org2)
|
||||
jt.execute_role.members.add(normal_user)
|
||||
bulk_job_launch_response = post(
|
||||
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id, 'inventory': inv.id}]}, normal_user, expect=400
|
||||
).data
|
||||
assert bulk_job_launch_response['__all__'][0] == f'Inventories {{{inv.id}}} not found or you don\'t have permissions to access it'
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_job_inventory_prompt(job_template, organization, inventory, project, credential, post, get, user):
|
||||
'''
|
||||
Job template has an inventory set as prompt_on_launch
|
||||
and if I provide the inventory as a parameter in bulk job
|
||||
... job uses that inventory
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
org1 = Organization.objects.create(name='foo1')
|
||||
jt = JobTemplate.objects.create(name='my-jt', ask_inventory_on_launch=True, project=project, playbook='helloworld.yml')
|
||||
jt.save()
|
||||
org1.member_role.members.add(normal_user)
|
||||
inv = Inventory.objects.create(name='inv1', organization=org1)
|
||||
jt.execute_role.members.add(normal_user)
|
||||
inv.use_role.members.add(normal_user)
|
||||
bulk_job_launch_response = post(
|
||||
reverse('api:bulk_job_launch'), {'name': 'Bulk Job Launch', 'jobs': [{'unified_job_template': jt.id, 'inventory': inv.id}]}, normal_user, expect=201
|
||||
).data
|
||||
bulk_job_id = bulk_job_launch_response['id']
|
||||
node = WorkflowJob.objects.get(id=bulk_job_id).workflow_job_nodes.all().order_by('created')
|
||||
assert inv.id == node[0].inventory.id
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_bulk_job_set_all_prompt(job_template, organization, inventory, project, credentialtype_ssh, post, get, user):
|
||||
'''
|
||||
Job template has many fields set as prompt_on_launch
|
||||
and if I provide all those fields as a parameter in bulk job
|
||||
... job uses them
|
||||
'''
|
||||
normal_user = user('normal_user', False)
|
||||
jt = JobTemplate.objects.create(
|
||||
name='my-jt',
|
||||
ask_inventory_on_launch=True,
|
||||
ask_diff_mode_on_launch=True,
|
||||
ask_job_type_on_launch=True,
|
||||
ask_verbosity_on_launch=True,
|
||||
ask_execution_environment_on_launch=True,
|
||||
ask_forks_on_launch=True,
|
||||
ask_job_slice_count_on_launch=True,
|
||||
ask_timeout_on_launch=True,
|
||||
ask_variables_on_launch=True,
|
||||
ask_scm_branch_on_launch=True,
|
||||
ask_limit_on_launch=True,
|
||||
ask_skip_tags_on_launch=True,
|
||||
ask_tags_on_launch=True,
|
||||
project=project,
|
||||
playbook='helloworld.yml',
|
||||
)
|
||||
jt.save()
|
||||
organization.member_role.members.add(normal_user)
|
||||
inv = Inventory.objects.create(name='inv1', organization=organization)
|
||||
ee = ExecutionEnvironment.objects.create(name='test-ee', image='quay.io/foo/bar')
|
||||
jt.execute_role.members.add(normal_user)
|
||||
inv.use_role.members.add(normal_user)
|
||||
bulk_job_launch_response = post(
|
||||
reverse('api:bulk_job_launch'),
|
||||
{
|
||||
'name': 'Bulk Job Launch',
|
||||
'jobs': [
|
||||
{
|
||||
'unified_job_template': jt.id,
|
||||
'inventory': inv.id,
|
||||
'diff_mode': True,
|
||||
'job_type': 'check',
|
||||
'verbosity': 3,
|
||||
'execution_environment': ee.id,
|
||||
'forks': 1,
|
||||
'job_slice_count': 1,
|
||||
'timeout': 200,
|
||||
'extra_data': {'prompted_key': 'prompted_val'},
|
||||
'scm_branch': 'non_dev',
|
||||
'limit': 'kansas',
|
||||
'skip_tags': 'foobar',
|
||||
'job_tags': 'untagged',
|
||||
}
|
||||
],
|
||||
},
|
||||
normal_user,
|
||||
expect=201,
|
||||
).data
|
||||
bulk_job_id = bulk_job_launch_response['id']
|
||||
node = WorkflowJob.objects.get(id=bulk_job_id).workflow_job_nodes.all().order_by('created')
|
||||
assert node[0].inventory.id == inv.id
|
||||
assert node[0].diff_mode == True
|
||||
assert node[0].job_type == 'check'
|
||||
assert node[0].verbosity == 3
|
||||
assert node[0].execution_environment.id == ee.id
|
||||
assert node[0].forks == 1
|
||||
assert node[0].job_slice_count == 1
|
||||
assert node[0].timeout == 200
|
||||
assert node[0].extra_data == {'prompted_key': 'prompted_val'}
|
||||
assert node[0].scm_branch == 'non_dev'
|
||||
assert node[0].limit == 'kansas'
|
||||
assert node[0].skip_tags == 'foobar'
|
||||
assert node[0].job_tags == 'untagged'
|
||||
@@ -235,6 +235,7 @@ class TestAutoScaling:
|
||||
assert len(self.pool) == 10
|
||||
assert self.pool.workers[0].messages_sent == 2
|
||||
|
||||
@pytest.mark.timeout(20)
|
||||
def test_lost_worker_autoscale(self):
|
||||
# if a worker exits, it should be replaced automatically up to min_workers
|
||||
self.pool.init_workers(ResultWriter().work_loop, multiprocessing.Queue())
|
||||
@@ -243,8 +244,8 @@ class TestAutoScaling:
|
||||
assert len(self.pool) == 2
|
||||
assert not self.pool.should_grow
|
||||
alive_pid = self.pool.workers[1].pid
|
||||
self.pool.workers[0].process.terminate()
|
||||
time.sleep(2) # wait a moment for sigterm
|
||||
self.pool.workers[0].process.kill()
|
||||
self.pool.workers[0].process.join() # waits for process to fully terminate
|
||||
|
||||
# clean up and the dead worker
|
||||
self.pool.cleanup()
|
||||
@@ -336,6 +337,8 @@ class TestTaskPublisher:
|
||||
|
||||
|
||||
yesterday = tz_now() - datetime.timedelta(days=1)
|
||||
minute = tz_now() - datetime.timedelta(seconds=120)
|
||||
now = tz_now()
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -344,8 +347,8 @@ class TestJobReaper(object):
|
||||
'status, execution_node, controller_node, modified, fail',
|
||||
[
|
||||
('running', '', '', None, False), # running, not assigned to the instance
|
||||
('running', 'awx', '', None, True), # running, has the instance as its execution_node
|
||||
('running', '', 'awx', None, True), # running, has the instance as its controller_node
|
||||
('running', 'awx', '', minute, True), # running, has the instance as its execution_node
|
||||
('running', '', 'awx', minute, True), # running, has the instance as its controller_node
|
||||
('waiting', '', '', None, False), # waiting, not assigned to the instance
|
||||
('waiting', 'awx', '', None, False), # waiting, was edited less than a minute ago
|
||||
('waiting', '', 'awx', None, False), # waiting, was edited less than a minute ago
|
||||
@@ -367,7 +370,7 @@ class TestJobReaper(object):
|
||||
# we have to edit the modification time _without_ calling save()
|
||||
# (because .save() overwrites it to _now_)
|
||||
Job.objects.filter(id=j.id).update(modified=modified)
|
||||
reaper.reap(i)
|
||||
reaper.reap(i, ref_time=now)
|
||||
reaper.reap_waiting(i)
|
||||
job = Job.objects.first()
|
||||
if fail:
|
||||
@@ -378,13 +381,15 @@ class TestJobReaper(object):
|
||||
assert job.status == status
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'excluded_uuids, fail',
|
||||
'excluded_uuids, fail, modified',
|
||||
[
|
||||
(['abc123'], False),
|
||||
([], True),
|
||||
(['abc123'], False, None),
|
||||
([], False, None),
|
||||
([], True, minute),
|
||||
],
|
||||
)
|
||||
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail):
|
||||
def test_do_not_reap_excluded_uuids(self, excluded_uuids, fail, modified):
|
||||
"""Modified Test to account for ref_time in reap()"""
|
||||
i = Instance(hostname='awx')
|
||||
i.save()
|
||||
j = Job(
|
||||
@@ -395,10 +400,13 @@ class TestJobReaper(object):
|
||||
celery_task_id='abc123',
|
||||
)
|
||||
j.save()
|
||||
if modified:
|
||||
Job.objects.filter(id=j.id).update(modified=modified)
|
||||
|
||||
# if the UUID is excluded, don't reap it
|
||||
reaper.reap(i, excluded_uuids=excluded_uuids)
|
||||
reaper.reap(i, excluded_uuids=excluded_uuids, ref_time=now)
|
||||
job = Job.objects.first()
|
||||
|
||||
if fail:
|
||||
assert job.status == 'failed'
|
||||
assert 'marked as failed' in job.job_explanation
|
||||
@@ -411,6 +419,6 @@ class TestJobReaper(object):
|
||||
i.save()
|
||||
j = WorkflowJob(status='running', execution_node='awx')
|
||||
j.save()
|
||||
reaper.reap(i)
|
||||
reaper.reap(i, ref_time=now)
|
||||
|
||||
assert WorkflowJob.objects.first().status == 'running'
|
||||
|
||||
@@ -99,12 +99,12 @@ def test_instance_dup(org_admin, organization, project, instance_factory, instan
|
||||
list_response = get(reverse('api:instance_list'), user=system_auditor)
|
||||
api_num_instances_auditor = list(list_response.data.items())[0][1]
|
||||
|
||||
ig_all.read_role.members.add(org_admin)
|
||||
list_response2 = get(reverse('api:instance_list'), user=org_admin)
|
||||
api_num_instances_oa = list(list_response2.data.items())[0][1]
|
||||
|
||||
assert api_num_instances_auditor == actual_num_instances
|
||||
# Note: The org_admin will not see the default 'tower' node
|
||||
# (instance fixture) because it is not in its group, as expected
|
||||
# Note: The org_admin will not see instances unless at least read_role to the IG has been assigned
|
||||
assert api_num_instances_oa == (actual_num_instances - 1)
|
||||
|
||||
|
||||
|
||||
16
awx/main/tests/functional/test_org_admin_migration.py
Normal file
@@ -0,0 +1,16 @@
import pytest

from django.apps import apps

from awx.main.models import InstanceGroup
from awx.main.migrations import _OrgAdmin_to_use_ig as orgadmin


@pytest.mark.django_db
def test_migrate_admin_role(org_admin, organization):
instance_group = InstanceGroup.objects.create(name='test')
organization.admin_role.members.add(org_admin)
organization.instance_groups.add(instance_group)
orgadmin.migrate_org_admin_to_use(apps, None)
assert org_admin in instance_group.use_role.members.all()
assert instance_group.use_role.members.count() == 1
@@ -6,7 +6,47 @@ from awx.main.access import (
|
||||
InventoryAccess,
|
||||
JobTemplateAccess,
|
||||
)
|
||||
from awx.main.models import Organization
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"obj_perm,allowed,readonly,partial", [("admin_role", True, True, True), ("use_role", False, True, True), ("read_role", False, True, False)]
|
||||
)
|
||||
def test_ig_role_base_visibility(default_instance_group, rando, obj_perm, allowed, partial, readonly):
|
||||
if obj_perm:
|
||||
getattr(default_instance_group, obj_perm).members.add(rando)
|
||||
|
||||
assert readonly == InstanceGroupAccess(rando).can_read(default_instance_group)
|
||||
assert partial == InstanceGroupAccess(rando).can_use(default_instance_group)
|
||||
assert not InstanceGroupAccess(rando).can_add(default_instance_group)
|
||||
assert allowed == InstanceGroupAccess(rando).can_admin(default_instance_group)
|
||||
assert allowed == InstanceGroupAccess(rando).can_change(default_instance_group, {'name': 'New Name'})
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@pytest.mark.parametrize(
|
||||
"obj_perm,subobj_perm,allowed", [('admin_role', 'use_role', True), ('admin_role', 'read_role', False), ('admin_role', 'admin_role', True)]
|
||||
)
|
||||
def test_ig_role_based_associability(default_instance_group, rando, organization, job_template_factory, obj_perm, subobj_perm, allowed):
|
||||
objects = job_template_factory('jt', organization=organization, project='p', inventory='i', credential='c')
|
||||
if obj_perm:
|
||||
getattr(objects.job_template, obj_perm).members.add(rando)
|
||||
getattr(objects.inventory, obj_perm).members.add(rando)
|
||||
getattr(objects.organization, obj_perm).members.add(rando)
|
||||
if subobj_perm:
|
||||
getattr(default_instance_group, subobj_perm).members.add(rando)
|
||||
|
||||
assert allowed == JobTemplateAccess(rando).can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
|
||||
assert allowed == InventoryAccess(rando).can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
|
||||
assert allowed == OrganizationAccess(rando).can_attach(objects.organization, default_instance_group, 'instance_groups', None)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_ig_use_with_org_admin(default_instance_group, rando, org_admin):
|
||||
default_instance_group.use_role.members.add(rando)
|
||||
|
||||
assert list(InstanceGroupAccess(org_admin).get_queryset()) != [default_instance_group]
|
||||
assert list(InstanceGroupAccess(rando).get_queryset()) == [default_instance_group]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -24,7 +64,7 @@ def test_ig_admin_user_visibility(organization, default_instance_group, admin, s
|
||||
assert len(InstanceGroupAccess(system_auditor).get_queryset()) == 1
|
||||
assert len(InstanceGroupAccess(org_admin).get_queryset()) == 0
|
||||
organization.instance_groups.add(default_instance_group)
|
||||
assert len(InstanceGroupAccess(org_admin).get_queryset()) == 1
|
||||
assert len(InstanceGroupAccess(org_admin).get_queryset()) == 0
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
@@ -37,16 +77,6 @@ def test_ig_normal_user_associability(organization, default_instance_group, user
|
||||
assert not access.can_attach(organization, default_instance_group, 'instance_groups', None)
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_access_via_two_organizations(rando, default_instance_group):
|
||||
for org_name in ['org1', 'org2']:
|
||||
org = Organization.objects.create(name=org_name)
|
||||
org.instance_groups.add(default_instance_group)
|
||||
org.admin_role.members.add(rando)
|
||||
access = InstanceGroupAccess(rando)
|
||||
assert list(access.get_queryset()) == [default_instance_group]
|
||||
|
||||
|
||||
@pytest.mark.django_db
|
||||
def test_ig_associability(organization, default_instance_group, admin, system_auditor, org_admin, org_member, job_template_factory):
|
||||
admin_access = OrganizationAccess(admin)
|
||||
@@ -72,7 +102,7 @@ def test_ig_associability(organization, default_instance_group, admin, system_au
|
||||
omember_access = InventoryAccess(org_member)
|
||||
|
||||
assert admin_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
|
||||
assert oadmin_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
|
||||
assert not oadmin_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
|
||||
assert not auditor_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
|
||||
assert not omember_access.can_attach(objects.inventory, default_instance_group, 'instance_groups', None)
|
||||
|
||||
@@ -82,6 +112,6 @@ def test_ig_associability(organization, default_instance_group, admin, system_au
|
||||
omember_access = JobTemplateAccess(org_member)
|
||||
|
||||
assert admin_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
|
||||
assert oadmin_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
|
||||
assert not oadmin_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
|
||||
assert not auditor_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
|
||||
assert not omember_access.can_attach(objects.job_template, default_instance_group, 'instance_groups', None)
|
||||
|
||||
@@ -148,7 +148,7 @@ class TestWorkflowJobTemplateNodeAccess:
|
||||
elif permission_type == 'instance_groups':
|
||||
sub_obj = InstanceGroup.objects.create()
|
||||
org = Organization.objects.create()
|
||||
org.admin_role.members.add(rando) # only admins can see IGs
|
||||
sub_obj.use_role.members.add(rando) # only admins can see IGs
|
||||
org.instance_groups.add(sub_obj)
|
||||
|
||||
access = WorkflowJobTemplateNodeAccess(rando)
|
||||
|
||||
@@ -18,7 +18,7 @@ class DistinctParametrize(object):
|
||||
|
||||
|
||||
@pytest.mark.survey
|
||||
class SurveyVariableValidation:
|
||||
class TestSurveyVariableValidation:
|
||||
def test_survey_answers_as_string(self, job_template_factory):
|
||||
objects = job_template_factory('job-template-with-survey', survey=[{'variable': 'var1', 'type': 'text'}], persisted=False)
|
||||
jt = objects.job_template
|
||||
@@ -57,7 +57,7 @@ class SurveyVariableValidation:
|
||||
accepted, rejected, errors = obj.accept_or_ignore_variables({"a": 5})
|
||||
assert rejected == {"a": 5}
|
||||
assert accepted == {}
|
||||
assert str(errors[0]) == "Value 5 for 'a' expected to be a string."
|
||||
assert str(errors['variables_needed_to_start'][0]) == "Value 5 for 'a' expected to be a string."
|
||||
|
||||
def test_job_template_survey_default_variable_validation(self, job_template_factory):
|
||||
objects = job_template_factory(
|
||||
@@ -88,7 +88,7 @@ class SurveyVariableValidation:
|
||||
|
||||
obj.survey_enabled = True
|
||||
accepted, _, errors = obj.accept_or_ignore_variables({"a": 2})
|
||||
assert accepted == {{"a": 2.0}}
|
||||
assert accepted == {"a": 2.0}
|
||||
assert not errors
|
||||
|
||||
|
||||
|
||||
@@ -11,11 +11,13 @@ from datetime import timedelta
|
||||
|
||||
|
||||
if "pytest" in sys.modules:
|
||||
IS_TESTING_MODE = True
|
||||
from unittest import mock
|
||||
|
||||
with mock.patch('__main__.__builtins__.dir', return_value=[]):
|
||||
import ldap
|
||||
else:
|
||||
IS_TESTING_MODE = False
|
||||
import ldap
|
||||
|
||||
|
||||
@@ -129,6 +131,13 @@ NAMED_URL_GRAPH = {}
# Note: This setting may be overridden by database settings.
SCHEDULE_MAX_JOBS = 10

# Bulk API related settings
# Maximum number of jobs that can be launched in 1 bulk job
BULK_JOB_MAX_LAUNCH = 100

# Maximum number of host that can be created in 1 bulk host create
BULK_HOST_MAX_CREATE = 100

SITE_ID = 1

# Make this unique, and don't share it with anybody.
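BULK_JOB_MAX_LAUNCH and BULK_HOST_MAX_CREATE cap what a single request to the bulk endpoints may contain, so clients should chunk large payloads. A hedged sketch using requests against the bulk host-create endpoint; the URL, token and inventory id are placeholders, and the chunk size mirrors the default limit of 100:

    import requests

    CONTROLLER = 'https://awx.example.com'   # placeholder controller URL
    TOKEN = 'REDACTED'                       # placeholder OAuth2 token
    INVENTORY_ID = 1                         # placeholder inventory id
    CHUNK = 100                              # stay at or under BULK_HOST_MAX_CREATE


    def chunked(items, size):
        for i in range(0, len(items), size):
            yield items[i:i + size]


    hosts = [{'name': f'node-{n}.example.org'} for n in range(250)]
    for batch in chunked(hosts, CHUNK):
        resp = requests.post(
            f'{CONTROLLER}/api/v2/bulk/host_create/',
            json={'inventory': INVENTORY_ID, 'hosts': batch},
            headers={'Authorization': f'Bearer {TOKEN}'},
            timeout=30,
        )
        resp.raise_for_status()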
@@ -929,6 +938,11 @@ AWX_RUNNER_OMIT_ENV_FILES = True
# Allow ansible-runner to save ansible output (may cause performance issues)
AWX_RUNNER_SUPPRESS_OUTPUT_FILE = True

# https://github.com/ansible/ansible-runner/pull/1191/files
# Interval in seconds between the last message and keep-alive messages that
# ansible-runner will send
AWX_RUNNER_KEEPALIVE_SECONDS = 0

# Delete completed work units in receptor
RECEPTOR_RELEASE_WORK = True

@@ -28,7 +28,7 @@ import { getLanguageWithoutRegionCode } from 'util/language';
|
||||
import Metrics from 'screens/Metrics';
|
||||
import SubscriptionEdit from 'screens/Setting/Subscription/SubscriptionEdit';
|
||||
import useTitle from 'hooks/useTitle';
|
||||
import { dynamicActivate, locales } from './i18nLoader';
|
||||
import { dynamicActivate } from './i18nLoader';
|
||||
import getRouteConfig from './routeConfig';
|
||||
import { SESSION_REDIRECT_URL } from './constants';
|
||||
|
||||
@@ -139,16 +139,15 @@ export function ProtectedRoute({ children, ...rest }) {
|
||||
function App() {
|
||||
const history = useHistory();
|
||||
const { hash, search, pathname } = useLocation();
|
||||
let language = getLanguageWithoutRegionCode(navigator);
|
||||
if (!Object.keys(locales).includes(language)) {
|
||||
// If there isn't a string catalog available for the browser's
|
||||
// preferred language, default to one that has strings.
|
||||
language = 'en';
|
||||
}
|
||||
const searchParams = Object.fromEntries(new URLSearchParams(search));
|
||||
const pseudolocalization =
|
||||
searchParams.pseudolocalization === 'true' || false;
|
||||
const language =
|
||||
searchParams.lang || getLanguageWithoutRegionCode(navigator) || 'en';
|
||||
|
||||
useEffect(() => {
|
||||
dynamicActivate(language);
|
||||
}, [language]);
|
||||
dynamicActivate(language, pseudolocalization);
|
||||
}, [language, pseudolocalization]);
|
||||
|
||||
useTitle();
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ class InstanceGroups extends Base {
|
||||
this.associateInstance = this.associateInstance.bind(this);
|
||||
this.disassociateInstance = this.disassociateInstance.bind(this);
|
||||
this.readInstanceOptions = this.readInstanceOptions.bind(this);
|
||||
this.readInstanceGroupOptions = this.readInstanceGroupOptions.bind(this);
|
||||
this.readInstances = this.readInstances.bind(this);
|
||||
this.readJobs = this.readJobs.bind(this);
|
||||
}
|
||||
@@ -33,6 +34,10 @@ class InstanceGroups extends Base {
|
||||
return this.http.options(`${this.baseUrl}${id}/instances/`);
|
||||
}
|
||||
|
||||
readInstanceGroupOptions(id) {
|
||||
return this.http.options(`${this.baseUrl}${id}/`);
|
||||
}
|
||||
|
||||
readJobs(id) {
|
||||
return this.http.get(`${this.baseUrl}${id}/jobs/`);
|
||||
}
|
||||
|
||||
@@ -6,7 +6,12 @@ import { useField } from 'formik';
|
||||
import styled from 'styled-components';
|
||||
import { Split, SplitItem, Button, Modal } from '@patternfly/react-core';
|
||||
import { ExpandArrowsAltIcon } from '@patternfly/react-icons';
|
||||
import { yamlToJson, jsonToYaml, isJsonString } from 'util/yaml';
|
||||
import {
|
||||
yamlToJson,
|
||||
jsonToYaml,
|
||||
isJsonString,
|
||||
parseVariableField,
|
||||
} from 'util/yaml';
|
||||
import { CheckboxField } from '../FormField';
|
||||
import MultiButtonToggle from '../MultiButtonToggle';
|
||||
import CodeEditor from './CodeEditor';
|
||||
@@ -37,36 +42,24 @@ function VariablesField({
|
||||
// track focus manually, because the Code Editor library doesn't wire
|
||||
// into Formik completely
|
||||
const [shouldValidate, setShouldValidate] = useState(false);
|
||||
const [mode, setMode] = useState(initialMode || YAML_MODE);
|
||||
const validate = useCallback(
|
||||
(value) => {
|
||||
if (!shouldValidate) {
|
||||
return undefined;
|
||||
}
|
||||
try {
|
||||
if (mode === YAML_MODE) {
|
||||
yamlToJson(value);
|
||||
} else {
|
||||
JSON.parse(value);
|
||||
}
|
||||
parseVariableField(value);
|
||||
} catch (error) {
|
||||
return error.message;
|
||||
}
|
||||
return undefined;
|
||||
},
|
||||
[shouldValidate, mode]
|
||||
[shouldValidate]
|
||||
);
|
||||
const [field, meta, helpers] = useField({ name, validate });
|
||||
|
||||
useEffect(() => {
|
||||
if (isJsonString(field.value)) {
|
||||
// mode's useState above couldn't be initialized to JSON_MODE because
|
||||
// the field value had to be defined below it
|
||||
setMode(JSON_MODE);
|
||||
onModeChange(JSON_MODE);
|
||||
helpers.setValue(JSON.stringify(JSON.parse(field.value), null, 2));
|
||||
}
|
||||
}, []); // eslint-disable-line react-hooks/exhaustive-deps
|
||||
const [mode, setMode] = useState(() =>
|
||||
isJsonString(field.value) ? JSON_MODE : initialMode || YAML_MODE
|
||||
);
|
||||
|
||||
useEffect(
|
||||
() => {
|
||||
|
||||
@@ -6,6 +6,7 @@ import {
|
||||
InventoriesAPI,
|
||||
ProjectsAPI,
|
||||
OrganizationsAPI,
|
||||
InstanceGroupsAPI,
|
||||
} from 'api';
|
||||
|
||||
export default function getResourceAccessConfig() {
|
||||
@@ -210,5 +211,32 @@ export default function getResourceAccessConfig() {
|
||||
fetchItems: (queryParams) => OrganizationsAPI.read(queryParams),
|
||||
fetchOptions: () => OrganizationsAPI.readOptions(),
|
||||
},
|
||||
{
|
||||
selectedResource: 'Instance Groups',
|
||||
label: t`Instance Groups`,
|
||||
searchColumns: [
|
||||
{
|
||||
name: t`Name`,
|
||||
key: 'name__icontains',
|
||||
isDefault: true,
|
||||
},
|
||||
{
|
||||
name: t`Created By (Username)`,
|
||||
key: 'created_by__username__icontains',
|
||||
},
|
||||
{
|
||||
name: t`Modified By (Username)`,
|
||||
key: 'modified_by__username__icontains',
|
||||
},
|
||||
],
|
||||
sortColumns: [
|
||||
{
|
||||
name: t`Name`,
|
||||
key: 'name',
|
||||
},
|
||||
],
|
||||
fetchItems: (queryParams) => InstanceGroupsAPI.read(queryParams),
|
||||
fetchOptions: () => InstanceGroupsAPI.readOptions(),
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
/* eslint-disable-next-line import/prefer-default-export */
|
||||
export const JOB_TYPE_URL_SEGMENTS = {
|
||||
job: 'playbook',
|
||||
project_update: 'project',
|
||||
|
||||
@@ -27,8 +27,21 @@ i18n.loadLocaleData({
|
||||
* We do a dynamic import of just the catalog that we need
|
||||
* @param locale any locale string
|
||||
*/
|
||||
export async function dynamicActivate(locale) {
|
||||
export async function dynamicActivate(locale, pseudolocalization = false) {
|
||||
const { messages } = await import(`./locales/${locale}/messages`);
|
||||
|
||||
if (pseudolocalization) {
|
||||
Object.keys(messages).forEach((key) => {
|
||||
if (Array.isArray(messages[key])) {
|
||||
// t`Foo ${param}` -> ["Foo ", ['param']] => [">>", "Foo ", ['param'], "<<"]
|
||||
messages[key] = ['»', ...messages[key], '«'];
|
||||
} else {
|
||||
// simple string
|
||||
messages[key] = `»${messages[key]}«`;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
i18n.load(locale, messages);
|
||||
i18n.activate(locale);
|
||||
}
|
||||
|
||||
@@ -184,7 +184,6 @@ function getRouteConfig(userProfile = {}) {
|
||||
deleteRouteGroup('settings');
|
||||
deleteRoute('management_jobs');
|
||||
if (userProfile?.isOrgAdmin) return routeConfig;
|
||||
deleteRoute('instance_groups');
|
||||
deleteRoute('topology_view');
|
||||
deleteRoute('instances');
|
||||
if (!userProfile?.isNotificationAdmin) deleteRoute('notification_templates');
|
||||
|
||||
@@ -127,6 +127,7 @@ describe('getRouteConfig', () => {
|
||||
'/teams',
|
||||
'/credential_types',
|
||||
'/notification_templates',
|
||||
'/instance_groups',
|
||||
'/applications',
|
||||
'/execution_environments',
|
||||
]);
|
||||
@@ -150,6 +151,7 @@ describe('getRouteConfig', () => {
|
||||
'/users',
|
||||
'/teams',
|
||||
'/credential_types',
|
||||
'/instance_groups',
|
||||
'/applications',
|
||||
'/execution_environments',
|
||||
]);
|
||||
@@ -173,6 +175,7 @@ describe('getRouteConfig', () => {
|
||||
'/users',
|
||||
'/teams',
|
||||
'/credential_types',
|
||||
'/instance_groups',
|
||||
'/applications',
|
||||
'/execution_environments',
|
||||
]);
|
||||
@@ -201,6 +204,7 @@ describe('getRouteConfig', () => {
|
||||
'/teams',
|
||||
'/credential_types',
|
||||
'/notification_templates',
|
||||
'/instance_groups',
|
||||
'/applications',
|
||||
'/execution_environments',
|
||||
]);
|
||||
|
||||
@@ -21,9 +21,11 @@ function ContainerGroupEdit({ instanceGroup }) {
|
||||
result: initialPodSpec,
|
||||
} = useRequest(
|
||||
useCallback(async () => {
|
||||
const { data } = await InstanceGroupsAPI.readOptions();
|
||||
return data.actions.POST.pod_spec_override.default;
|
||||
}, []),
|
||||
const { data } = await InstanceGroupsAPI.readInstanceGroupOptions(
|
||||
instanceGroup.id
|
||||
);
|
||||
return data.actions.PUT.pod_spec_override.default;
|
||||
}, [instanceGroup.id]),
|
||||
{
|
||||
initialPodSpec: {},
|
||||
}
|
||||
|
||||
@@ -48,6 +48,7 @@ function InventorySourceDetail({ inventorySource }) {
|
||||
source,
|
||||
source_path,
|
||||
source_vars,
|
||||
scm_branch,
|
||||
update_cache_timeout,
|
||||
update_on_launch,
|
||||
verbosity,
|
||||
@@ -233,6 +234,11 @@ function InventorySourceDetail({ inventorySource }) {
|
||||
helpText={helpText.subFormVerbosityFields}
|
||||
value={VERBOSITY()[verbosity]}
|
||||
/>
|
||||
<Detail
|
||||
label={t`Source Control Branch`}
|
||||
helpText={helpText.sourceControlBranch}
|
||||
value={scm_branch}
|
||||
/>
|
||||
<Detail
|
||||
label={t`Cache timeout`}
|
||||
value={`${update_cache_timeout} ${t`seconds`}`}
|
||||
|
||||
@@ -152,6 +152,7 @@ const getInventoryHelpTextStrings = () => ({
|
||||
},
|
||||
enabledVariableField: t`Retrieve the enabled state from the given dict of host variables.
|
||||
The enabled variable may be specified using dot notation, e.g: 'foo.bar'`,
|
||||
sourceControlBranch: t`Branch to use on inventory sync. Project default used if blank. Only allowed if project allow_override field is set to true.`,
|
||||
enabledValue: t`This field is ignored unless an Enabled Variable is set. If the enabled variable matches this value, the host will be enabled on import.`,
|
||||
hostFilter: t`Regular expression where only matching host names will be imported. The filter is applied as a post-processing step after any inventory plugin filters are applied.`,
|
||||
sourceVars: (docsBaseUrl, source) => {
|
||||
|
||||
@@ -71,6 +71,7 @@ const InventorySourceFormFields = ({
|
||||
source_project: null,
|
||||
source_script: null,
|
||||
source_vars: '---\n',
|
||||
scm_branch: null,
|
||||
update_cache_timeout: 0,
|
||||
update_on_launch: false,
|
||||
verbosity: 1,
|
||||
@@ -248,6 +249,7 @@ const InventorySourceForm = ({
|
||||
source_project: source?.summary_fields?.source_project || null,
|
||||
source_script: source?.summary_fields?.source_script || null,
|
||||
source_vars: source?.source_vars || '---\n',
|
||||
scm_branch: source?.scm_branch || '',
|
||||
update_cache_timeout: source?.update_cache_timeout || 0,
|
||||
update_on_launch: source?.update_on_launch || false,
|
||||
verbosity: source?.verbosity || 1,
|
||||
|
||||
@@ -13,6 +13,7 @@ import { required } from 'util/validators';
|
||||
import CredentialLookup from 'components/Lookup/CredentialLookup';
|
||||
import ProjectLookup from 'components/Lookup/ProjectLookup';
|
||||
import Popover from 'components/Popover';
|
||||
import FormField from 'components/FormField';
|
||||
import {
|
||||
OptionsField,
|
||||
SourceVarsField,
|
||||
@@ -36,7 +37,6 @@ const SCMSubForm = ({ autoPopulateProject }) => {
|
||||
name: 'source_path',
|
||||
validate: required(t`Select a value for this field`),
|
||||
});
|
||||
|
||||
const { error: sourcePathError, request: fetchSourcePath } = useRequest(
|
||||
useCallback(async (projectId) => {
|
||||
const { data } = await ProjectsAPI.readInventories(projectId);
|
||||
@@ -44,7 +44,6 @@ const SCMSubForm = ({ autoPopulateProject }) => {
|
||||
}, []),
|
||||
[]
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (projectMeta.initialValue) {
|
||||
fetchSourcePath(projectMeta.initialValue.id);
|
||||
@@ -58,6 +57,7 @@ const SCMSubForm = ({ autoPopulateProject }) => {
|
||||
(value) => {
|
||||
setFieldValue('source_project', value);
|
||||
setFieldTouched('source_project', true, false);
|
||||
setFieldValue('scm_branch', '', false);
|
||||
if (sourcePathField.value) {
|
||||
setFieldValue('source_path', '');
|
||||
setFieldTouched('source_path', false);
|
||||
@@ -68,7 +68,6 @@ const SCMSubForm = ({ autoPopulateProject }) => {
|
||||
},
|
||||
[fetchSourcePath, setFieldValue, setFieldTouched, sourcePathField.value]
|
||||
);
|
||||
|
||||
const handleCredentialUpdate = useCallback(
|
||||
(value) => {
|
||||
setFieldValue('credential', value);
|
||||
@@ -76,9 +75,17 @@ const SCMSubForm = ({ autoPopulateProject }) => {
|
||||
},
|
||||
[setFieldValue, setFieldTouched]
|
||||
);
|
||||
|
||||
return (
|
||||
<>
|
||||
{projectField.value?.allow_override && (
|
||||
<FormField
|
||||
id="project-scm-branch"
|
||||
name="scm_branch"
|
||||
type="text"
|
||||
label={t`Source Control Branch/Tag/Commit`}
|
||||
tooltip={helpText.sourceControlBranch}
|
||||
/>
|
||||
)}
|
||||
<CredentialLookup
|
||||
credentialTypeKind="cloud"
|
||||
label={t`Credential`}
|
||||
|
||||
@@ -208,6 +208,7 @@ function AWXLogin({ alt, isAuthenticated }) {
|
||||
>
|
||||
{(formik) => (
|
||||
<LoginForm
|
||||
autoComplete="off"
|
||||
data-cy="login-form"
|
||||
className={authError ? 'pf-m-error' : ''}
|
||||
helperText={helperText}
|
||||
|
||||
@@ -115,6 +115,14 @@ describe('<Login />', () => {
|
||||
);
|
||||
});
|
||||
|
||||
test.only('form has autocomplete off', async () => {
|
||||
let wrapper;
|
||||
await act(async () => {
|
||||
wrapper = mountWithContexts(<AWXLogin isAuthenticated={() => false} />);
|
||||
});
|
||||
expect(wrapper.find('form[autoComplete="off"]').length).toBe(1);
|
||||
});
|
||||
|
||||
test('custom logo renders Brand component with correct src and alt', async () => {
|
||||
let wrapper;
|
||||
await act(async () => {
|
||||
|
||||
@@ -150,6 +150,11 @@ function JobsEdit() {
|
||||
type={options?.SCHEDULE_MAX_JOBS ? 'number' : undefined}
|
||||
isRequired={Boolean(options?.SCHEDULE_MAX_JOBS)}
|
||||
/>
|
||||
<InputField
|
||||
name="AWX_RUNNER_KEEPALIVE_SECONDS"
|
||||
config={jobs.AWX_RUNNER_KEEPALIVE_SECONDS}
|
||||
type="number"
|
||||
/>
|
||||
<InputField
|
||||
name="DEFAULT_JOB_TIMEOUT"
|
||||
config={jobs.DEFAULT_JOB_TIMEOUT}
|
||||
|
||||
@@ -344,6 +344,16 @@
|
||||
"category_slug": "jobs",
|
||||
"default": 10
|
||||
},
|
||||
"AWX_RUNNER_KEEPALIVE_SECONDS": {
|
||||
"type": "integer",
|
||||
"required": true,
|
||||
"label": "K8S Ansible Runner Keep-Alive Message Interval",
|
||||
"help_text": "Only applies to K8S deployments and container_group jobs. If not 0, send a message every so-many seconds to keep connection open.",
|
||||
"category": "Jobs",
|
||||
"category_slug": "jobs",
|
||||
"placeholder": 240,
|
||||
"default": 0
|
||||
},
|
||||
"AWX_ANSIBLE_CALLBACK_PLUGINS": {
|
||||
"type": "list",
|
||||
"required": false,
|
||||
@@ -4098,6 +4108,15 @@
|
||||
"category_slug": "jobs",
|
||||
"defined_in_file": false
|
||||
},
|
||||
"AWX_RUNNER_KEEPALIVE_SECONDS": {
|
||||
"type": "integer",
|
||||
"label": "K8S Ansible Runner Keep-Alive Message Interval",
|
||||
"help_text": "Only applies to K8S deployments and container_group jobs. If not 0, send a message every so-many seconds to keep connection open.",
|
||||
"category": "Jobs",
|
||||
"category_slug": "jobs",
|
||||
"placeholder": 240,
|
||||
"default": 0
|
||||
},
|
||||
"AWX_ANSIBLE_CALLBACK_PLUGINS": {
|
||||
"type": "list",
|
||||
"label": "Ansible Callback Plugins",
|
||||
|
||||
@@ -51,6 +51,7 @@
|
||||
"STDOUT_MAX_BYTES_DISPLAY":1048576,
|
||||
"EVENT_STDOUT_MAX_BYTES_DISPLAY":1024,
|
||||
"SCHEDULE_MAX_JOBS":10,
|
||||
"AWX_RUNNER_KEEPALIVE_SECONDS": 0,
|
||||
"AWX_ANSIBLE_CALLBACK_PLUGINS":[],
|
||||
"DEFAULT_JOB_TIMEOUT":0,
|
||||
"DEFAULT_JOB_IDLE_TIMEOUT":0,
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
"STDOUT_MAX_BYTES_DISPLAY": 1048576,
|
||||
"EVENT_STDOUT_MAX_BYTES_DISPLAY": 1024,
|
||||
"SCHEDULE_MAX_JOBS": 10,
|
||||
"AWX_RUNNER_KEEPALIVE_SECONDS": 0,
|
||||
"AWX_ANSIBLE_CALLBACK_PLUGINS": [],
|
||||
"DEFAULT_JOB_TIMEOUT": 0,
|
||||
"DEFAULT_JOB_IDLE_TIMEOUT": 0,
|
||||
|
||||
@@ -6,6 +6,8 @@ action_groups:
|
||||
- ad_hoc_command_cancel
|
||||
- ad_hoc_command_wait
|
||||
- application
|
||||
- bulk_job_launch
|
||||
- bulk_host_create
|
||||
- controller_meta
|
||||
- credential_input_source
|
||||
- credential
|
||||
|
||||
102
awx_collection/plugins/modules/bulk_host_create.py
Normal file
@@ -0,0 +1,102 @@
#!/usr/bin/python
# coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}

DOCUMENTATION = '''
---
module: bulk_host_create
author: "Seth Foster (@fosterseth)"
short_description: Bulk host create in Automation Platform Controller
description:
    - Single-request bulk host creation in Automation Platform Controller.
    - Provides a way to add many hosts at once to an inventory in Controller.
options:
    hosts:
      description:
        - List of hosts to add to inventory.
      required: True
      type: list
      elements: dict
      suboptions:
        name:
          description:
            - The name to use for the host.
          type: str
          required: True
        description:
          description:
            - The description to use for the host.
          type: str
        enabled:
          description:
            - If the host should be enabled.
          type: bool
        variables:
          description:
            - Variables to use for the host.
          type: dict
        instance_id:
          description:
            - instance_id to use for the host.
          type: str
    inventory:
      description:
        - Inventory name or ID the hosts should be made a member of.
      required: True
      type: str
extends_documentation_fragment: awx.awx.auth
'''

EXAMPLES = '''
- name: Bulk host create
  bulk_host_create:
    inventory: 1
    hosts:
      - name: foobar.org
      - name: 127.0.0.1
'''

from ..module_utils.controller_api import ControllerAPIModule
import json


def main():
    # Any additional arguments that are not fields of the item can be added here
    argument_spec = dict(
        hosts=dict(required=True, type='list', elements='dict'),
        inventory=dict(required=True, type='str'),
    )

    # Create a module for ourselves
    module = ControllerAPIModule(argument_spec=argument_spec)

    # Extract our parameters
    inv_name = module.params.get('inventory')
    hosts = module.params.get('hosts')

    for h in hosts:
        if 'variables' in h:
            h['variables'] = json.dumps(h['variables'])

    inv_id = module.resolve_name_to_id('inventories', inv_name)

    # Create the hosts with a single request against the bulk endpoint
    result = module.post_endpoint("bulk/host_create", data={"inventory": inv_id, "hosts": hosts})

    if result['status_code'] != 201:
        module.fail_json(msg="Failed to create hosts, see response for details", response=result)

    module.json_output['changed'] = True

    module.exit_json(**module.json_output)


if __name__ == '__main__':
    main()

281
awx_collection/plugins/modules/bulk_job_launch.py
Normal file
281
awx_collection/plugins/modules/bulk_job_launch.py
Normal file
@@ -0,0 +1,281 @@
#!/usr/bin/python
# coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}

DOCUMENTATION = '''
---
module: bulk_job_launch
author: "Seth Foster (@fosterseth)"
short_description: Bulk job launch in Automation Platform Controller
description:
    - Single-request bulk job launch in Automation Platform Controller.
    - Creates a workflow where each node corresponds to an item specified in the jobs option.
    - Any options specified at the top level will be inherited by the launched jobs (if prompt on launch is enabled for those fields).
    - Provides a way to submit many jobs at once to Controller.
options:
    jobs:
      description:
        - List of jobs to create.
      required: True
      type: list
      elements: dict
      suboptions:
        unified_job_template:
          description:
            - Job template ID to use when launching.
          type: int
          required: True
        inventory:
          description:
            - Inventory ID applied as a prompt, if job template prompts for inventory
          type: int
        execution_environment:
          description:
            - Execution environment ID applied as a prompt, if job template prompts for execution environments
          type: int
        instance_groups:
          description:
            - Instance group IDs applied as a prompt, if job template prompts for instance groups
          type: list
          elements: int
        credentials:
          description:
            - Credential IDs applied as a prompt, if job template prompts for credentials
          type: list
          elements: int
        labels:
          description:
            - Label IDs to use for the job, if job template prompts for labels
          type: list
          elements: int
        extra_data:
          description:
            - Extra variables to apply at launch time, if job template prompts for extra variables
          type: dict
          default: {}
        diff_mode:
          description:
            - Show the changes made by Ansible tasks where supported
          type: bool
        verbosity:
          description:
            - Verbosity level for this ad hoc command run
          type: int
          choices: [ 0, 1, 2, 3, 4, 5 ]
        scm_branch:
          description:
            - SCM branch applied as a prompt, if job template prompts for SCM branch
            - This is only applicable if the project allows for branch override
          type: str
        job_type:
          description:
            - Job type applied as a prompt, if job template prompts for job type
          type: str
          choices:
            - 'run'
            - 'check'
        job_tags:
          description:
            - Job tags applied as a prompt, if job template prompts for job tags
          type: str
        skip_tags:
          description:
            - Tags to skip, applied as a prompt, if job template prompts for job tags
          type: str
        limit:
          description:
            - Limit to act on, applied as a prompt, if job template prompts for limit
          type: str
        forks:
          description:
            - The number of parallel or simultaneous processes to use while executing the playbook, if job template prompts for forks
          type: int
        job_slice_count:
          description:
            - The number of jobs to slice into at runtime, if job template prompts for job slices.
            - Will cause the Job Template to launch a workflow if value is greater than 1.
          type: int
          default: '1'
        identifier:
          description:
            - Identifier for the resulting workflow node that represents this job
          type: str
        timeout:
          description:
            - Maximum time in seconds to wait for a job to finish (server-side), if job template prompts for timeout.
          type: int
    name:
      description:
        - The name of the bulk job that is created
      required: False
      type: str
    description:
      description:
        - Optional description of this bulk job.
      type: str
    organization:
      description:
        - If not provided, will use the organization the user is in.
        - Required if the user belongs to more than one organization.
        - Affects who can see the resulting bulk job.
      type: str
    inventory:
      description:
        - Inventory name or ID to use for the jobs ran within the bulk job, only used if prompt for inventory is set.
      type: str
    scm_branch:
      description:
        - A specific branch of the SCM project to run the template on.
        - This is only applicable if the project allows for branch override.
      type: str
    extra_vars:
      description:
        - Any extra vars required to launch the job.
        - Extends the extra_data field at the individual job level.
      type: dict
    limit:
      description:
        - Limit to use for the bulk job.
      type: str
    job_tags:
      description:
        - A comma-separated list of playbook tags to specify what parts of the playbooks should be executed.
      type: str
    skip_tags:
      description:
        - A comma-separated list of playbook tags to skip certain tasks or parts of the playbooks to be executed.
      type: str
    wait:
      description:
        - Wait for the bulk job to complete.
      default: True
      type: bool
    interval:
      description:
        - The interval to request an update from the controller.
      required: False
      default: 2
      type: float
extends_documentation_fragment: awx.awx.auth
'''

RETURN = '''
job_info:
    description: dictionary containing information about the bulk job executed
    returned: If bulk job launched
    type: dict
'''

EXAMPLES = '''
- name: Launch bulk jobs
  bulk_job_launch:
    name: My Bulk Job Launch
    jobs:
      - unified_job_template: 7
        skip_tags: foo
      - unified_job_template: 10
        limit: foo
        extra_data:
          food: carrot
          color: orange
    limit: bar
    extra_vars: # these override / extend extra_data at the job level
      food: grape
      animal: owl
    organization: Default
    inventory: Demo Inventory

- name: Launch bulk jobs with lookup plugin
  bulk_job_launch:
    name: My Bulk Job Launch
    jobs:
      - unified_job_template: 7
      - unified_job_template: "{{ lookup('awx.awx.controller_api', 'job_templates', query_params={'name': 'Demo Job Template'},
                               return_ids=True, expect_one=True) }}"
'''

from ..module_utils.controller_api import ControllerAPIModule


def main():
    # Any additional arguments that are not fields of the item can be added here
    argument_spec = dict(
        jobs=dict(required=True, type='list', elements='dict'),
        name=dict(),
        description=dict(),
        organization=dict(type='str'),
        inventory=dict(type='str'),
        limit=dict(),
        scm_branch=dict(),
        extra_vars=dict(type='dict'),
        job_tags=dict(),
        skip_tags=dict(),
        wait=dict(required=False, default=True, type='bool'),
        interval=dict(required=False, default=2.0, type='float'),
    )

    # Create a module for ourselves
    module = ControllerAPIModule(argument_spec=argument_spec)

    post_data_names = (
        'jobs',
        'name',
        'description',
        'limit',
        'scm_branch',
        'extra_vars',
        'job_tags',
        'skip_tags',
    )
    post_data = {}
    for p in post_data_names:
        val = module.params.get(p)
        if val:
            post_data[p] = val

    # Resolve name to ID for related resources
    # Do not resolve name for "jobs" suboptions, for optimization
    org_name = module.params.get('organization')
    if org_name:
        post_data['organization'] = module.resolve_name_to_id('organizations', org_name)

    inv_name = module.params.get('inventory')
    if inv_name:
        post_data['inventory'] = module.resolve_name_to_id('inventories', inv_name)

    # Extract our parameters
    wait = module.params.get('wait')
    timeout = module.params.get('timeout')
    interval = module.params.get('interval')
    name = module.params.get('name')

    # Launch the jobs
    result = module.post_endpoint("bulk/job_launch", data=post_data)

    if result['status_code'] != 201:
        module.fail_json(msg="Failed to launch bulk jobs, see response for details", response=result)

    module.json_output['changed'] = True
    module.json_output['id'] = result['json']['id']
    module.json_output['status'] = result['json']['status']
    # This is for backwards compatibility
    module.json_output['job_info'] = result['json']

    if not wait:
        module.exit_json(**module.json_output)

    # Invoke wait function
    module.wait_on_url(url=result['json']['url'], object_name=name, object_type='Bulk Job Launch', timeout=timeout, interval=interval)

    module.exit_json(**module.json_output)


if __name__ == '__main__':
    main()

@@ -105,6 +105,11 @@ options:
      description:
        - Project to use as source with scm option
      type: str
    scm_branch:
      description:
        - Inventory source SCM branch.
        - Project must have branch override enabled.
      type: str
    state:
      description:
        - Desired state of the resource.
@@ -178,6 +183,7 @@ def main():
        update_on_launch=dict(type='bool'),
        update_cache_timeout=dict(type='int'),
        source_project=dict(),
        scm_branch=dict(type='str'),
        notification_templates_started=dict(type="list", elements='str'),
        notification_templates_success=dict(type="list", elements='str'),
        notification_templates_error=dict(type="list", elements='str'),
@@ -272,6 +278,7 @@ def main():
        'enabled_var',
        'enabled_value',
        'host_filter',
        'scm_branch',
    )

    # Layer in all remaining optional information

43
awx_collection/test/awx/test_bulk.py
Normal file
43
awx_collection/test/awx/test_bulk.py
Normal file
@@ -0,0 +1,43 @@
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import pytest

from awx.main.models import WorkflowJob


@pytest.mark.django_db
def test_bulk_job_launch(run_module, admin_user, job_template):
    jobs = [dict(unified_job_template=job_template.id)]
    run_module(
        'bulk_job_launch',
        {
            'name': "foo-bulk-job",
            'jobs': jobs,
            'extra_vars': {'animal': 'owl'},
            'limit': 'foo',
            'wait': False,
        },
        admin_user,
    )

    bulk_job = WorkflowJob.objects.get(name="foo-bulk-job")
    assert bulk_job.extra_vars == '{"animal": "owl"}'
    assert bulk_job.limit == "foo"


@pytest.mark.django_db
def test_bulk_host_create(run_module, admin_user, inventory):
    hosts = [dict(name="127.0.0.1"), dict(name="foo.dns.org")]
    run_module(
        'bulk_host_create',
        {
            'inventory': inventory.name,
            'hosts': hosts,
        },
        admin_user,
    )
    resp_hosts = inventory.hosts.all().values_list('name', flat=True)
    for h in hosts:
        assert h['name'] in resp_hosts
@@ -44,6 +44,12 @@ no_endpoint_for_module = [
    'subscriptions', # Subscription deals with config/subscriptions
]

# Add modules with endpoints that are not at /api/v2
extra_endpoints = {
    'bulk_job_launch': '/api/v2/bulk/job_launch/',
    'bulk_host_create': '/api/v2/bulk/host_create/',
}

# Global module parameters we can ignore
ignore_parameters = ['state', 'new_name', 'update_secrets', 'copy_from']

@@ -73,6 +79,8 @@ no_api_parameter_ok = {
    'user': ['new_username', 'organization'],
    # workflow_approval parameters that do not apply when approving an approval node.
    'workflow_approval': ['action', 'interval', 'timeout', 'workflow_job_id'],
    # bulk
    'bulk_job_launch': ['interval', 'wait'],
}

# When this tool was created we were not feature complete. Adding something in here indicates a module
@@ -228,6 +236,10 @@ def test_completeness(collection_import, request, admin_user, job_template, exec
        user=admin_user,
        expect=None,
    )

    for key, val in extra_endpoints.items():
        endpoint_response.data[key] = val

    for endpoint in endpoint_response.data.keys():
        # Module names are singular and endpoints are plural so we need to convert to singular
        singular_endpoint = '{0}'.format(endpoint)

@@ -112,6 +112,7 @@ def test_falsy_value(run_module, admin_user, base_inventory):
# credential        ?  ?  o  o  r  r  r  r  r  r  r  o
# source_project    ?  ?  r  -  -  -  -  -  -  -  -  -
# source_path       ?  ?  r  -  -  -  -  -  -  -  -  -
# scm_branch        ?  ?  r  -  -  -  -  -  -  -  -  -
# verbosity         ?  ?  o  o  o  o  o  o  o  o  o  o
# overwrite         ?  ?  o  o  o  o  o  o  o  o  o  o
# overwrite_vars    ?  ?  o  o  o  o  o  o  o  o  o  o

@@ -0,0 +1,51 @@
---
- name: Generate a random string for test
  set_fact:
    test_id: "{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}"
  when: test_id is not defined

- name: Generate a unique name
  set_fact:
    bulk_inv_name: "AWX-Collection-tests-bulk_host_create-{{ test_id }}"

- name: Get our collection package
  controller_meta:
  register: controller_meta

- name: Generate the name of our plugin
  set_fact:
    plugin_name: "{{ controller_meta.prefix }}.controller_api"


- name: Create an inventory
  inventory:
    name: "{{ bulk_inv_name }}"
    organization: Default
    state: present
  register: inventory_result


- name: Bulk Host Create
  bulk_host_create:
    hosts:
      - name: "123.456.789.123"
        description: "myhost1"
        variables:
          food: carrot
          color: orange
      - name: example.dns.gg
        description: "myhost2"
        enabled: false
    inventory: "{{ bulk_inv_name }}"
  register: result

- assert:
    that:
      - result is not failed

# cleanup
- name: Delete inventory
  inventory:
    name: "{{ bulk_inv_name }}"
    organization: Default
    state: absent
@@ -0,0 +1,70 @@
---
- name: Generate a random string for test
  set_fact:
    test_id: "{{ lookup('password', '/dev/null chars=ascii_letters length=16') }}"
  when: test_id is not defined

- name: Generate a unique name
  set_fact:
    bulk_job_name: "AWX-Collection-tests-bulk_job_launch-{{ test_id }}"

- name: Get our collection package
  controller_meta:
  register: controller_meta

- name: Generate the name of our plugin
  set_fact:
    plugin_name: "{{ controller_meta.prefix }}.controller_api"

- name: Get Inventory
  set_fact:
    inventory_id: "{{ lookup(plugin_name, 'inventories', query_params={'name': 'Demo Inventory'}, return_ids=True ) }}"

- name: Create a Job Template
  job_template:
    name: "{{ bulk_job_name }}"
    copy_from: "Demo Job Template"
    ask_variables_on_launch: true
    ask_inventory_on_launch: true
    ask_skip_tags_on_launch: true
    allow_simultaneous: true
    state: present
  register: jt_result

- name: Create Bulk Job
  bulk_job_launch:
    name: "{{ bulk_job_name }}"
    jobs:
      - unified_job_template: "{{ jt_result.id }}"
        inventory: "{{ inventory_id }}"
        skip_tags: "skipfoo,skipbar"
        extra_data:
          animal: fish
          color: orange
      - unified_job_template: "{{ jt_result.id }}"
    extra_vars:
      animal: bear
      food: carrot
    skip_tags: "skipbaz"
    job_tags: "Hello World"
    limit: "localhost"
    wait: True
    inventory: Demo Inventory
    organization: Default
  register: result

- assert:
    that:
      - result is not failed
      - "'id' in result"
      - result['job_info']['skip_tags'] == "skipbaz"
      - result['job_info']['limit'] == "localhost"
      - result['job_info']['job_tags'] == "Hello World"
      - result['job_info']['inventory'] == {{ inventory_id }}
      - "result['job_info']['extra_vars'] == '{\"animal\": \"bear\", \"food\": \"carrot\"}'"

# cleanup
- name: Delete Job Template
  job_template:
    name: "{{ bulk_job_name }}"
    state: absent
@@ -1,6 +1,7 @@
# Order matters
from .page import * # NOQA
from .base import * # NOQA
from .bulk import * # NOQA
from .access_list import * # NOQA
from .api import * # NOQA
from .authtoken import * # NOQA

24
awxkit/awxkit/api/pages/bulk.py
Normal file
24
awxkit/awxkit/api/pages/bulk.py
Normal file
@@ -0,0 +1,24 @@
from awxkit.api.resources import resources
from . import base
from . import page


class Bulk(base.Base):
    def get(self, **query_parameters):
        request = self.connection.get(self.endpoint, query_parameters, headers={'Accept': 'application/json'})
        return self.page_identity(request)


page.register_page([resources.bulk, (resources.bulk, 'get')], Bulk)


class BulkJobLaunch(base.Base):
    def post(self, payload={}):
        result = self.connection.post(self.endpoint, payload)
        if 'url' in result.json():
            return self.walk(result.json()['url'])
        else:
            return self.page_identity(result, request_json={})


page.register_page(resources.bulk_job_launch, BulkJobLaunch)
@@ -319,6 +319,7 @@ class InventorySource(HasCreate, HasNotifications, UnifiedJobTemplate):
    optional_fields = (
        'source_path',
        'source_vars',
        'scm_branch',
        'timeout',
        'overwrite',
        'overwrite_vars',

@@ -13,6 +13,8 @@ class Resources(object):
    _applications = 'applications/'
    _auth = 'auth/'
    _authtoken = 'authtoken/'
    _bulk = 'bulk/'
    _bulk_job_launch = 'bulk/job_launch/'
    _config = 'config/'
    _config_attach = 'config/attach/'
    _credential = r'credentials/\d+/'

@@ -44,6 +44,10 @@ class CustomAction(metaclass=CustomActionRegistryMeta):


class Launchable(object):
    @property
    def options_endpoint(self):
        return self.page.endpoint + '1/{}/'.format(self.action)

    def add_arguments(self, parser, resource_options_parser, with_pk=True):
        from .options import pk_or_name

@@ -53,7 +57,7 @@ class Launchable(object):
        parser.choices[self.action].add_argument('--action-timeout', type=int, help='If set with --monitor or --wait, time out waiting on job completion.')
        parser.choices[self.action].add_argument('--wait', action='store_true', help='If set, waits until the launched job finishes.')

        launch_time_options = self.page.connection.options(self.page.endpoint + '1/{}/'.format(self.action))
        launch_time_options = self.page.connection.options(self.options_endpoint)
        if launch_time_options.ok:
            launch_time_options = launch_time_options.json()['actions']['POST']
            resource_options_parser.options['LAUNCH'] = launch_time_options
@@ -90,6 +94,48 @@ class JobTemplateLaunch(Launchable, CustomAction):
    resource = 'job_templates'


class BulkJobLaunch(Launchable, CustomAction):
    action = 'job_launch'
    resource = 'bulk'

    @property
    def options_endpoint(self):
        return self.page.endpoint + '{}/'.format(self.action)

    def add_arguments(self, parser, resource_options_parser):
        Launchable.add_arguments(self, parser, resource_options_parser, with_pk=False)

    def perform(self, **kwargs):
        monitor_kwargs = {
            'monitor': kwargs.pop('monitor', False),
            'wait': kwargs.pop('wait', False),
            'action_timeout': kwargs.pop('action_timeout', False),
        }
        response = self.page.get().job_launch.post(kwargs)
        self.monitor(response, **monitor_kwargs)
        return response


class BulkHostCreate(CustomAction):
    action = 'host_create'
    resource = 'bulk'

    @property
    def options_endpoint(self):
        return self.page.endpoint + '{}/'.format(self.action)

    def add_arguments(self, parser, resource_options_parser):
        options = self.page.connection.options(self.options_endpoint)
        if options.ok:
            options = options.json()['actions']['POST']
            resource_options_parser.options['HOSTCREATEPOST'] = options
            resource_options_parser.build_query_arguments(self.action, 'HOSTCREATEPOST')

    def perform(self, **kwargs):
        response = self.page.get().host_create.post(kwargs)
        return response


class ProjectUpdate(Launchable, CustomAction):
    action = 'update'
    resource = 'projects'

@@ -163,7 +163,10 @@ class ResourceOptionsParser(object):
            if method == 'list' and param.get('filterable') is False:
                continue

            def json_or_yaml(v):
            def list_of_json_or_yaml(v):
                return json_or_yaml(v, expected_type=list)

            def json_or_yaml(v, expected_type=dict):
                if v.startswith('@'):
                    v = open(os.path.expanduser(v[1:])).read()
                try:
@@ -174,15 +177,16 @@ class ResourceOptionsParser(object):
                except Exception:
                    raise argparse.ArgumentTypeError("{} is not valid JSON or YAML".format(v))

                if not isinstance(parsed, dict):
                if not isinstance(parsed, expected_type):
                    raise argparse.ArgumentTypeError("{} is not valid JSON or YAML".format(v))

                for k, v in parsed.items():
                    # add support for file reading at top-level JSON keys
                    # (to make things like SSH key data easier to work with)
                    if isinstance(v, str) and v.startswith('@'):
                        path = os.path.expanduser(v[1:])
                        parsed[k] = open(path).read()
                if expected_type is dict:
                    for k, v in parsed.items():
                        # add support for file reading at top-level JSON keys
                        # (to make things like SSH key data easier to work with)
                        if isinstance(v, str) and v.startswith('@'):
                            path = os.path.expanduser(v[1:])
                            parsed[k] = open(path).read()

                return parsed

@@ -258,6 +262,19 @@ class ResourceOptionsParser(object):
            if k == 'extra_vars':
                args.append('-e')

            # special handling for bulk endpoints
            if self.resource == 'bulk':
                if method == "host_create":
                    if k == "inventory":
                        kwargs['required'] = required = True
                    if k == 'hosts':
                        kwargs['type'] = list_of_json_or_yaml
                        kwargs['required'] = required = True
                if method == "job_launch":
                    if k == 'jobs':
                        kwargs['type'] = list_of_json_or_yaml
                        kwargs['required'] = required = True

            if required:
                if required_group is None:
                    required_group = self.parser.choices[method].add_argument_group('required arguments')

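With the bulk actions registered against the `bulk` resource and their arguments derived from the OPTIONS response as above, the CLI should expose them as `awx bulk job_launch` and `awx bulk host_create`. A hedged sketch of what invocations might look like (host and token are placeholders; `--jobs` and `--hosts` accept inline JSON/YAML or an `@file` reference via list_of_json_or_yaml):

    awx --conf.host https://controller.example.com --conf.token $TOKEN \
        bulk job_launch --jobs '[{"unified_job_template": 7}, {"unified_job_template": 8}]' --monitor

    awx --conf.host https://controller.example.com --conf.token $TOKEN \
        bulk host_create --inventory 1 --hosts '[{"name": "host1"}, {"name": "host2"}]'
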
101
docs/bulk_api.md
Normal file
101
docs/bulk_api.md
Normal file
@@ -0,0 +1,101 @@
# Bulk API Overview

Bulk API endpoints allow the client to perform bulk operations in a single web request. The following bulk API actions are currently available:
- /api/v2/bulk/job_launch
- /api/v2/bulk/host_create

Making individual API calls in rapid succession or at high concurrency can overwhelm AWX's ability to serve web requests. When the application's capacity to serve requests is exhausted, clients often receive 504 timeout errors.

Allowing the client to combine actions into fewer requests makes it possible to launch more jobs or add more hosts in less time, without exhausting the Controller's ability to serve requests, issuing excessive and repetitive database queries, or holding excessive database connections (each web request opens a separate database connection).

## Bulk Job Launch

This feature allows a single web request to launch multiple jobs. It creates a workflow job with the individual jobs as nodes within it, and it supports providing promptable fields such as inventory and credentials.

The following is an example of a POST request to /api/v2/bulk/job_launch:

    {
        "name": "Bulk Job Launch",
        "jobs": [
            {"unified_job_template": 7},
            {"unified_job_template": 8},
            {"unified_job_template": 9}
        ]
    }

The request above launches a workflow job with 3 nodes in it.

The maximum number of jobs allowed to be launched in one bulk launch is controlled by the setting `BULK_JOB_MAX_LAUNCH`.

**Important Note: A bulk job launched by a normal user will not be visible in the jobs section of the UI, although the individual jobs within a bulk job can be seen there.**

If the job template has fields marked as prompt on launch, those can be provided for each job in the bulk job launch as well:

    {
        "name": "Bulk Job Launch",
        "jobs": [
            {"unified_job_template": 11, "limit": "kansas", "credentials": [1], "inventory": 1}
        ]
    }

In the example above, job template 11 has limit, credentials and inventory marked as prompt on launch, and those values are provided as parameters for that job.

Prompted field values can also be provided at the top level. For example:

    {
        "name": "Bulk Job Launch",
        "jobs": [
            {"unified_job_template": 11, "limit": "kansas", "credentials": [1]},
            {"unified_job_template": 12},
            {"unified_job_template": 13}
        ],
        "inventory": 2
    }

In the above example, `inventory: 2` is used for the job templates (11, 12 and 13) that have inventory marked as prompt on launch.

*Note:* The `instance_groups` relationship is not supported for node-level prompts, unlike `"credentials"` in the above example, and will be ignored if provided. See OPTIONS for `/api/v2/bulk/job_launch/` for what fields are accepted at the workflow and node level, as that is the ultimate source of truth to determine what fields the API will accept.

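As a worked example (not part of the API itself), a payload like the ones above can be submitted with any HTTP client; assuming a Controller at https://controller.example.com and a valid OAuth2 token:

    curl -s -X POST https://controller.example.com/api/v2/bulk/job_launch/ \
         -H "Authorization: Bearer $TOKEN" \
         -H "Content-Type: application/json" \
         -d '{"name": "Bulk Job Launch", "jobs": [{"unified_job_template": 7}, {"unified_job_template": 8}]}'

The response contains the id, status and url of the resulting workflow job, which can be polled to track progress.
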
### RBAC For Bulk Job Launch

#### Who can bulk launch?
Anyone who is logged in can view the launch point. In order to launch a unified_job_template, you need either the `update` or `execute` permission, depending on the type of unified job (job template, project update, etc.).

Launching through the bulk endpoint results in a workflow job being launched. For auditing purposes, an organization generally has to be assigned to the resulting workflow. The logic for assigning this organization is as follows:

- Superusers may assign any organization or none. If they do not assign one, they will be the only user able to see the parent workflow.
- Users that are members of exactly 1 organization do not need to specify an organization, as their single organization is assigned to the resulting workflow.
- Users that are members of multiple organizations must specify the organization to assign to the resulting workflow. If they do not specify one, an error is returned indicating this requirement.

Example of specifying the organization:

    {
        "name": "Bulk Job Launch with org specified",
        "jobs": [
            {"unified_job_template": 12},
            {"unified_job_template": 13}
        ],
        "organization": 2
    }

#### Who can see bulk jobs that have been run?
System admins and organization admins will see bulk jobs in the workflow jobs list and the unified jobs list. They can additionally see the individual workflow jobs.

Regular users can only see the individual workflow jobs that were launched by their bulk job launch. These jobs do not appear in the unified jobs list, nor do they show in the workflow jobs list. This is important because the response to a bulk job launch includes a link to the parent workflow job.

## Bulk Host Create

This feature allows a single web request to create multiple hosts in an inventory.

The following is an example of a POST request to /api/v2/bulk/host_create:

    {
        "inventory": 1,
        "hosts": [{"name": "host1", "variables": "ansible_connection: local"}, {"name": "host2"}, {"name": "host3"}, {"name": "host4"}, {"name": "host5"}, {"name": "host6"}]
    }

The request above adds 6 hosts to the inventory.

The maximum number of hosts that can be added is controlled by the setting `BULK_HOST_MAX_CREATE`; the default is 100 hosts. Additionally, nginx limits the maximum payload size, which is easy to exceed when posting a large number of hosts in one request together with their variable data. The maximum payload size is 1MB unless overridden in your nginx config.
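As with bulk job launch, the request can be issued with any HTTP client; a minimal sketch using the same placeholder host and token:

    curl -s -X POST https://controller.example.com/api/v2/bulk/host_create/ \
         -H "Authorization: Bearer $TOKEN" \
         -H "Content-Type: application/json" \
         -d '{"inventory": 1, "hosts": [{"name": "host1"}, {"name": "host2"}]}'
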
@@ -14,6 +14,10 @@ make awx-link
make version_file

if [[ -n "$RUN_MIGRATIONS" ]]; then
    # wait for postgres to be ready
    while ! nc -z postgres 5432; do
        echo "Waiting for postgres to be ready to accept connections"; sleep 1;
    done;
    make migrate
else
    wait-for-migrations